├── LICENSE ├── README.md ├── Train_model_frontend.py ├── Train_model_heatmap.py ├── Train_model_subpixel.py ├── Val_model_heatmap.py ├── Val_model_subpixel.py ├── configs ├── classical_descriptors.yaml ├── magicpoint_coco_export.yaml ├── magicpoint_kitti_export.yaml ├── magicpoint_kitti_train.yaml ├── magicpoint_repeatability_heatmap.yaml ├── magicpoint_shapes_pair.yaml ├── superpoint_coco_train_heatmap.yaml ├── superpoint_kitti_train_heatmap.yaml └── superpoint_tum_train_heatmap.yaml ├── datasets ├── Apollo.py ├── Coco.py ├── Kitti_inh.py ├── SyntheticDataset_gaussian.py ├── Tum.py ├── __init__.py ├── base_dataset.py ├── data_tools.py ├── kitti │ ├── 2011_09_26_drive_0001_sync_02 │ │ ├── 0000000000.jpg │ │ ├── 0000000000.npy │ │ ├── 0000000001.jpg │ │ ├── 0000000001.npy │ │ ├── 0000000002.jpg │ │ ├── 0000000002.npy │ │ ├── 0000000003.jpg │ │ ├── 0000000003.npy │ │ ├── 0000000004.jpg │ │ ├── 0000000004.npy │ │ ├── 0000000005.jpg │ │ ├── 0000000005.npy │ │ ├── 0000000006.jpg │ │ ├── 0000000006.npy │ │ ├── 0000000007.jpg │ │ ├── 0000000007.npy │ │ ├── 0000000008.jpg │ │ ├── 0000000008.npy │ │ ├── 0000000009.jpg │ │ ├── 0000000009.npy │ │ ├── 0000000010.jpg │ │ ├── 0000000010.npy │ │ ├── 0000000011.jpg │ │ ├── 0000000011.npy │ │ ├── 0000000012.jpg │ │ ├── 0000000012.npy │ │ ├── 0000000013.jpg │ │ ├── 0000000013.npy │ │ ├── 0000000014.jpg │ │ ├── 0000000014.npy │ │ ├── 0000000015.jpg │ │ ├── 0000000015.npy │ │ ├── 0000000016.jpg │ │ ├── 0000000016.npy │ │ ├── 0000000017.jpg │ │ ├── 0000000017.npy │ │ ├── 0000000018.jpg │ │ ├── 0000000018.npy │ │ ├── 0000000019.jpg │ │ ├── 0000000019.npy │ │ ├── 0000000020.jpg │ │ ├── 0000000020.npy │ │ ├── 0000000021.jpg │ │ ├── 0000000021.npy │ │ ├── 0000000022.jpg │ │ ├── 0000000022.npy │ │ ├── 0000000023.jpg │ │ ├── 0000000023.npy │ │ ├── 0000000024.jpg │ │ ├── 0000000024.npy │ │ ├── 0000000025.jpg │ │ ├── 0000000025.npy │ │ ├── 0000000026.jpg │ │ ├── 0000000026.npy │ │ ├── 0000000027.jpg │ │ ├── 0000000027.npy │ │ ├── 0000000028.jpg │ │ ├── 0000000028.npy │ │ ├── 0000000029.jpg │ │ ├── 0000000029.npy │ │ ├── 0000000030.jpg │ │ ├── 0000000030.npy │ │ ├── 0000000031.jpg │ │ ├── 0000000031.npy │ │ ├── 0000000032.jpg │ │ ├── 0000000032.npy │ │ ├── 0000000033.jpg │ │ ├── 0000000033.npy │ │ ├── 0000000034.jpg │ │ ├── 0000000034.npy │ │ ├── 0000000035.jpg │ │ ├── 0000000035.npy │ │ ├── 0000000036.jpg │ │ ├── 0000000036.npy │ │ ├── 0000000037.jpg │ │ ├── 0000000037.npy │ │ ├── 0000000038.jpg │ │ ├── 0000000038.npy │ │ ├── 0000000039.jpg │ │ ├── 0000000039.npy │ │ ├── 0000000040.jpg │ │ ├── 0000000040.npy │ │ ├── 0000000041.jpg │ │ ├── 0000000041.npy │ │ ├── 0000000042.jpg │ │ ├── 0000000042.npy │ │ ├── 0000000043.jpg │ │ ├── 0000000043.npy │ │ ├── 0000000044.jpg │ │ ├── 0000000044.npy │ │ ├── 0000000045.jpg │ │ ├── 0000000045.npy │ │ ├── 0000000046.jpg │ │ ├── 0000000046.npy │ │ ├── 0000000047.jpg │ │ ├── 0000000047.npy │ │ ├── 0000000048.jpg │ │ ├── 0000000048.npy │ │ ├── 0000000049.jpg │ │ ├── 0000000049.npy │ │ ├── 0000000050.jpg │ │ ├── 0000000050.npy │ │ ├── 0000000051.jpg │ │ ├── 0000000051.npy │ │ ├── 0000000052.jpg │ │ ├── 0000000052.npy │ │ ├── 0000000053.jpg │ │ ├── 0000000053.npy │ │ ├── 0000000054.jpg │ │ ├── 0000000054.npy │ │ ├── 0000000055.jpg │ │ ├── 0000000055.npy │ │ ├── 0000000056.jpg │ │ ├── 0000000056.npy │ │ ├── 0000000057.jpg │ │ ├── 0000000057.npy │ │ ├── 0000000058.jpg │ │ ├── 0000000058.npy │ │ ├── 0000000059.jpg │ │ ├── 0000000059.npy │ │ ├── 0000000060.jpg │ │ ├── 0000000060.npy │ │ ├── 0000000061.jpg │ │ ├── 0000000061.npy │ │ ├── 
0000000062.jpg │ │ ├── 0000000062.npy │ │ ├── 0000000063.jpg │ │ ├── 0000000063.npy │ │ ├── 0000000064.jpg │ │ ├── 0000000064.npy │ │ ├── 0000000065.jpg │ │ ├── 0000000065.npy │ │ ├── 0000000066.jpg │ │ ├── 0000000066.npy │ │ ├── 0000000067.jpg │ │ ├── 0000000067.npy │ │ ├── 0000000068.jpg │ │ ├── 0000000068.npy │ │ ├── 0000000069.jpg │ │ ├── 0000000069.npy │ │ ├── 0000000070.jpg │ │ ├── 0000000070.npy │ │ ├── 0000000071.jpg │ │ ├── 0000000071.npy │ │ ├── 0000000072.jpg │ │ ├── 0000000072.npy │ │ ├── 0000000073.jpg │ │ ├── 0000000073.npy │ │ ├── 0000000074.jpg │ │ ├── 0000000074.npy │ │ ├── 0000000075.jpg │ │ ├── 0000000075.npy │ │ ├── 0000000076.jpg │ │ ├── 0000000076.npy │ │ ├── 0000000077.jpg │ │ ├── 0000000077.npy │ │ ├── 0000000078.jpg │ │ ├── 0000000078.npy │ │ ├── 0000000079.jpg │ │ ├── 0000000079.npy │ │ ├── 0000000080.jpg │ │ ├── 0000000080.npy │ │ ├── 0000000081.jpg │ │ ├── 0000000081.npy │ │ ├── 0000000082.jpg │ │ ├── 0000000082.npy │ │ ├── 0000000083.jpg │ │ ├── 0000000083.npy │ │ ├── 0000000084.jpg │ │ ├── 0000000084.npy │ │ ├── 0000000085.jpg │ │ ├── 0000000085.npy │ │ ├── 0000000086.jpg │ │ ├── 0000000086.npy │ │ ├── 0000000087.jpg │ │ ├── 0000000087.npy │ │ ├── 0000000088.jpg │ │ ├── 0000000088.npy │ │ ├── 0000000089.jpg │ │ ├── 0000000089.npy │ │ ├── 0000000090.jpg │ │ ├── 0000000090.npy │ │ ├── 0000000091.jpg │ │ ├── 0000000091.npy │ │ ├── 0000000092.jpg │ │ ├── 0000000092.npy │ │ ├── 0000000093.jpg │ │ ├── 0000000093.npy │ │ ├── 0000000094.jpg │ │ ├── 0000000094.npy │ │ ├── 0000000095.jpg │ │ ├── 0000000095.npy │ │ ├── 0000000096.jpg │ │ ├── 0000000096.npy │ │ ├── 0000000097.jpg │ │ ├── 0000000097.npy │ │ ├── 0000000098.jpg │ │ ├── 0000000098.npy │ │ ├── 0000000099.jpg │ │ ├── 0000000099.npy │ │ ├── 0000000100.jpg │ │ ├── 0000000100.npy │ │ ├── 0000000101.jpg │ │ ├── 0000000101.npy │ │ ├── 0000000102.jpg │ │ ├── 0000000102.npy │ │ ├── 0000000103.jpg │ │ ├── 0000000103.npy │ │ ├── 0000000104.jpg │ │ ├── 0000000104.npy │ │ ├── 0000000105.jpg │ │ ├── 0000000105.npy │ │ ├── 0000000106.jpg │ │ ├── 0000000106.npy │ │ ├── 0000000107.jpg │ │ ├── 0000000107.npy │ │ ├── cam.txt │ │ └── poses.txt │ ├── kitti_test │ │ ├── 0000000000.npz │ │ ├── 0000000000_0.png │ │ ├── 0000000000_1.png │ │ └── test.txt │ └── train.txt ├── kitti_split │ ├── train.txt │ └── val.txt ├── patches_dataset.py ├── synthetic_dataset.py ├── synthetic_shapes.py ├── tum_split │ ├── train.txt │ └── val.txt └── utils │ ├── __init__.py │ ├── augmentation_legacy.py │ ├── photometric_augmentation.py │ ├── pipeline.py │ └── util.py ├── evaluation.py ├── evaluations ├── descriptor_evaluation.py └── detector_evaluation.py ├── export.py ├── export_classical.py ├── logs ├── magicpoint_synth20 │ └── checkpoints │ │ └── superPointNet_200000_checkpoint.pth.tar ├── magicpoint_synth_t2 │ ├── checkpoints │ │ └── superPointNet_100000_checkpoint.pth.tar │ └── config.yml ├── superpoint_coco_heat2_0 │ ├── checkpoints │ │ ├── superPointNet_170000_checkpoint.pth.tar │ │ └── superPointNet_90000_checkpoint.pth.tar │ └── config.yml ├── superpoint_coco_heat2_0_170k_nms4_det0.015 │ ├── config.yml │ └── predictions │ │ ├── 0.npz │ │ ├── 1.npz │ │ ├── 2.npz │ │ ├── 3.npz │ │ ├── 4.npz │ │ ├── 5.npz │ │ ├── 6.npz │ │ ├── 7.npz │ │ ├── matching │ │ ├── 0cv.png │ │ ├── 0m.png │ │ ├── 1cv.png │ │ ├── 1m.png │ │ ├── 2cv.png │ │ ├── 2m.png │ │ ├── 3cv.png │ │ ├── 3m.png │ │ ├── 4cv.png │ │ ├── 4m.png │ │ ├── 5cv.png │ │ ├── 5m.png │ │ ├── 6cv.png │ │ └── 6m.png │ │ ├── repeatibility3 │ │ ├── 0.png │ │ ├── 1.png │ │ ├── 2.png │ │ ├── 3.png │ │ 
├── 4.png │ │ ├── 5.png │ │ └── 6.png │ │ └── warping │ │ ├── 0.png │ │ ├── 1.png │ │ ├── 2.png │ │ ├── 3.png │ │ ├── 4.png │ │ ├── 5.png │ │ └── 6.png └── superpoint_kitti_heat2_0 │ ├── checkpoints │ └── superPointNet_50000_checkpoint.pth.tar │ └── config.yml ├── models ├── SubpixelNet.py ├── SuperPointNet.py ├── SuperPointNet_gauss2.py ├── SuperPointNet_pretrained.py ├── __init__.py ├── classical_detectors_descriptors.py ├── homographies.py ├── model_utils.py ├── model_wrap.py └── unet_parts.py ├── notebooks ├── Untitled.ipynb ├── analysis_hpatches.ipynb ├── analysis_hpatches_repeatability.ipynb ├── dataLoader_test.ipynb ├── descriptors_evaluation_on_coco.ipynb ├── evaluation_visualization.ipynb ├── export_homography_adaptation.ipynb ├── export_visualization.ipynb ├── h2.npz ├── homography_adaptation.ipynb ├── homography_visualization.ipynb ├── kitti_correspondence.ipynb ├── loss_functions.ipynb ├── sort_kitti_dataset.ipynb ├── sparse_descriptor_loss.ipynb ├── test_ap.ipynb ├── test_classical_descriptors.ipynb ├── test_export.ipynb ├── test_gaussian_kernel.ipynb ├── test_grid_sample.ipynb ├── test_model_front_end.ipynb ├── test_sparse_loss.ipynb ├── test_sparse_loss.py ├── test_train_model_frontend.ipynb ├── torch_test.ipynb ├── useful_modules.ipynb ├── valid_mask.ipynb └── visualize_hpatches.ipynb ├── pretrained └── superpoint_v1.pth ├── requirements.txt ├── requirements_torch.txt ├── run_export.sh ├── settings.py ├── test ├── sample_homography.py └── visualize_warping.py ├── train4.py ├── train_tutorial.md └── utils ├── __init__.py ├── correspondence_tools ├── __init__.py ├── correspondence_augmentation.py ├── correspondence_finder.py └── correspondence_plotter.py ├── cp_labels.py ├── d2s.py ├── draw.py ├── homographies.py ├── loader.py ├── logging.py ├── loss_functions ├── __init__.py ├── loss_composer.py ├── pixelwise_contrastive_loss.py └── sparse_loss.py ├── losses.py ├── photometric.py ├── photometric_augmentation.py ├── print_tool.py ├── tools.py ├── utils.py └── var_dim.py /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Eric Jau 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
--------------------------------------------------------------------------------
/Val_model_subpixel.py:
--------------------------------------------------------------------------------
"""script for subpixel experiment (not tested)
"""

import numpy as np
import torch
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torch.optim
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
from tqdm import tqdm
from utils.loader import dataLoader, modelLoader, pretrainedLoader
import logging

from utils.tools import dict_update

from utils.utils import labels2Dto3D, flattenDetection, labels2Dto3D_flattened

from utils.utils import pltImshow, saveImg
from utils.utils import precisionRecall_torch
from utils.utils import save_checkpoint

from pathlib import Path


class Val_model_subpixel(object):
    def __init__(self, config, device='cpu', verbose=False):
        self.config = config
        self.model = self.config['name']
        self.params = self.config['params']
        self.weights_path = self.config['pretrained']
        self.device = device

    def loadModel(self):
        # load the subpixel network and its pretrained weights
        # model = 'SuperPointNet'
        # params = self.config['model']['subpixel']['params']
        from utils.loader import modelLoader
        self.net = modelLoader(model=self.model, **self.params)

        checkpoint = torch.load(self.weights_path,
                                map_location=lambda storage, loc: storage)
        self.net.load_state_dict(checkpoint['model_state_dict'])

        self.net = self.net.to(self.device)
        logging.info('successfully load pretrained model from: %s', self.weights_path)

    def extract_patches(self, label_idx, img):
        """Extract fixed-size patches around the given keypoint locations.

        input:
            label_idx: tensor [N, 4]: (batch, 0, y, x)
            img: tensor [batch, channel(1), H, W]
        """
        from utils.losses import extract_patches
        patch_size = self.config['params']['patch_size']
        patches = extract_patches(label_idx.to(self.device), img.to(self.device),
                                  patch_size=patch_size)
        return patches

    def run(self, patches):
        """Run the subpixel network on the extracted patches (no gradients)."""
        with torch.no_grad():
            pred_res = self.net(patches)
        return pred_res


if __name__ == '__main__':
    # filename = 'configs/magicpoint_shapes_subpix.yaml'
    filename = 'configs/magicpoint_repeatability.yaml'
    import yaml
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    torch.set_default_tensor_type(torch.FloatTensor)
    with open(filename, 'r') as f:
        config = yaml.safe_load(f)

    task = config['data']['dataset']
    # data loading
    from utils.loader import dataLoader_test as dataLoader
    data = dataLoader(config, dataset='hpatches')
    test_set, test_loader = data['test_set'], data['test_loader']

    # take one sample
    for i, sample in tqdm(enumerate(test_loader)):
        if i > 1:
            break

    val_agent = Val_model_subpixel(config['subpixel'], device=device)
    val_agent.loadModel()
    # points from heatmap
    img = sample['image']
    print("image: ", img.shape)
    points = torch.tensor([[1, 2], [3, 4]])

    def points_to_4d(points):
        # pad each (y, x) point with two leading zeros -> (batch, 0, y, x)
        num_of_points = points.shape[0]
        cols = torch.zeros(num_of_points, 1).float()
        points = torch.cat((cols, cols, points.float()), dim=1)
        return points

    label_idx = points_to_4d(points)
    # concat points to be (batch, 0, y, x)
    patches = val_agent.extract_patches(label_idx, img)
    points_res = val_agent.run(patches)
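A minimal usage sketch for the class above (not part of the repository). It assumes a subpixel config block containing the 'name', 'params' (with 'patch_size') and 'pretrained' keys that __init__ reads; the model name, checkpoint path and keypoint coordinates below are placeholders.

# Hedged usage sketch for Val_model_subpixel; the config values are assumptions.
import torch
from Val_model_subpixel import Val_model_subpixel

subpixel_config = {
    'name': 'SubpixelNet',       # assumption: any model name accepted by utils.loader.modelLoader
    'params': {'patch_size': 5},
    'pretrained': 'logs/some_run/checkpoints/superPointNet_100000_checkpoint.pth.tar',  # hypothetical path
}
agent = Val_model_subpixel(subpixel_config, device='cpu')
agent.loadModel()

img = torch.zeros(1, 1, 120, 160)                    # [batch, channel, H, W]
pts = torch.tensor([[10., 20.], [30., 40.]])         # (y, x) detections from a heatmap
label_idx = torch.cat((torch.zeros(pts.shape[0], 2), pts), dim=1)  # -> (batch, 0, y, x)

patches = agent.extract_patches(label_idx, img)
residuals = agent.run(patches)                       # per-patch sub-pixel refinement output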
--------------------------------------------------------------------------------
/configs/classical_descriptors.yaml:
--------------------------------------------------------------------------------
data:
    name: 'patches_dataset'
    dataset: 'hpatches'  # 'hpatches' 'coco'
    alteration: 'all'  # 'i' 'v' 'all'
    cache_in_memory: false
    validation_size: 100
    preprocessing:
        resize: [240, 320]  # False for coco
        # resize: [480, 640]  # False for coco
model:
    name: 'classical_detectors_descriptors'
    method: 'sift'  # 'orb' 'sift'
    batch_size: 1  # unused
    learning_rate: 0.001  # unused
    nms: 4
    top_k: 1000
eval_iter: 600
seed: 1
--------------------------------------------------------------------------------
/configs/magicpoint_coco_export.yaml:
--------------------------------------------------------------------------------
data:
    dataset: 'Coco'  # 'coco' 'hpatches'
    export_folder: 'train'  # train, val
    preprocessing:
        resize: [240, 320]
        # resize: [480, 640]
    gaussian_label:
        enable: false  # false
        sigma: 1.
    augmentation:
        photometric:
            enable: false
    homography_adaptation:
        enable: true
        num: 100  # 100
        aggregation: 'sum'
        filter_counts: 0
        homographies:
            params:
                translation: true
                rotation: true
                scaling: true
                perspective: true
                scaling_amplitude: 0.2
                perspective_amplitude_x: 0.2
                perspective_amplitude_y: 0.2
                allow_artifacts: true
                patch_ratio: 0.85

training:
    workers_test: 2

model:
    # name: 'SuperPointNet'
    name: 'SuperPointNet_gauss2'  # 'SuperPointNet_gauss2'
    params: {
    }
    batch_size: 1
    eval_batch_size: 1
    detection_threshold: 0.015  # 0.015
    nms: 4
    top_k: 600
    subpixel:
        enable: true

    # pretrained: 'logs/magicpoint_synth20/checkpoints/superPointNet_200000_checkpoint.pth.tar'  # 'SuperPointNet'
    pretrained: 'logs/magicpoint_synth_t2/checkpoints/superPointNet_100000_checkpoint.pth.tar'
--------------------------------------------------------------------------------
/configs/magicpoint_kitti_export.yaml:
--------------------------------------------------------------------------------
data:
    # name: 'kitti'
    dataset: 'Kitti_inh'  # 'coco' 'hpatches', 'Kitti', ''
    export_folder: 'train'
    alteration: 'all'  # 'all' 'i' 'v'
    root: 'datasets/kitti_wVal'  # root for dataset
    root_split_txt: 'datasets/kitti_split'  # split file provided in datasets/kitti_split
    preprocessing:
        resize: [384, 1248]  # hand defined, original: [375, 1242]
        # resize: [192, 624]  # hand defined
        # resize: [96, 312]  # hand defined
    gaussian_label:
        enable: false  # false
        sigma: 1.
15 | homography_adaptation: 16 | enable: true 17 | num: 20 # 100 18 | aggregation: 'sum' 19 | filter_counts: 0 20 | homographies: 21 | params: 22 | translation: true 23 | rotation: true 24 | scaling: true 25 | perspective: true 26 | scaling_amplitude: 0.2 27 | perspective_amplitude_x: 0.2 28 | perspective_amplitude_y: 0.2 29 | allow_artifacts: true 30 | patch_ratio: 0.85 31 | # name: 'coco' 32 | # cache_in_memory: false 33 | # validation_size: 100 34 | model: 35 | # name: 'SuperPointNet' # need to use old version of network 36 | name: 'SuperPointNet_gauss2' # 'SuperPointNet_gauss2' 37 | batch_size: 1 38 | detection_threshold: 0.015 # 0.015 39 | nms: 4 40 | top_k: 600 # no use 41 | 42 | params: {} 43 | subpixel: 44 | enable: false 45 | 46 | # eval_iter: -1 47 | 48 | # pretrained: 'logs/magicpoint_synth20/checkpoints/superPointNet_200000_checkpoint.pth.tar' 49 | pretrained: 'logs/magicpoint_synth_t2/checkpoints/superPointNet_100000_checkpoint.pth.tar' 50 | 51 | 52 | -------------------------------------------------------------------------------- /configs/magicpoint_kitti_train.yaml: -------------------------------------------------------------------------------- 1 | data: 2 | name: 'kitti' 3 | dataset: 'kitti' # 'coco' 'hpatches' 4 | # labels: superpoint_base199_homoAdapt_coco_top600_resize/predictions # Complete with your export labels 5 | # labels: superpoint_kitti-test/predictions 6 | # labels: magicpoint_synth20_homoAdapt100_kitti/predictions 7 | labels: magicpoint_synth20_homoAdapt100_kitti_r0.5/predictions_wVal 8 | 9 | 10 | validation_size: 192 11 | preprocessing: 12 | # resize: [192, 624] 13 | resize: [96, 312] 14 | gaussian_label: 15 | enable: false # false 16 | sigma: 1. 17 | augmentation: 18 | photometric: 19 | enable: true 20 | enable_train: true 21 | enable_val: false 22 | primitives: [ 23 | 'random_brightness', 'random_contrast', 'additive_speckle_noise', 24 | 'additive_gaussian_noise', 'additive_shade', 'motion_blur' ] 25 | params: 26 | random_brightness: {max_abs_change: 50} 27 | random_contrast: {strength_range: [0.3, 1.5]} 28 | additive_gaussian_noise: {stddev_range: [0, 10]} 29 | additive_speckle_noise: {prob_range: [0, 0.0035]} 30 | additive_shade: 31 | transparency_range: [-0.5, 0.5] 32 | kernel_size_range: [100, 150] 33 | motion_blur: {max_kernel_size: 3} 34 | homographic: 35 | enable: true # true 36 | enable_train: true 37 | enable_val: false 38 | params: 39 | translation: true 40 | rotation: true 41 | scaling: true 42 | perspective: true 43 | scaling_amplitude: 0.2 44 | perspective_amplitude_x: 0.2 45 | perspective_amplitude_y: 0.2 46 | patch_ratio: 0.85 47 | max_angle: 1.57 48 | allow_artifacts: true 49 | valid_border_margin: 3 50 | 51 | warped_pair: 52 | enable: false 53 | params: 54 | translation: true 55 | rotation: true 56 | scaling: true 57 | perspective: true 58 | scaling_amplitude: 0.2 59 | perspective_amplitude_x: 0.2 60 | perspective_amplitude_y: 0.2 61 | patch_ratio: 0.85 62 | max_angle: 1.57 63 | allow_artifacts: true # true 64 | valid_border_margin: 3 65 | 66 | model: 67 | name: 'magic_point' 68 | batch_size: 32 # 32 69 | eval_batch_size: 32 70 | learning_rate: 0.001 71 | detection_threshold: 0.001 # 0.015 72 | nms: 4 73 | # top_k: 300 74 | retrain: false # set true for new model 75 | reset_iter: true 76 | #pretrained: 'logs/magicpoint_coco/checkpoints/superPointNet_529_checkpoint.pth.tar' 77 | # pretrained: 'logs/magicpoint_synth_rui_thres4_softmaxce/checkpoints/superPointNet_200007_checkpoint.pth.tar' 78 | # pretrained: 
'/home/yoyee/Documents/deepSfm/logs/magicpoint_synth15/checkpoints/superPointNet_200208_checkpoint.pth.tar' 79 | # pretrained: '/home/yoyee/Documents/deepSfm/logs/magicpoint_synth14/checkpoints/superPointNet_200111_checkpoint.pth.tar' 80 | # pretrained: 'logs/magicpoint_coco5/checkpoints/superPointNet_84608_checkpoint.pth.tar' 81 | #pretrained: 'pretrained/superpoint_v1.pth' 82 | # 83 | pretrained: 'logs/magicpoint_synth20/checkpoints/superPointNet_200000_checkpoint.pth.tar' 84 | 85 | train_iter: 200000 86 | # validation_interval: 1000 87 | validation_interval: 500 # 500 # one validation of entire val set every N training steps 88 | train_show_interval: 1000 # one show of the current training from to Tensorboard every N training steps 89 | -------------------------------------------------------------------------------- /configs/magicpoint_repeatability_heatmap.yaml: -------------------------------------------------------------------------------- 1 | data: 2 | name: 'patches_dataset' 3 | dataset: 'hpatches' # 'coco' 'hpatches' 4 | alteration: 'all' # 'all' 'i' 'v' 5 | preprocessing: 6 | resize: [240, 320] # [240, 320] for HPatches and False for coco 7 | # resize: [480, 640] # [240, 320] for HPatches and False for coco 8 | # labels: magicpoint_synth20_homoAdapt100_coco/predictions # for coco 9 | 10 | front_end_model: 'Val_model_heatmap' # 'Train_model_frontend' 11 | model: 12 | # name: 'magic_point' 13 | name: 'SuperPointNet_gauss2' # SuperPointNet_heatmap 14 | params: { 15 | } 16 | 17 | # learning_rate: 0.0001 # 0.0001 18 | detection_threshold: 0.015 # 0.015 19 | 20 | batch_size: 1 21 | eval_batch_size: 1 22 | # output parameters 23 | learning_rate: 0.001 24 | detection_threshold: 0.015 # 0.001 25 | nms: 4 26 | top_k: 1000 27 | nn_thresh: 1.0 # 0.7 28 | homography_adaptation: 29 | num: 0 30 | subpixel: 31 | enable: true 32 | patch_size: 5 33 | rand_noise: 34 | enable: false 35 | sigma: 0.2 36 | # pretrained: 'logs/superpoint_kitti_heat2_0/checkpoints/superPointNet_50000_checkpoint.pth.tar' 37 | # pretrained: 'logs/superpoint_spollo_v0/checkpoints/superPointNet_40000_checkpoint.pth.tar' 38 | # pretrained: 'logs/superpoint_coco/checkpoints/superPointNet_180_checkpoint.pth.tar' 39 | pretrained: 'logs/superpoint_coco_heat2_0/checkpoints/superPointNet_170000_checkpoint.pth.tar' 40 | 41 | 42 | 43 | eval_iter: 1000 44 | -------------------------------------------------------------------------------- /configs/magicpoint_shapes_pair.yaml: -------------------------------------------------------------------------------- 1 | data: 2 | # name: 'synthetic_shapes' 3 | dataset: 'SyntheticDataset_gaussian' 4 | primitives: 'all' 5 | truncate: {draw_ellipses: 0.3, draw_stripes: 0.2, gaussian_noise: 0.1} 6 | cache_in_memory: true 7 | suffix: 'v6' 8 | add_augmentation_to_test_set: false # set to true to evaluate with noise 9 | gaussian_label: 10 | enable: false 11 | params: 12 | GaussianBlur: {sigma: 0.2} 13 | preprocessing: ## didn't do this 14 | blur_size: 21 15 | resize: [120, 160] 16 | augmentation: 17 | photometric: 18 | enable: true ## for class to recognize 19 | enable_train: true 20 | enable_val: false 21 | primitives: [ 22 | 'random_brightness', 'random_contrast', 'additive_speckle_noise', 23 | 'additive_gaussian_noise', 'additive_shade', 'motion_blur' ] 24 | params: 25 | random_brightness: {max_abs_change: 75} 26 | random_contrast: {strength_range: [0.3, 1.8]} 27 | additive_gaussian_noise: {stddev_range: [0, 15]} 28 | additive_speckle_noise: {prob_range: [0, 0.0035]} 29 | additive_shade: 30 | 
transparency_range: [-0.5, 0.8] 31 | kernel_size_range: [50, 100] 32 | motion_blur: {max_kernel_size: 7} # origin 7 33 | homographic: 34 | enable: true 35 | enable_train: true 36 | enable_val: false 37 | params: 38 | translation: true 39 | rotation: true 40 | scaling: true 41 | perspective: true 42 | scaling_amplitude: 0.2 43 | perspective_amplitude_x: 0.2 44 | perspective_amplitude_y: 0.2 45 | patch_ratio: 0.8 46 | max_angle: 1.57 # 3.14 47 | allow_artifacts: true 48 | translation_overflow: 0.05 49 | valid_border_margin: 2 50 | warped_pair: 51 | enable: false # false when training only on detector 52 | params: 53 | translation: true 54 | rotation: true 55 | scaling: true 56 | perspective: true 57 | scaling_amplitude: 0.2 58 | perspective_amplitude_x: 0.2 59 | perspective_amplitude_y: 0.2 60 | patch_ratio: 0.85 61 | max_angle: 1.57 62 | allow_artifacts: true # true 63 | valid_border_margin: 3 64 | 65 | front_end_model: 'Train_model_heatmap' # 'Train_model_frontend' 66 | 67 | model: 68 | name: 'SuperPointNet_gauss2' 69 | params: { 70 | } 71 | detector_loss: 72 | loss_type: 'softmax' 73 | 74 | batch_size: 64 # 64 75 | eval_batch_size: 16 76 | learning_rate: 0.001 77 | kernel_reg: 0. 78 | detection_threshold: 0.001 # 1/65 79 | nms: 4 80 | lambda_loss: 0 # disable descriptor loss 81 | dense_loss: 82 | enable: false 83 | params: 84 | descriptor_dist: 4 # 4, 7.5 85 | lambda_d: 800 # 800 86 | sparse_loss: 87 | enable: true 88 | params: 89 | num_matching_attempts: 1000 90 | num_masked_non_matches_per_match: 100 91 | lamda_d: 1 92 | dist: 'cos' 93 | method: '2d' 94 | other_settings: 'train 2d, gauss 0.5' 95 | 96 | retrain: True # set true for new model 97 | reset_iter: True 98 | 99 | train_iter: 200000 # 200000 100 | tensorboard_interval: 1000 # 200 101 | save_interval: 2000 # 2000 102 | validation_interval: 1000 # one validation of entire val set every N training steps 103 | validation_size: 10 104 | train_show_interval: 1000 # one show of the current training from to Tensorboard every N training steps 105 | seed: 0 106 | 107 | # pretrained: 'logs/superpoint_syn_heat1_0/checkpoints/superPointNet_100000_checkpoint.pth.tar' 108 | 109 | -------------------------------------------------------------------------------- /configs/superpoint_coco_train_heatmap.yaml: -------------------------------------------------------------------------------- 1 | data: 2 | # name: 'coco' 3 | dataset: 'Coco' # 'coco' 4 | 5 | labels: datasets/magicpoint_synth20_homoAdapt100_coco_f1/predictions 6 | root: # datasets/COCO 7 | root_split_txt: # /datasets/COCO 8 | 9 | gaussian_label: 10 | enable: true 11 | params: 12 | GaussianBlur: {sigma: 0.2} 13 | 14 | 15 | cache_in_memory: false 16 | preprocessing: 17 | resize: [240, 320] 18 | # resize: [480, 640] 19 | augmentation: 20 | photometric: 21 | enable: true 22 | primitives: [ 23 | 'random_brightness', 'random_contrast', 'additive_speckle_noise', 24 | 'additive_gaussian_noise', 'additive_shade', 'motion_blur'] 25 | params: 26 | random_brightness: {max_abs_change: 50} 27 | random_contrast: {strength_range: [0.5, 1.5]} 28 | additive_gaussian_noise: {stddev_range: [0, 10]} 29 | additive_speckle_noise: {prob_range: [0, 0.0035]} 30 | additive_shade: 31 | transparency_range: [-0.5, 0.5] 32 | kernel_size_range: [100, 150] 33 | motion_blur: {max_kernel_size: 3} 34 | homographic: 35 | enable: false # not implemented 36 | warped_pair: 37 | enable: true 38 | params: 39 | translation: true 40 | rotation: true 41 | scaling: true 42 | perspective: true 43 | scaling_amplitude: 0.2 44 | 
perspective_amplitude_x: 0.2 45 | perspective_amplitude_y: 0.2 46 | patch_ratio: 0.85 47 | max_angle: 1.57 48 | allow_artifacts: true # true 49 | valid_border_margin: 3 50 | 51 | front_end_model: 'Train_model_heatmap' # 'Train_model_frontend' 52 | 53 | training: 54 | workers_train: 4 # 16 55 | workers_val: 2 # 2 56 | 57 | model: 58 | # name: 'magic_point' 59 | # name: 'SuperPointNet_heatmap' 60 | name: 'SuperPointNet_gauss2' 61 | params: { 62 | } 63 | detector_loss: 64 | loss_type: 'softmax' 65 | 66 | 67 | batch_size: 8 # 32 68 | eval_batch_size: 8 # 32 69 | learning_rate: 0.0001 # 0.0001 70 | detection_threshold: 0.015 # 0.015 71 | lambda_loss: 1 # 1 72 | nms: 4 73 | dense_loss: 74 | enable: false 75 | params: 76 | descriptor_dist: 4 # 4, 7.5 77 | lambda_d: 800 # 800 78 | sparse_loss: 79 | enable: true 80 | params: 81 | num_matching_attempts: 1000 82 | num_masked_non_matches_per_match: 100 83 | lamda_d: 1 84 | dist: 'cos' 85 | method: '2d' 86 | other_settings: 'train 2d, gauss 0.2' 87 | # subpixel: 88 | # enable: false 89 | # params: 90 | # subpixel_channel: 2 91 | # settings: 'predict flow directly' 92 | # loss_func: 'subpixel_loss_no_argmax' # subpixel_loss, subpixel_loss_no_argmax 93 | 94 | retrain: True # set true for new model 95 | reset_iter: True # set true to set the iteration number to 0 96 | train_iter: 200000 # 170000 97 | validation_interval: 200 # 2000 98 | tensorboard_interval: 200 # 200 99 | save_interval: 200 # 2000 100 | validation_size: 5 101 | 102 | pretrained: 103 | 104 | 105 | 106 | -------------------------------------------------------------------------------- /configs/superpoint_kitti_train_heatmap.yaml: -------------------------------------------------------------------------------- 1 | data: 2 | name: 'kitti' 3 | dataset: 'Kitti_inh' # 'Kitti' 4 | root: './datasets/KITTI/' # /data/kitti/kitti_wVal 5 | root_split_txt: 'datasets/kitti_split' 6 | labels: logs/magicpoint_synth20_homoAdapt100_kitti_h384_labels/predictions 7 | 8 | gaussian_label: 9 | enable: true 10 | params: 11 | GaussianBlur: {sigma: 0.2} 12 | 13 | cache_in_memory: false 14 | preprocessing: 15 | resize: [384, 1248] # hand defined, original: [375, 1242] 16 | # resize: [192, 624] 17 | augmentation: 18 | photometric: 19 | enable: true 20 | primitives: [ 21 | 'random_brightness', 'random_contrast', 'additive_speckle_noise', 22 | 'additive_gaussian_noise', 'additive_shade', 'motion_blur'] 23 | params: 24 | random_brightness: {max_abs_change: 50} 25 | random_contrast: {strength_range: [0.5, 1.5]} 26 | additive_gaussian_noise: {stddev_range: [0, 10]} 27 | additive_speckle_noise: {prob_range: [0, 0.0035]} 28 | additive_shade: 29 | transparency_range: [-0.5, 0.5] 30 | kernel_size_range: [100, 150] 31 | motion_blur: {max_kernel_size: 3} 32 | homographic: 33 | enable: false # not implemented 34 | params: 35 | {} 36 | warped_pair: 37 | enable: true 38 | params: 39 | translation: true 40 | rotation: true 41 | scaling: true 42 | perspective: true 43 | scaling_amplitude: 0.2 44 | perspective_amplitude_x: 0.2 45 | perspective_amplitude_y: 0.2 46 | patch_ratio: 0.85 47 | max_angle: 1.57 # 0.1 48 | allow_artifacts: true # true 49 | valid_border_margin: 3 50 | 51 | 52 | front_end_model: 'Train_model_heatmap' # 'Train_model_frontend' 53 | 54 | training: 55 | workers_train: 4 # 16 56 | workers_val: 2 # 2 57 | 58 | model: 59 | name: 'SuperPointNet_gauss2' 60 | params: { 61 | } 62 | detector_loss: 63 | loss_type: 'softmax' 64 | 65 | 66 | batch_size: 4 # 32 67 | eval_batch_size: 4 68 | learning_rate: 0.0001 69 | 
detection_threshold: 0.015 # 0.015 70 | nms: 4 71 | lambda_loss: 1 72 | # top_k: 300 73 | dense_loss: 74 | enable: false 75 | params: 76 | descriptor_dist: 4 # 4, 7.5 77 | lambda_d: 800 # 800 78 | sparse_loss: 79 | enable: true 80 | params: 81 | num_matching_attempts: 600 82 | num_masked_non_matches_per_match: 100 83 | lamda_d: 1 84 | dist: 'cos' 85 | method: '2d' 86 | other_settings: 'train from scratch, 2d method' 87 | lambda_res: 100 88 | 89 | retrain: true # set true for new model 90 | reset_iter: true 91 | train_iter: 170000 92 | validation_interval: 2000 93 | tensorboard_interval: 400 94 | save_interval: 2000 95 | validation_size: 5 96 | 97 | pretrained: 98 | # pretrained: 'logs/superpoint_coco_heat2_0/checkpoints/superPointNet_90000_checkpoint.pth.tar' 99 | 100 | 101 | -------------------------------------------------------------------------------- /configs/superpoint_tum_train_heatmap.yaml: -------------------------------------------------------------------------------- 1 | data: 2 | # name: 'tum' 3 | dataset: 'Tum' # 'Kitti' 4 | root: '/data/tum/raw_sequences' # use in export, and training 5 | root_split_txt: 'datasets/tum_split' 6 | labels: logs/magicpoint_base_homoAdapt_tum_rawSeq/predictions 7 | 8 | gaussian_label: 9 | enable: true 10 | params: 11 | GaussianBlur: {sigma: 0.2} 12 | 13 | cache_in_memory: false 14 | preprocessing: 15 | resize: [480, 640] # hand defined, original: [375, 1242] 16 | augmentation: 17 | photometric: 18 | enable: true 19 | primitives: [ 20 | 'random_brightness', 'random_contrast', 'additive_speckle_noise', 21 | 'additive_gaussian_noise', 'additive_shade', 'motion_blur'] 22 | params: 23 | random_brightness: {max_abs_change: 50} 24 | random_contrast: {strength_range: [0.5, 1.5]} 25 | additive_gaussian_noise: {stddev_range: [0, 10]} 26 | additive_speckle_noise: {prob_range: [0, 0.0035]} 27 | additive_shade: 28 | transparency_range: [-0.5, 0.5] 29 | kernel_size_range: [100, 150] 30 | motion_blur: {max_kernel_size: 3} 31 | homographic: 32 | enable: false # not implemented 33 | params: 34 | {} 35 | warped_pair: 36 | enable: true 37 | params: 38 | translation: true 39 | rotation: true 40 | scaling: true 41 | perspective: true 42 | scaling_amplitude: 0.2 43 | perspective_amplitude_x: 0.2 44 | perspective_amplitude_y: 0.2 45 | patch_ratio: 0.85 46 | max_angle: 1.57 # 0.1 47 | allow_artifacts: true # true 48 | valid_border_margin: 3 49 | 50 | 51 | front_end_model: 'Train_model_heatmap' # 'Train_model_frontend' 52 | 53 | model: 54 | name: 'SuperPointNet_gauss2' 55 | params: { 56 | } 57 | detector_loss: 58 | loss_type: 'softmax' 59 | 60 | 61 | batch_size: 4 # 32 62 | eval_batch_size: 4 63 | learning_rate: 0.0001 64 | detection_threshold: 0.015 # 0.015 65 | nms: 4 66 | lambda_loss: 1 67 | # top_k: 300 68 | dense_loss: 69 | enable: false 70 | params: 71 | descriptor_dist: 4 # 4, 7.5 72 | lambda_d: 800 # 800 73 | sparse_loss: 74 | enable: true 75 | params: 76 | num_matching_attempts: 600 77 | num_masked_non_matches_per_match: 100 78 | lamda_d: 1 79 | dist: 'cos' 80 | method: '2d' 81 | other_settings: 'train from scratch, 2d method' 82 | lambda_res: 100 83 | 84 | retrain: true # set true for new model 85 | reset_iter: true 86 | train_iter: 170000 87 | validation_interval: 2000 88 | tensorboard_interval: 400 89 | save_interval: 2000 90 | validation_size: 5 91 | 92 | # pretrained: 'logs/superpoint_coco_heat2_0/checkpoints/superPointNet_90000_checkpoint.pth.tar' 93 | pretrained: 94 | 95 | 96 | 
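The YAML files above are read by the training and export entry points with PyYAML. A minimal, hedged sketch of loading one of them and pulling a few of the nested fields listed above; it is not taken verbatim from the entry-point code, and the key nesting it relies on is the one shown in the config itself.

# Sketch: load a training config and inspect a few fields.
import yaml

with open('configs/superpoint_coco_train_heatmap.yaml', 'r') as f:
    config = yaml.safe_load(f)

print(config['data']['dataset'])                  # 'Coco'
print(config['data']['preprocessing']['resize'])  # [240, 320]
print(config['model']['name'])                    # 'SuperPointNet_gauss2'
print(config['model']['detection_threshold'])     # 0.015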
--------------------------------------------------------------------------------
/datasets/__init__.py:
--------------------------------------------------------------------------------
def get_dataset(name):
    # import datasets.<name> and return the class named after the module,
    # e.g. 'synthetic_dataset' -> datasets.synthetic_dataset.SyntheticDataset
    mod = __import__('datasets.{}'.format(name), fromlist=[''])
    return getattr(mod, _module_to_class(name))


def _module_to_class(name):
    return ''.join(n.capitalize() for n in name.split('_'))
--------------------------------------------------------------------------------
/datasets/base_dataset.py:
--------------------------------------------------------------------------------
from abc import ABCMeta, abstractmethod
import tensorflow as tf

from utils.tools import dict_update


class BaseDataset(metaclass=ABCMeta):
    """Base dataset class.

    Arguments:
        config: A dictionary containing the configuration parameters.

    Datasets should inherit from this class and implement the following methods:
        `_init_dataset` and `_get_data`.
    Additionally, the following static attributes should be defined:
        default_config: A dictionary of potential default configuration values (e.g. the
            size of the validation set).
    """
    split_names = ['training', 'validation', 'test']

    @abstractmethod
    def _init_dataset(self, **config):
        """Prepare the dataset for reading.

        This method should configure the dataset for later fetching through `_get_data`,
        such as downloading the data if it is not stored locally, or reading the list of
        data files from disk. Ideally, especially in the case of large images, this
        method should NOT read the whole dataset into memory, but rather prepare for
        faster subsequent fetching.

        Arguments:
            config: A configuration dictionary, given during the object instantiation.

        Returns:
            An object subsequently passed to `_get_data`, e.g. a list of file paths and
            set splits.
        """
        raise NotImplementedError

    @abstractmethod
    def _get_data(self, dataset, split_name, **config):
        """Reads the dataset splits using the Tensorflow `tf.data` API.

        This method should create a `tf.data.Dataset` object for the given data split,
        with named components defined through a dictionary mapping strings to tensors.

        It typically performs operations such as reading data from a file or from a
        Python generator, shuffling the elements or applying data augmentation to the
        training split. It should however NOT batch the dataset (left to the model).

        Arguments:
            dataset: An object returned by the `_init_dataset` method.
            split_name: A string, the name of the requested split, either `"training"`,
                `"validation"` or `"test"`.
            config: A configuration dictionary, given during the object instantiation.

        Returns:
            An object of type `tf.data.Dataset` corresponding to the requested split.
        """
        raise NotImplementedError

    def get_tf_datasets(self):
        """Exposes data splits consistent with the Tensorflow `tf.data` API.

        Returns:
            A dictionary mapping split names (`str`, either `"training"`, `"validation"`,
            or `"test"`) to `tf.data.Dataset` objects.
        """
        return self.tf_splits

    def get_training_set(self):
        """Processed training set.

        Returns:
            A generator of elements from the training set as dictionaries mapping
            component names to the corresponding data (e.g. Numpy array).
        """
        return self._get_set_generator('training')

    def get_validation_set(self):
        """Processed validation set.

        Returns:
            A generator of elements from the validation set as dictionaries mapping
            component names to the corresponding data (e.g. Numpy array).
        """
        return self._get_set_generator('validation')

    def get_test_set(self):
        """Processed test set.

        Returns:
            A generator of elements from the test set as dictionaries mapping
            component names to the corresponding data (e.g. Numpy array).
        """
        return self._get_set_generator('test')

    def __init__(self, **config):
        # Update config
        self.config = dict_update(getattr(self, 'default_config', {}), config)

        self.dataset = self._init_dataset(**self.config)

        self.tf_splits = {}
        self.tf_next = {}
        with tf.device('/cpu:0'):
            for n in self.split_names:
                self.tf_splits[n] = self._get_data(self.dataset, n, **self.config)
                self.tf_next[n] = self.tf_splits[n].make_one_shot_iterator().get_next()
        self.end_set = tf.errors.OutOfRangeError
        self.sess = tf.Session()

    def _get_set_generator(self, set_name):
        while True:
            yield self.sess.run(self.tf_next[set_name])
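base_dataset.py defines an abstract contract: subclasses implement _init_dataset (collect and split the raw data) and _get_data (build an unbatched tf.data.Dataset per split), while the base class wires up the TF1-style iterators and the generator accessors. A minimal, hedged sketch of a toy subclass; the class name, file list and field names are made up for illustration, and the instantiation is left commented because __init__ above relies on the TF1 Session API.

# Toy subclass sketch (hypothetical, for illustration only).
import tensorflow as tf
from datasets.base_dataset import BaseDataset


class ToyDataset(BaseDataset):
    default_config = {'validation_size': 2}

    def _init_dataset(self, **config):
        # normally: scan the dataset root and split the file list
        paths = ['a.png', 'b.png', 'c.png', 'd.png']   # placeholder file names
        n_val = config['validation_size']
        return {'training': paths[n_val:],
                'validation': paths[:n_val],
                'test': paths[:n_val]}

    def _get_data(self, dataset, split_name, **config):
        # build an unbatched tf.data.Dataset with named components
        return tf.data.Dataset.from_tensor_slices({'name': dataset[split_name]})


# Under TF1, iteration goes through the generator helpers of the base class:
# toy = ToyDataset()
# sample = next(toy.get_training_set())   # e.g. {'name': b'c.png'} for the toy split above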
--------------------------------------------------------------------------------
/datasets/data_tools.py:
--------------------------------------------------------------------------------
import torch

quan = lambda x: x.round().long()


def extrapolate_points(pnts):
    # spread each sub-pixel point over its four integer neighbours
    pnts_int = pnts.long().type(torch.FloatTensor)
    pnts_x, pnts_y = pnts_int[:, 0], pnts_int[:, 1]

    stack_1 = lambda x, y: torch.stack((x, y), dim=1)
    pnts_ext = torch.cat((pnts_int, stack_1(pnts_x, pnts_y + 1),
                          stack_1(pnts_x + 1, pnts_y), pnts_int + 1), dim=0)

    pnts_res = pnts - pnts_int  # (x, y)
    x_res, y_res = pnts_res[:, 0], pnts_res[:, 1]  # residuals
    res_ext = torch.cat(((1 - x_res) * (1 - y_res), (1 - x_res) * y_res,
                         x_res * (1 - y_res), x_res * y_res), dim=0)
    return pnts_ext, res_ext


def scatter_points(warped_pnts, H, W, res_ext=1):
    warped_labels = torch.zeros(H, W)
    warped_labels[quan(warped_pnts)[:, 1], quan(warped_pnts)[:, 0]] = res_ext
    warped_labels = warped_labels.view(-1, H, W)
    return warped_labels


# from datasets.data_tools import get_labels_bi
def get_labels_bi(warped_pnts, H, W):
    from utils.utils import filter_points
    pnts_ext, res_ext = extrapolate_points(warped_pnts)
    pnts_ext, mask = filter_points(pnts_ext, torch.tensor([W, H]), return_mask=True)
    res_ext = res_ext[mask]
    warped_labels_bi = scatter_points(pnts_ext, H, W, res_ext=res_ext)
    return warped_labels_bi


# from data_tools import warpLabels
def warpLabels(pnts, H, W, homography, bilinear=False):
    from utils.utils import homography_scaling_torch as homography_scaling
    from utils.utils import filter_points
    from utils.utils import warp_points
    if isinstance(pnts, torch.Tensor):
        pnts = pnts.long()
    else:
        pnts = torch.tensor(pnts).long()
    warped_pnts = warp_points(torch.stack((pnts[:, 0], pnts[:, 1]), dim=1),
                              homography_scaling(homography, H, W))  # check the (x, y)
    outs = {}

    if bilinear == True:
        warped_labels_bi = get_labels_bi(warped_pnts, H, W)
        outs['labels_bi'] = warped_labels_bi

    warped_pnts = filter_points(warped_pnts, torch.tensor([W, H]))
    warped_labels = scatter_points(warped_pnts, H, W, res_ext=1)

    warped_labels_res = torch.zeros(H, W, 2)
    warped_labels_res[quan(warped_pnts)[:, 1], quan(warped_pnts)[:, 0], :] = \
        warped_pnts - warped_pnts.round()
    outs.update({'labels': warped_labels, 'res': warped_labels_res, 'warped_pnts': warped_pnts})
    return outs


# from data_tools import np_to_tensor
def np_to_tensor(img, H, W):
    img = torch.tensor(img).type(torch.FloatTensor).view(-1, H, W)
    return img


if __name__ == '__main__':
    # no standalone entry point; these helpers are imported by the dataset classes
    pass
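A small, hedged example of the bilinear label construction above: extrapolate_points spreads each sub-pixel keypoint over its four integer neighbours with bilinear weights, and scatter_points writes those weights into an H x W label map. It uses only torch and the two functions defined in this file; the coordinates are made up.

# Sketch: bilinear scattering of two sub-pixel keypoints into an 8x8 label map.
import torch
from datasets.data_tools import extrapolate_points, scatter_points

H, W = 8, 8
pnts = torch.tensor([[2.3, 4.6], [5.0, 1.5]])             # (x, y) with sub-pixel parts
pnts_ext, res_ext = extrapolate_points(pnts)              # 4 integer neighbours per point
labels = scatter_points(pnts_ext, H, W, res_ext=res_ext)  # shape [1, H, W]
print(labels.sum())   # 2.0 here: each point's four bilinear weights sum to 1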
--------------------------------------------------------------------------------
datasets/kitti/2011_09_26_drive_0001_sync_02/ (binary sample data):
--------------------------------------------------------------------------------
[Binary files omitted from this dump: the bundled KITTI sample sequence consists of paired 00000000NN.jpg frames and 00000000NN.npy arrays, alongside cam.txt and poses.txt; the raw files are hosted under https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/.]
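A hedged snippet for inspecting one of the bundled image/array pairs; what each .npy stores is not documented in this dump, so the code only reports shapes rather than assuming a meaning.

# Sketch: inspect one frame and its .npy companion from the sample sequence.
import numpy as np
import cv2

seq = 'datasets/kitti/2011_09_26_drive_0001_sync_02'
img = cv2.imread(f'{seq}/0000000000.jpg', cv2.IMREAD_GRAYSCALE)
arr = np.load(f'{seq}/0000000000.npy')
print(img.shape, arr.shape)   # the contents of the .npy are not asserted here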
/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000030.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000030.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000031.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000031.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000031.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000031.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000032.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000032.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000032.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000032.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000033.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000033.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000033.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000033.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000034.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000034.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000034.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000034.npy -------------------------------------------------------------------------------- 
/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000035.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000035.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000035.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000035.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000036.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000036.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000036.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000036.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000037.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000037.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000037.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000037.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000038.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000038.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000038.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000038.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000039.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000039.jpg -------------------------------------------------------------------------------- 
/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000039.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000039.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000040.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000040.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000040.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000040.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000041.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000041.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000041.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000041.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000042.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000042.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000042.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000042.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000043.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000043.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000043.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000043.npy -------------------------------------------------------------------------------- 
/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000044.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000044.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000044.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000044.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000045.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000045.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000045.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000045.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000046.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000046.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000046.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000046.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000047.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000047.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000047.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000047.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000048.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000048.jpg -------------------------------------------------------------------------------- 
/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000048.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000048.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000049.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000049.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000049.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000049.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000050.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000050.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000050.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000050.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000051.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000051.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000051.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000051.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000052.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000052.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000052.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000052.npy -------------------------------------------------------------------------------- 
/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000053.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000053.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000053.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000053.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000054.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000054.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000054.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000054.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000055.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000055.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000055.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000055.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000056.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000056.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000056.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000056.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000057.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000057.jpg -------------------------------------------------------------------------------- 
/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000057.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000057.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000058.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000058.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000058.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000058.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000059.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000059.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000059.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000059.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000060.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000060.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000060.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000060.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000061.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000061.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000061.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000061.npy -------------------------------------------------------------------------------- 
/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000062.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000062.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000062.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000062.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000063.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000063.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000063.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000063.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000064.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000064.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000064.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000064.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000065.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000065.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000065.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000065.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000066.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000066.jpg -------------------------------------------------------------------------------- 
/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000066.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000066.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000067.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000067.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000067.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000067.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000068.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000068.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000068.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000068.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000069.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000069.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000069.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000069.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000070.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000070.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000070.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000070.npy -------------------------------------------------------------------------------- 
/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000071.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000071.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000071.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000071.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000072.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000072.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000072.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000072.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000073.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000073.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000073.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000073.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000074.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000074.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000074.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000074.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000075.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000075.jpg -------------------------------------------------------------------------------- 
/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000075.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000075.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000076.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000076.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000076.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000076.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000077.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000077.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000077.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000077.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000078.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000078.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000078.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000078.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000079.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000079.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000079.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000079.npy -------------------------------------------------------------------------------- 
/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000080.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000080.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000080.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000080.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000081.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000081.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000081.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000081.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000082.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000082.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000082.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000082.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000083.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000083.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000083.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000083.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000084.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000084.jpg -------------------------------------------------------------------------------- 
/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000084.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000084.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000085.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000085.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000085.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000085.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000086.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000086.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000086.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000086.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000087.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000087.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000087.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000087.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000088.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000088.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000088.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000088.npy -------------------------------------------------------------------------------- 
/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000089.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000089.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000089.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000089.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000090.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000090.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000090.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000090.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000091.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000091.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000091.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000091.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000092.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000092.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000092.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000092.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000093.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000093.jpg -------------------------------------------------------------------------------- 
/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000093.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000093.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000094.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000094.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000094.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000094.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000095.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000095.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000095.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000095.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000096.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000096.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000096.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000096.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000097.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000097.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000097.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000097.npy -------------------------------------------------------------------------------- 
/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000098.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000098.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000098.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000098.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000099.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000099.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000099.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000099.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000100.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000100.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000100.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000100.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000101.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000101.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000101.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000101.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000102.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000102.jpg -------------------------------------------------------------------------------- 
/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000102.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000102.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000103.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000103.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000103.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000103.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000104.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000104.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000104.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000104.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000105.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000105.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000105.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000105.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000106.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000106.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000106.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000106.npy -------------------------------------------------------------------------------- 
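Each frame in this sequence folder is stored as a .jpg image together with a companion .npy array of the same stem. The listing itself does not document what those arrays contain, so the sketch below (not part of the repository; paths assume a local checkout of the datasets/ tree) only pairs the files up and loads both, treating the .npy contents as opaque unless confirmed against the dataset code in datasets/Kitti_inh.py.

# Minimal sketch: iterate the (image, array) pairs of one KITTI sequence folder.
from pathlib import Path

import numpy as np
from imageio import imread  # same reader the repository's dataset code uses

seq_dir = Path("datasets/kitti/2011_09_26_drive_0001_sync_02")

for jpg_path in sorted(seq_dir.glob("*.jpg")):
    npy_path = jpg_path.with_suffix(".npy")   # companion array with the same stem
    image = imread(jpg_path)                  # frame as a numpy image
    companion = np.load(npy_path)             # format not documented in this listing
    print(jpg_path.name, image.shape, companion.shape)
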
/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000107.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000107.jpg -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/0000000107.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/2011_09_26_drive_0001_sync_02/0000000107.npy -------------------------------------------------------------------------------- /datasets/kitti/2011_09_26_drive_0001_sync_02/cam.txt: -------------------------------------------------------------------------------- 1 | 2.416744631239935472e+02 0.000000000000000000e+00 2.041680103059581199e+02 2 | 0.000000000000000000e+00 2.462848682666666491e+02 5.900083200000000261e+01 3 | 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 4 | -------------------------------------------------------------------------------- /datasets/kitti/kitti_test/0000000000.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/kitti_test/0000000000.npz -------------------------------------------------------------------------------- /datasets/kitti/kitti_test/0000000000_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/kitti_test/0000000000_0.png -------------------------------------------------------------------------------- /datasets/kitti/kitti_test/0000000000_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/kitti/kitti_test/0000000000_1.png -------------------------------------------------------------------------------- /datasets/kitti/kitti_test/test.txt: -------------------------------------------------------------------------------- 1 | Test by Rui. 
2 | -------------------------------------------------------------------------------- /datasets/kitti/train.txt: -------------------------------------------------------------------------------- 1 | 2011_09_26_drive_0001_sync_02 2 | -------------------------------------------------------------------------------- /datasets/kitti_split/train.txt: -------------------------------------------------------------------------------- 1 | 2011_09_26_drive_0032_sync_02 2 | 2011_10_03_drive_0042_sync_02 3 | 2011_09_26_drive_0101_sync_02 4 | 2011_09_28_drive_0045_sync_02 5 | 2011_09_26_drive_0027_sync_02 6 | 2011_09_30_drive_0027_sync_02 7 | 2011_09_30_drive_0033_sync_02 8 | 2011_09_28_drive_0038_sync_02 9 | 2011_09_26_drive_0017_sync_02 10 | 2011_09_26_drive_0046_sync_02 11 | 2011_09_26_drive_0087_sync_02 12 | 2011_10_03_drive_0027_sync_02 13 | 2011_09_28_drive_0037_sync_02 14 | 2011_09_26_drive_0117_sync_02 15 | 2011_09_29_drive_0004_sync_02 16 | 2011_09_26_drive_0035_sync_02 17 | 2011_09_26_drive_0019_sync_02 18 | 2011_09_30_drive_0028_sync_02 19 | 2011_09_26_drive_0039_sync_02 20 | 2011_09_26_drive_0023_sync_02 21 | 2011_09_26_drive_0029_sync_02 22 | 2011_09_26_drive_0086_sync_02 23 | 2011_09_26_drive_0013_sync_02 24 | 2011_09_26_drive_0015_sync_02 25 | 2011_09_26_drive_0070_sync_02 26 | 2011_09_26_drive_0096_sync_02 27 | 2011_09_26_drive_0051_sync_02 28 | 2011_09_29_drive_0026_sync_02 29 | 2011_09_26_drive_0093_sync_02 30 | 2011_09_28_drive_0034_sync_02 31 | 2011_09_28_drive_0039_sync_02 32 | 2011_09_26_drive_0018_sync_02 33 | 2011_09_28_drive_0043_sync_02 34 | 2011_09_26_drive_0104_sync_02 35 | 2011_09_26_drive_0048_sync_02 36 | 2011_09_26_drive_0022_sync_02 37 | 2011_09_26_drive_0036_sync_02 38 | 2011_09_28_drive_0001_sync_02 39 | 2011_09_26_drive_0095_sync_02 40 | 2011_09_30_drive_0034_sync_02 41 | 2011_09_30_drive_0018_sync_02 42 | 2011_09_26_drive_0014_sync_02 43 | 2011_09_26_drive_0028_sync_02 44 | 2011_09_26_drive_0057_sync_02 45 | 2011_09_30_drive_0020_sync_02 46 | 2011_09_26_drive_0059_sync_02 47 | 2011_09_28_drive_0002_sync_02 48 | 2011_09_26_drive_0009_sync_02 49 | 2011_09_26_drive_0064_sync_02 50 | 2011_09_26_drive_0084_sync_02 51 | 2011_09_26_drive_0056_sync_02 52 | 2011_10_03_drive_0034_sync_02 53 | 2011_09_30_drive_0016_sync_02 54 | 2011_10_03_drive_0047_sync_02 55 | 2011_09_28_drive_0047_sync_02 56 | 2011_09_26_drive_0079_sync_02 57 | 2011_09_26_drive_0113_sync_02 58 | 2011_09_26_drive_0106_sync_02 59 | 2011_09_29_drive_0071_sync_02 60 | 2011_09_26_drive_0061_sync_02 61 | 2011_09_26_drive_0091_sync_02 62 | -------------------------------------------------------------------------------- /datasets/kitti_split/val.txt: -------------------------------------------------------------------------------- 1 | 2011_09_26_drive_0002_sync_02 2 | 2011_09_26_drive_0001_sync_02 3 | 2011_09_26_drive_0020_sync_02 4 | 2011_09_26_drive_0052_sync_02 5 | 2011_09_26_drive_0011_sync_02 6 | 2011_09_26_drive_0005_sync_02 7 | -------------------------------------------------------------------------------- /datasets/patches_dataset.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | """ 4 | 5 | import numpy as np 6 | # import tensorflow as tf 7 | import cv2 8 | from pathlib import Path 9 | 10 | import torch 11 | import torch.utils.data as data 12 | 13 | # from .base_dataset import BaseDataset 14 | # from .utils import pipeline 15 | from utils.tools import dict_update 16 | 17 | from models.homographies import sample_homography 18 | from settings import DATA_PATH 19 
| 20 | from imageio import imread 21 | def load_as_float(path): 22 | return imread(path).astype(np.float32)/255 23 | 24 | class PatchesDataset(data.Dataset): 25 | default_config = { 26 | 'dataset': 'hpatches', # or 'coco' 27 | 'alteration': 'all', # 'all', 'i' for illumination or 'v' for viewpoint 28 | 'cache_in_memory': False, 29 | 'truncate': None, 30 | 'preprocessing': { 31 | 'resize': False 32 | } 33 | } 34 | 35 | def __init__(self, transform=None, **config): 36 | self.config = self.default_config 37 | self.config = dict_update(self.config, config) 38 | self.files = self._init_dataset(**self.config) 39 | sequence_set = [] 40 | for (img, img_warped, mat_hom) in zip(self.files['image_paths'], self.files['warped_image_paths'], self.files['homography']): 41 | sample = {'image': img, 'warped_image': img_warped, 'homography': mat_hom} 42 | sequence_set.append(sample) 43 | self.samples = sequence_set 44 | self.transform = transform 45 | if config['preprocessing']['resize']: 46 | self.sizer = np.array(config['preprocessing']['resize']) 47 | pass 48 | 49 | def __getitem__(self, index): 50 | """ 51 | 52 | :param index: 53 | :return: 54 | image: 55 | tensor (1,H,W) 56 | warped_image: 57 | tensor (1,H,W) 58 | """ 59 | def _read_image(path): 60 | input_image = cv2.imread(path) 61 | return input_image 62 | 63 | def _preprocess(image): 64 | s = max(self.sizer /image.shape[:2]) 65 | image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) 66 | image = image[:int(self.sizer[0]/s),:int(self.sizer[1]/s)] 67 | image = cv2.resize(image, (self.sizer[1], self.sizer[0]), 68 | interpolation=cv2.INTER_AREA) 69 | image = image.astype('float32') / 255.0 70 | if image.ndim == 2: 71 | image = image[:,:, np.newaxis] 72 | if self.transform is not None: 73 | image = self.transform(image) 74 | return image 75 | 76 | def _warp_image(image): 77 | H = sample_homography(tf.shape(image)[:2]) 78 | warped_im = tf.contrib.image.transform(image, H, interpolation="BILINEAR") 79 | return {'warped_im': warped_im, 'H': H} 80 | 81 | def _adapt_homography_to_preprocessing(image, H): 82 | # image = zip_data['image'] 83 | # H = tf.cast(zip_data['homography'], tf.float32) 84 | # target_size = np.array(self.config['preprocessing']['resize']) 85 | s = max(self.sizer /image.shape[:2]) 86 | # mat = np.array([[1,1,1/s], [1,1,1/s], [s,s,1]]) 87 | mat = np.array([[1,1,s], [1,1,s], [1/s,1/s,1]]) 88 | # down_scale = np.diag(np.array([1/s, 1/s, 1])) 89 | # up_scale = tf.diag(tf.stack([s, s, tf.constant(1.)])) 90 | # H = tf.matmul(up_scale, tf.matmul(H, down_scale)) 91 | H = H*mat 92 | return H 93 | sample = self.samples[index] 94 | image_original = _read_image(sample['image']) 95 | image = _preprocess(image_original) 96 | warped_image = _preprocess(_read_image(sample['warped_image'])) 97 | to_numpy = False 98 | if to_numpy: 99 | image, warped_image = np.array(image), np.array(warped_image) 100 | homography = _adapt_homography_to_preprocessing(image_original, sample['homography']) 101 | sample = {'image': image, 'warped_image': warped_image, 102 | 'homography': homography} 103 | return sample 104 | 105 | def __len__(self): 106 | return len(self.samples) 107 | 108 | def _init_dataset(self, **config): 109 | dataset_folder = 'COCO/patches' if config['dataset'] == 'coco' else 'HPatches' 110 | base_path = Path(DATA_PATH, dataset_folder) 111 | folder_paths = [x for x in base_path.iterdir() if x.is_dir()] 112 | image_paths = [] 113 | warped_image_paths = [] 114 | homographies = [] 115 | for path in folder_paths: 116 | if config['alteration'] == 'i' and 
path.stem[0] != 'i': 117 | continue 118 | if config['alteration'] == 'v' and path.stem[0] != 'v': 119 | continue 120 | num_images = 1 if config['dataset'] == 'coco' else 5 121 | file_ext = '.ppm' if config['dataset'] == 'hpatches' else '.jpg' 122 | for i in range(2, 2 + num_images): 123 | image_paths.append(str(Path(path, "1" + file_ext))) 124 | warped_image_paths.append(str(Path(path, str(i) + file_ext))) 125 | homographies.append(np.loadtxt(str(Path(path, "H_1_" + str(i))))) 126 | if config['truncate']: 127 | image_paths = image_paths[:config['truncate']] 128 | warped_image_paths = warped_image_paths[:config['truncate']] 129 | homographies = homographies[:config['truncate']] 130 | files = {'image_paths': image_paths, 131 | 'warped_image_paths': warped_image_paths, 132 | 'homography': homographies} 133 | return files 134 | 135 | 136 | -------------------------------------------------------------------------------- /datasets/tum_split/train.txt: -------------------------------------------------------------------------------- 1 | rgbd_dataset_freiburg1_desk 2 | rgbd_dataset_freiburg1_room 3 | rgbd_dataset_freiburg2_desk 4 | rgbd_dataset_freiburg3_long_office_household 5 | -------------------------------------------------------------------------------- /datasets/tum_split/val.txt: -------------------------------------------------------------------------------- 1 | rgbd_dataset_freiburg1_desk2 2 | rgbd_dataset_freiburg2_xyz 3 | rgbd_dataset_freiburg3_nostructure_texture_far 4 | -------------------------------------------------------------------------------- /datasets/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/datasets/utils/__init__.py -------------------------------------------------------------------------------- /datasets/utils/photometric_augmentation.py: -------------------------------------------------------------------------------- 1 | """ Used to generate synthetic shapes 2 | 3 | """ 4 | 5 | import cv2 as cv 6 | import numpy as np 7 | import tensorflow as tf 8 | 9 | 10 | augmentations = [ 11 | 'additive_gaussian_noise', 12 | 'additive_speckle_noise', 13 | 'random_brightness', 14 | 'random_contrast', 15 | 'additive_shade', 16 | 'motion_blur' 17 | ] 18 | 19 | 20 | def additive_gaussian_noise(image, stddev_range=[5, 95]): 21 | stddev = tf.random_uniform((), *stddev_range) 22 | noise = tf.random_normal(tf.shape(image), stddev=stddev) 23 | noisy_image = tf.clip_by_value(image + noise, 0, 255) 24 | return noisy_image 25 | 26 | 27 | def additive_speckle_noise(image, prob_range=[0.0, 0.005]): 28 | prob = tf.random_uniform((), *prob_range) 29 | sample = tf.random_uniform(tf.shape(image)) 30 | noisy_image = tf.where(sample <= prob, tf.zeros_like(image), image) 31 | noisy_image = tf.where(sample >= (1. 
- prob), 255.*tf.ones_like(image), noisy_image) 32 | return noisy_image 33 | 34 | 35 | def random_brightness(image, max_abs_change=50): 36 | return tf.clip_by_value(tf.image.random_brightness(image, max_abs_change), 0, 255) 37 | 38 | 39 | def random_contrast(image, strength_range=[0.5, 1.5]): 40 | return tf.clip_by_value(tf.image.random_contrast(image, *strength_range), 0, 255) 41 | 42 | 43 | def additive_shade(image, nb_ellipses=20, transparency_range=[-0.5, 0.8], 44 | kernel_size_range=[250, 350]): 45 | 46 | def _py_additive_shade(img): 47 | min_dim = min(img.shape[:2]) / 4 48 | mask = np.zeros(img.shape[:2], np.uint8) 49 | for i in range(nb_ellipses): 50 | ax = int(max(np.random.rand() * min_dim, min_dim / 5)) 51 | ay = int(max(np.random.rand() * min_dim, min_dim / 5)) 52 | max_rad = max(ax, ay) 53 | x = np.random.randint(max_rad, img.shape[1] - max_rad) # center 54 | y = np.random.randint(max_rad, img.shape[0] - max_rad) 55 | angle = np.random.rand() * 90 56 | cv.ellipse(mask, (x, y), (ax, ay), angle, 0, 360, 255, -1) 57 | 58 | transparency = np.random.uniform(*transparency_range) 59 | kernel_size = np.random.randint(*kernel_size_range) 60 | if (kernel_size % 2) == 0: # kernel_size has to be odd 61 | kernel_size += 1 62 | mask = cv.GaussianBlur(mask.astype(np.float32), (kernel_size, kernel_size), 0) 63 | shaded = img * (1 - transparency * mask[..., np.newaxis]/255.) 64 | return np.clip(shaded, 0, 255) 65 | 66 | shaded = tf.py_func(_py_additive_shade, [image], tf.float32) 67 | res = tf.reshape(shaded, tf.shape(image)) 68 | return res 69 | 70 | 71 | def motion_blur(image, max_kernel_size=10): 72 | 73 | def _py_motion_blur(img): 74 | # Either vertial, hozirontal or diagonal blur 75 | mode = np.random.choice(['h', 'v', 'diag_down', 'diag_up']) 76 | ksize = np.random.randint(0, (max_kernel_size+1)/2)*2 + 1 # make sure is odd 77 | center = int((ksize-1)/2) 78 | kernel = np.zeros((ksize, ksize)) 79 | if mode == 'h': 80 | kernel[center, :] = 1. 81 | elif mode == 'v': 82 | kernel[:, center] = 1. 83 | elif mode == 'diag_down': 84 | kernel = np.eye(ksize) 85 | elif mode == 'diag_up': 86 | kernel = np.flip(np.eye(ksize), 0) 87 | var = ksize * ksize / 16. 
88 | grid = np.repeat(np.arange(ksize)[:, np.newaxis], ksize, axis=-1) 89 | gaussian = np.exp(-(np.square(grid-center)+np.square(grid.T-center))/(2.*var)) 90 | kernel *= gaussian 91 | kernel /= np.sum(kernel) 92 | img = cv.filter2D(img, -1, kernel) 93 | return img 94 | 95 | blurred = tf.py_func(_py_motion_blur, [image], tf.float32) 96 | return tf.reshape(blurred, tf.shape(image)) 97 | -------------------------------------------------------------------------------- /datasets/utils/pipeline.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import cv2 as cv 3 | import numpy as np 4 | 5 | from datasets.utils import photometric_augmentation as photaug 6 | from models.homographies import (sample_homography, compute_valid_mask, 7 | warp_points, filter_points) 8 | 9 | 10 | def parse_primitives(names, all_primitives): 11 | p = all_primitives if (names == 'all') \ 12 | else (names if isinstance(names, list) else [names]) 13 | assert set(p) <= set(all_primitives) 14 | return p 15 | 16 | -------------------------------------------------------------------------------- /logs/magicpoint_synth20/checkpoints/superPointNet_200000_checkpoint.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/magicpoint_synth20/checkpoints/superPointNet_200000_checkpoint.pth.tar -------------------------------------------------------------------------------- /logs/magicpoint_synth_t2/checkpoints/superPointNet_100000_checkpoint.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/magicpoint_synth_t2/checkpoints/superPointNet_100000_checkpoint.pth.tar -------------------------------------------------------------------------------- /logs/magicpoint_synth_t2/config.yml: -------------------------------------------------------------------------------- 1 | data: 2 | add_augmentation_to_test_set: false 3 | augmentation: 4 | homographic: 5 | enable: true 6 | enable_train: true 7 | enable_val: false 8 | params: 9 | allow_artifacts: true 10 | max_angle: 1.57 11 | patch_ratio: 0.8 12 | perspective: true 13 | perspective_amplitude_x: 0.2 14 | perspective_amplitude_y: 0.2 15 | rotation: true 16 | scaling: true 17 | scaling_amplitude: 0.2 18 | translation: true 19 | translation_overflow: 0.05 20 | valid_border_margin: 2 21 | photometric: 22 | enable: true 23 | enable_train: true 24 | enable_val: false 25 | params: 26 | additive_gaussian_noise: 27 | stddev_range: 28 | - 0 29 | - 15 30 | additive_shade: 31 | kernel_size_range: 32 | - 50 33 | - 100 34 | transparency_range: 35 | - -0.5 36 | - 0.8 37 | additive_speckle_noise: 38 | prob_range: 39 | - 0 40 | - 0.0035 41 | motion_blur: 42 | max_kernel_size: 7 43 | random_brightness: 44 | max_abs_change: 75 45 | random_contrast: 46 | strength_range: 47 | - 0.3 48 | - 1.8 49 | primitives: 50 | - random_brightness 51 | - random_contrast 52 | - additive_speckle_noise 53 | - additive_gaussian_noise 54 | - additive_shade 55 | - motion_blur 56 | cache_in_memory: true 57 | dataset: SyntheticDataset_gaussian 58 | gaussian_label: 59 | enable: false 60 | params: 61 | GaussianBlur: 62 | sigma: 0.2 63 | preprocessing: 64 | blur_size: 21 65 | resize: 66 | - 120 67 | - 160 68 | primitives: all 69 | suffix: v6 70 | truncate: 71 | draw_ellipses: 0.3 72 | 
draw_stripes: 0.2 73 | gaussian_noise: 0.1 74 | warped_pair: 75 | enable: false 76 | params: 77 | allow_artifacts: true 78 | max_angle: 1.57 79 | patch_ratio: 0.85 80 | perspective: true 81 | perspective_amplitude_x: 0.2 82 | perspective_amplitude_y: 0.2 83 | rotation: true 84 | scaling: true 85 | scaling_amplitude: 0.2 86 | translation: true 87 | valid_border_margin: 3 88 | front_end_model: Train_model_heatmap 89 | model: 90 | batch_size: 64 91 | dense_loss: 92 | enable: false 93 | params: 94 | descriptor_dist: 4 95 | lambda_d: 800 96 | detection_threshold: 0.001 97 | detector_loss: 98 | loss_type: softmax 99 | eval_batch_size: 16 100 | kernel_reg: 0.0 101 | lambda_loss: 0 102 | learning_rate: 0.001 103 | name: SuperPointNet_gauss2 104 | nms: 4 105 | other_settings: train 2d, gauss 0.5 106 | params: {} 107 | sparse_loss: 108 | enable: true 109 | params: 110 | dist: cos 111 | lamda_d: 1 112 | method: 2d 113 | num_masked_non_matches_per_match: 100 114 | num_matching_attempts: 1000 115 | reset_iter: true 116 | retrain: true 117 | save_interval: 2000 118 | seed: 0 119 | tensorboard_interval: 1000 120 | train_iter: 200000 121 | train_show_interval: 1000 122 | validation_interval: 1000 123 | validation_size: 10 124 | -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0/checkpoints/superPointNet_170000_checkpoint.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0/checkpoints/superPointNet_170000_checkpoint.pth.tar -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0/checkpoints/superPointNet_90000_checkpoint.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0/checkpoints/superPointNet_90000_checkpoint.pth.tar -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0/config.yml: -------------------------------------------------------------------------------- 1 | data: 2 | augmentation: 3 | homographic: 4 | enable: false 5 | photometric: 6 | enable: true 7 | params: 8 | additive_gaussian_noise: 9 | stddev_range: 10 | - 0 11 | - 10 12 | additive_shade: 13 | kernel_size_range: 14 | - 100 15 | - 150 16 | transparency_range: 17 | - -0.5 18 | - 0.5 19 | additive_speckle_noise: 20 | prob_range: 21 | - 0 22 | - 0.0035 23 | motion_blur: 24 | max_kernel_size: 3 25 | random_brightness: 26 | max_abs_change: 50 27 | random_contrast: 28 | strength_range: 29 | - 0.5 30 | - 1.5 31 | primitives: 32 | - random_brightness 33 | - random_contrast 34 | - additive_speckle_noise 35 | - additive_gaussian_noise 36 | - additive_shade 37 | - motion_blur 38 | cache_in_memory: false 39 | dataset: Coco 40 | gaussian_label: 41 | enable: true 42 | params: 43 | GaussianBlur: 44 | sigma: 0.2 45 | labels: magicpoint_synth20_homoAdapt100_coco_f1/predictions 46 | name: coco 47 | preprocessing: 48 | resize: 49 | - 240 50 | - 320 51 | warped_pair: 52 | enable: true 53 | params: 54 | allow_artifacts: true 55 | max_angle: 1.57 56 | patch_ratio: 0.85 57 | perspective: true 58 | perspective_amplitude_x: 0.2 59 | perspective_amplitude_y: 0.2 60 | rotation: true 61 | scaling: true 62 | scaling_amplitude: 0.2 63 | translation: true 64 | 
valid_border_margin: 3 65 | front_end_model: Train_model_heatmap 66 | model: 67 | batch_size: 8 68 | dense_loss: 69 | enable: false 70 | params: 71 | descriptor_dist: 4 72 | lambda_d: 800 73 | detection_threshold: 0.015 74 | detector_loss: 75 | loss_type: softmax 76 | eval_batch_size: 8 77 | lambda_loss: 1 78 | learning_rate: 0.0001 79 | name: SuperPointNet_gauss2 80 | nms: 4 81 | other_settings: train 2d, gauss 0.2 82 | params: {} 83 | sparse_loss: 84 | enable: true 85 | params: 86 | dist: cos 87 | lamda_d: 1 88 | method: 2d 89 | num_masked_non_matches_per_match: 100 90 | num_matching_attempts: 1000 91 | reset_iter: true 92 | retrain: true 93 | save_interval: 2000 94 | tensorboard_interval: 200 95 | train_iter: 170000 96 | validation_interval: 200 97 | validation_size: 10 98 | -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/config.yml: -------------------------------------------------------------------------------- 1 | data: 2 | alteration: all 3 | dataset: hpatches 4 | name: patches_dataset 5 | preprocessing: 6 | resize: 7 | - 240 8 | - 320 9 | eval_iter: 1000 10 | front_end_model: Val_model_heatmap 11 | model: 12 | batch_size: 1 13 | detection_threshold: 0.015 14 | eval_batch_size: 1 15 | homography_adaptation: 16 | num: 0 17 | learning_rate: 0.001 18 | name: SuperPointNet_gauss2 19 | nms: 4 20 | nn_thresh: 1.0 21 | params: {} 22 | pretrained: logs/superpoint_coco_heat2_0/checkpoints/superPointNet_170000_checkpoint.pth.tar 23 | rand_noise: 24 | enable: false 25 | sigma: 0.2 26 | subpixel: 27 | enable: true 28 | patch_size: 5 29 | top_k: 1000 30 | -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/0.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/0.npz -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/1.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/1.npz -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/2.npz -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/3.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/3.npz -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/4.npz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/4.npz -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/5.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/5.npz -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/6.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/6.npz -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/7.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/7.npz -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/matching/0cv.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/matching/0cv.png -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/matching/0m.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/matching/0m.png -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/matching/1cv.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/matching/1cv.png -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/matching/1m.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/matching/1m.png -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/matching/2cv.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/matching/2cv.png 
-------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/matching/2m.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/matching/2m.png -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/matching/3cv.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/matching/3cv.png -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/matching/3m.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/matching/3m.png -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/matching/4cv.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/matching/4cv.png -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/matching/4m.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/matching/4m.png -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/matching/5cv.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/matching/5cv.png -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/matching/5m.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/matching/5m.png -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/matching/6cv.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/matching/6cv.png -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/matching/6m.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/matching/6m.png -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/repeatibility3/0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/repeatibility3/0.png -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/repeatibility3/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/repeatibility3/1.png -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/repeatibility3/2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/repeatibility3/2.png -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/repeatibility3/3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/repeatibility3/3.png -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/repeatibility3/4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/repeatibility3/4.png -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/repeatibility3/5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/repeatibility3/5.png -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/repeatibility3/6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/repeatibility3/6.png -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/warping/0.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/warping/0.png -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/warping/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/warping/1.png -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/warping/2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/warping/2.png -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/warping/3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/warping/3.png -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/warping/4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/warping/4.png -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/warping/5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/warping/5.png -------------------------------------------------------------------------------- /logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/warping/6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_coco_heat2_0_170k_nms4_det0.015/predictions/warping/6.png -------------------------------------------------------------------------------- /logs/superpoint_kitti_heat2_0/checkpoints/superPointNet_50000_checkpoint.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/logs/superpoint_kitti_heat2_0/checkpoints/superPointNet_50000_checkpoint.pth.tar -------------------------------------------------------------------------------- /logs/superpoint_kitti_heat2_0/config.yml: -------------------------------------------------------------------------------- 1 | data: 2 | augmentation: 3 | homographic: 4 | enable: false 5 | params: {} 6 | photometric: 7 | enable: true 8 | params: 9 | additive_gaussian_noise: 10 | stddev_range: 11 | - 0 12 | - 10 13 | additive_shade: 14 | kernel_size_range: 
15 | - 100 16 | - 150 17 | transparency_range: 18 | - -0.5 19 | - 0.5 20 | additive_speckle_noise: 21 | prob_range: 22 | - 0 23 | - 0.0035 24 | motion_blur: 25 | max_kernel_size: 3 26 | random_brightness: 27 | max_abs_change: 50 28 | random_contrast: 29 | strength_range: 30 | - 0.5 31 | - 1.5 32 | primitives: 33 | - random_brightness 34 | - random_contrast 35 | - additive_speckle_noise 36 | - additive_gaussian_noise 37 | - additive_shade 38 | - motion_blur 39 | cache_in_memory: false 40 | dataset: Kitti_inh 41 | gaussian_label: 42 | enable: true 43 | params: 44 | GaussianBlur: 45 | sigma: 0.2 46 | labels: magicpoint_synth20_homoAdapt100_kitti_h384/predictions 47 | name: kitti 48 | preprocessing: 49 | resize: 50 | - 384 51 | - 1248 52 | warped_pair: 53 | enable: true 54 | params: 55 | allow_artifacts: true 56 | max_angle: 1.57 57 | patch_ratio: 0.85 58 | perspective: true 59 | perspective_amplitude_x: 0.2 60 | perspective_amplitude_y: 0.2 61 | rotation: true 62 | scaling: true 63 | scaling_amplitude: 0.2 64 | translation: true 65 | valid_border_margin: 3 66 | front_end_model: Train_model_heatmap 67 | model: 68 | batch_size: 4 69 | dense_loss: 70 | enable: false 71 | params: 72 | descriptor_dist: 4 73 | lambda_d: 800 74 | detection_threshold: 0.015 75 | detector_loss: 76 | loss_type: softmax 77 | eval_batch_size: 4 78 | lambda_loss: 1 79 | learning_rate: 0.0001 80 | name: SuperPointNet_gauss2 81 | nms: 4 82 | params: {} 83 | sparse_loss: 84 | enable: true 85 | params: 86 | dist: cos 87 | lamda_d: 1 88 | method: 2d 89 | num_masked_non_matches_per_match: 100 90 | num_matching_attempts: 600 91 | other_settings: train from pretrained, 2d method 92 | pretrained: logs/superpoint_coco_heat2_0/checkpoints/superPointNet_90000_checkpoint.pth.tar 93 | reset_iter: true 94 | retrain: false 95 | save_interval: 2000 96 | tensorboard_interval: 400 97 | train_iter: 50000 98 | validation_interval: 2000 99 | validation_size: 5 100 | -------------------------------------------------------------------------------- /models/SubpixelNet.py: -------------------------------------------------------------------------------- 1 | """Old version of SuperpointNet. Use it together with 2 | logs/magicpoint_synth20/checkpoints/superPointNet_200000_checkpoint.pth.tar 3 | 4 | """ 5 | 6 | import torch 7 | import torch.nn as nn 8 | from torch.nn.init import xavier_uniform_, zeros_ 9 | from models.unet_parts import * 10 | 11 | # from models.SubpixelNet import SubpixelNet 12 | class SubpixelNet(torch.nn.Module): 13 | """ Pytorch definition of SuperPoint Network. """ 14 | def __init__(self, subpixel_channel=1): 15 | super(SubpixelNet, self).__init__() 16 | c1, c2, c3, c4, c5, d1 = 64, 64, 128, 128, 256, 256 17 | det_h = 65 18 | self.inc = inconv(1, c1) 19 | self.down1 = down(c1, c2) 20 | self.down2 = down(c2, c3) 21 | self.down3 = down(c3, c4) 22 | # self.down4 = down(c4, 512) 23 | self.up1 = up(c4+c3, c2) 24 | self.up2 = up(c2+c2, c1) 25 | self.up3 = up(c1+c1, c1) 26 | self.outc = outconv(c1, subpixel_channel) 27 | self.relu = torch.nn.ReLU(inplace=True) 28 | # self.outc = outconv(64, n_classes) 29 | # Detector Head. 30 | self.convPa = torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1) 31 | self.bnPa = nn.BatchNorm2d(c5) 32 | self.convPb = torch.nn.Conv2d(c5, det_h, kernel_size=1, stride=1, padding=0) 33 | self.bnPb = nn.BatchNorm2d(det_h) 34 | # Descriptor Head. 
35 | self.convDa = torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1) 36 | self.bnDa = nn.BatchNorm2d(c5) 37 | self.convDb = torch.nn.Conv2d(c5, d1, kernel_size=1, stride=1, padding=0) 38 | self.bnDb = nn.BatchNorm2d(d1) 39 | 40 | @staticmethod 41 | def soft_argmax_2d(patches): 42 | """ 43 | params: 44 | patches: (B, N, H, W) 45 | return: 46 | coor: (B, N, 2) (x, y) 47 | 48 | """ 49 | import torchgeometry as tgm 50 | m = tgm.contrib.SpatialSoftArgmax2d() 51 | coords = m(patches) # 1x4x2 52 | return coords 53 | 54 | def forward(self, x, subpixel=False): 55 | """ Forward pass that jointly computes unprocessed point and descriptor 56 | tensors. 57 | Input 58 | x: Image pytorch tensor shaped N x 1 x patch_size x patch_size. 59 | Output 60 | semi: Output point pytorch tensor shaped N x 65 x H/8 x W/8. 61 | desc: Output descriptor pytorch tensor shaped N x 256 x H/8 x W/8. 62 | """ 63 | # Let's stick to this version: first BN, then relu 64 | x1 = self.inc(x) 65 | x2 = self.down1(x1) 66 | x3 = self.down2(x2) 67 | x4 = self.down3(x3) 68 | # x5 = self.down4(x4) 69 | 70 | cPa = self.bnPa(self.relu(self.convPa(x4))) 71 | semi = self.bnPb(self.convPb(cPa)) 72 | # Descriptor Head. 73 | cDa = self.bnDa(self.relu(self.convDa(x4))) 74 | desc = self.bnDb(self.convDb(cDa)) 75 | 76 | dn = torch.norm(desc, p=2, dim=1) # Compute the norm. 77 | desc = desc.div(torch.unsqueeze(dn, 1)) # Divide by norm to normalize. 78 | 79 | # subpixel = True 80 | if subpixel: 81 | x = self.up1(x4, x3) 82 | x = self.up2(x, x2) 83 | x = self.up3(x, x1) 84 | x = self.outc(x) 85 | # print("x: ", x.shape) 86 | return semi, desc, x 87 | 88 | return semi, desc 89 | 90 | 91 | 92 | if __name__ == '__main__': 93 | 94 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 95 | model = SubpixelNet() 96 | model = model.to(device) 97 | 98 | 99 | # check keras-like model summary using torchsummary 100 | from torchsummary import summary 101 | summary(model, input_size=(1, 240, 320)) 102 | -------------------------------------------------------------------------------- /models/__init__.py: -------------------------------------------------------------------------------- 1 | def get_model(name): 2 | mod = __import__('superpoint.models.{}'.format(name), fromlist=['']) 3 | return getattr(mod, _module_to_class(name)) 4 | 5 | 6 | def _module_to_class(name): 7 | return ''.join(n.capitalize() for n in name.split('_')) 8 | -------------------------------------------------------------------------------- /models/classical_detectors_descriptors.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | import cv2 4 | 5 | # from .base_model import BaseModel 6 | # from .utils import box_nms 7 | 8 | def classical_detector_descriptor(im, **config): 9 | im = np.uint8(im) 10 | if config['method'] == 'sift': 11 | sift = cv2.xfeatures2d.SIFT_create(nfeatures=1500) 12 | keypoints, desc = sift.detectAndCompute(im, None) 13 | responses = np.array([k.response for k in keypoints]) 14 | keypoints = np.array([k.pt for k in keypoints]).astype(int) 15 | desc = np.array(desc) 16 | 17 | detections = np.zeros(im.shape[:2], np.float) 18 | detections[keypoints[:, 1], keypoints[:, 0]] = responses 19 | descriptors = np.zeros((im.shape[0], im.shape[1], 128), np.float) 20 | descriptors[keypoints[:, 1], keypoints[:, 0]] = desc 21 | 22 | elif config['method'] == 'orb': 23 | orb = cv2.ORB_create(nfeatures=1500) 24 | keypoints, desc = orb.detectAndCompute(im, None) 25 | responses 
= np.array([k.response for k in keypoints]) 26 | keypoints = np.array([k.pt for k in keypoints]).astype(int) 27 | desc = np.array(desc) 28 | 29 | detections = np.zeros(im.shape[:2], np.float) 30 | detections[keypoints[:, 1], keypoints[:, 0]] = responses 31 | descriptors = np.zeros((im.shape[0], im.shape[1], 32), np.float) 32 | descriptors[keypoints[:, 1], keypoints[:, 0]] = desc 33 | 34 | detections = detections.astype(np.float32) 35 | descriptors = descriptors.astype(np.float32) 36 | return (detections, descriptors) 37 | 38 | # from models.classical_detector_descriptors import SIFT_det 39 | def SIFT_det(img, img_rgb, visualize=False, nfeatures=2000): 40 | """ 41 | return: 42 | x_all: np [N, 2] (x, y) 43 | des: np [N, 128] (descriptors) 44 | """ 45 | # Initiate SIFT detector 46 | # pip install opencv-python==3.4.2.16, opencv-contrib-python==3.4.2.16 47 | # https://www.pyimagesearch.com/2015/07/16/where-did-sift-and-surf-go-in-opencv-3/ 48 | img = np.uint8(img) 49 | # print("img: ", img) 50 | sift = cv2.xfeatures2d.SIFT_create(contrastThreshold=1e-5) 51 | 52 | # find the keypoints and descriptors with SIFT 53 | kp, des = sift.detectAndCompute(img, None) 54 | # print("# kps: {}, descriptors: {}".format(len(kp), des.shape)) 55 | x_all = np.array([p.pt for p in kp]) 56 | 57 | if visualize: 58 | plt.figure(figsize=(30, 4)) 59 | plt.imshow(img_rgb) 60 | 61 | plt.scatter(x_all[:, 0], x_all[:, 1], s=10, marker='o', c='y') 62 | plt.show() 63 | 64 | # return x_all, kp, des 65 | 66 | return x_all, des 67 | 68 | ''' 69 | class ClassicalDetectorsDescriptors(BaseModel): 70 | input_spec = { 71 | 'image': {'shape': [None, None, None, 1], 'type': tf.float32} 72 | } 73 | default_config = { 74 | 'method': 'sift', # 'orb' 75 | 'threshold': 0.5, 76 | 'nms': 4, 77 | 'top_k': 300, 78 | } 79 | trainable = False 80 | 81 | def _model(self, inputs, mode, **config): 82 | im = inputs['image'] 83 | with tf.device('/cpu:0'): 84 | keypoints, descriptors = tf.map_fn(lambda i: tf.py_func( 85 | lambda x: classical_detector_descriptor(x, **config), 86 | [i], 87 | (tf.float32, tf.float32)), 88 | im, [tf.float32, tf.float32]) 89 | prob = keypoints 90 | prob_nms = prob 91 | if config['nms']: 92 | prob_nms = tf.map_fn(lambda p: box_nms(p, config['nms'], min_prob=0., 93 | keep_top_k=config['top_k']), prob) 94 | pred = tf.cast(tf.greater_equal(prob_nms, config['threshold']), tf.int32) 95 | keypoints = {'prob': prob, 'prob_nms': prob_nms, 'pred': pred} 96 | return {**keypoints, 'descriptors': descriptors} 97 | 98 | def _loss(self, outputs, inputs, **config): 99 | raise NotImplementedError 100 | 101 | def _metrics(self, outputs, inputs, **config): 102 | pred = outputs['pred'] 103 | labels = inputs['keypoint_map'] 104 | precision = tf.reduce_sum(pred*labels) / tf.reduce_sum(pred) 105 | recall = tf.reduce_sum(pred*labels) / tf.reduce_sum(labels) 106 | return {'precision': precision, 'recall': recall} 107 | ''' -------------------------------------------------------------------------------- /models/unet_parts.py: -------------------------------------------------------------------------------- 1 | """U-net parts used for SuperPointNet_gauss2.py 2 | """ 3 | # sub-parts of the U-Net model 4 | 5 | import torch 6 | import torch.nn as nn 7 | import torch.nn.functional as F 8 | 9 | 10 | class double_conv(nn.Module): 11 | '''(conv => BN => ReLU) * 2''' 12 | def __init__(self, in_ch, out_ch): 13 | super(double_conv, self).__init__() 14 | self.conv = nn.Sequential( 15 | nn.Conv2d(in_ch, out_ch, 3, padding=1), 16 | nn.BatchNorm2d(out_ch), 17 | 
nn.ReLU(inplace=True), 18 | nn.Conv2d(out_ch, out_ch, 3, padding=1), 19 | nn.BatchNorm2d(out_ch), 20 | nn.ReLU(inplace=True) 21 | ) 22 | 23 | def forward(self, x): 24 | x = self.conv(x) 25 | return x 26 | 27 | 28 | class inconv(nn.Module): 29 | def __init__(self, in_ch, out_ch): 30 | super(inconv, self).__init__() 31 | self.conv = double_conv(in_ch, out_ch) 32 | 33 | def forward(self, x): 34 | x = self.conv(x) 35 | return x 36 | 37 | 38 | class down(nn.Module): 39 | def __init__(self, in_ch, out_ch): 40 | super(down, self).__init__() 41 | self.mpconv = nn.Sequential( 42 | nn.MaxPool2d(2), 43 | double_conv(in_ch, out_ch) 44 | ) 45 | 46 | def forward(self, x): 47 | x = self.mpconv(x) 48 | return x 49 | 50 | 51 | class up(nn.Module): 52 | def __init__(self, in_ch, out_ch, bilinear=True): 53 | super(up, self).__init__() 54 | 55 | # would be a nice idea if the upsampling could be learned too, 56 | # but my machine do not have enough memory to handle all those weights 57 | if bilinear: 58 | self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) 59 | else: 60 | self.up = nn.ConvTranspose2d(in_ch//2, in_ch//2, 2, stride=2) 61 | 62 | self.conv = double_conv(in_ch, out_ch) 63 | 64 | def forward(self, x1, x2): 65 | x1 = self.up(x1) 66 | 67 | # input is CHW 68 | diffY = x2.size()[2] - x1.size()[2] 69 | diffX = x2.size()[3] - x1.size()[3] 70 | 71 | x1 = F.pad(x1, (diffX // 2, diffX - diffX//2, 72 | diffY // 2, diffY - diffY//2)) 73 | 74 | # for padding issues, see 75 | # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a 76 | # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd 77 | 78 | x = torch.cat([x2, x1], dim=1) 79 | x = self.conv(x) 80 | return x 81 | 82 | 83 | class outconv(nn.Module): 84 | def __init__(self, in_ch, out_ch): 85 | super(outconv, self).__init__() 86 | self.conv = nn.Conv2d(in_ch, out_ch, 1) 87 | 88 | def forward(self, x): 89 | x = self.conv(x) 90 | return x 91 | -------------------------------------------------------------------------------- /notebooks/Untitled.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [], 3 | "metadata": {}, 4 | "nbformat": 4, 5 | "nbformat_minor": 2 6 | } 7 | -------------------------------------------------------------------------------- /notebooks/export_homography_adaptation.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "# export homography adaptation on small kitti" 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": null, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "# checkpoint\n", 19 | "\n", 20 | "# output folder\n", 21 | "\n", 22 | "# load the config\n", 23 | "\n", 24 | "# export train\n", 25 | "\n", 26 | "# export val\n", 27 | "\n" 28 | ] 29 | } 30 | ], 31 | "metadata": { 32 | "kernelspec": { 33 | "display_name": "Python 3", 34 | "language": "python", 35 | "name": "python3" 36 | }, 37 | "language_info": { 38 | "codemirror_mode": { 39 | "name": "ipython", 40 | "version": 3 41 | }, 42 | "file_extension": ".py", 43 | "mimetype": "text/x-python", 44 | "name": "python", 45 | "nbconvert_exporter": "python", 46 | "pygments_lexer": "ipython3", 47 | "version": "3.6.7" 48 | } 49 | }, 50 | "nbformat": 4, 51 | "nbformat_minor": 2 52 | } 53 | 
-------------------------------------------------------------------------------- /notebooks/h2.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/notebooks/h2.npz -------------------------------------------------------------------------------- /notebooks/kitti_correspondence.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 1, 13 | "metadata": {}, 14 | "outputs": [ 15 | { 16 | "ename": "ModuleNotFoundError", 17 | "evalue": "No module named 'source'", 18 | "output_type": "error", 19 | "traceback": [ 20 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", 21 | "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", 22 | "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 21\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mmatplotlib\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpatches\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mpatches\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 22\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mtime\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 23\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0msource\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mutils\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mload_tracklets_for_frames\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpoint_inside\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0min_hull\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 24\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0msource\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mparseTrackletXML\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mxmlParser\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 25\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0msource\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mkitti_util\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 23 | "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'source'" 24 | ] 25 | } 26 | ], 27 | "source": [ 28 | "import sys\n", 29 | "KITTI_ROOT_PATH = '/home/ruizhu/Documents/Datasets/kitti'\n", 30 | "sys.path.append(KITTI_ROOT_PATH+'/kitti-lidar-utils')\n", 31 | "sys.path.append(KITTI_ROOT_PATH+'/kitti-lidar-utils/source')\n", 32 | "sys.path.append(KITTI_ROOT_PATH+'/depth/devkit/python')\n", 33 | "\n", 34 | "import numpy as np\n", 35 | "np.set_printoptions(precision=4, suppress=True)\n", 36 | "\n", 37 | "%reload_ext autoreload\n", 38 | "%autoreload 2\n", 39 | "\n", 40 | "from scipy.io import savemat\n", 41 | "import itertools\n", 42 | "import pdb\n", 43 | "import os\n", 44 | "from PIL import Image, ImageOps\n", 45 | "import imageio\n", 46 | "%matplotlib inline\n", 47 | "import matplotlib.pyplot as plt\n", 48 | "import matplotlib.patches as patches\n", 49 | "import time\n", 50 | "from source.utils import load_tracklets_for_frames, point_inside, in_hull\n", 51 | "from source import parseTrackletXML as xmlParser\n", 52 | "from source.kitti_util import *\n", 53 | "import argparse\n", 54 | "from matplotlib import cm\n", 55 | "from math import atan2, degrees\n", 56 | "from PIL import Image\n", 57 | 
"from read_depth import *\n", 58 | "from IPython.core.display import display, HTML\n", 59 | "display(HTML(\"\"))\n", 60 | "import torch\n", 61 | "\n", 62 | "import dsac_tools.utils_F as utils_F\n", 63 | "import dsac_tools.utils_geo as utils_geo\n", 64 | "import dsac_tools.utils_misc as utils_misc\n", 65 | "import dsac_tools.utils_vis as utils_vis\n", 66 | "import dsac_tools.utils_opencv as utils_opencv\n", 67 | "from dsac_tools.H_loss import HLoss\n", 68 | "from dsac_tools.dsac import DSAC\n", 69 | "\n", 70 | "from kitti_tools.utils_kitti import *\n", 71 | "kitti_two_frame_loader = KittiLoader(KITTI_ROOT_PATH)\n", 72 | "\n", 73 | "## Select sequence\n", 74 | "date_name = '2011_09_26'\n", 75 | "seq_name = '0001'\n", 76 | "\n", 77 | "kitti_two_frame_loader.set_drive(date_name, seq_name)\n", 78 | "kitti_two_frame_loader.get_left_right_gt()\n", 79 | "kitti_two_frame_loader.load_cam_poses()\n", 80 | "# kitti_two_frame_loader.show_demo()\n", 81 | "kitti_two_frame_loader.rectify_all(visualize=False)\n", 82 | "\n", 83 | "## Help functions and plot params\n", 84 | "fig_scale = 300\n", 85 | "fig_ratio = [4, 3]\n", 86 | "im_shape = kitti_two_frame_loader.im_shape" 87 | ] 88 | }, 89 | { 90 | "cell_type": "code", 91 | "execution_count": null, 92 | "metadata": {}, 93 | "outputs": [], 94 | "source": [ 95 | "## Get two frames" 96 | ] 97 | } 98 | ], 99 | "metadata": { 100 | "kernelspec": { 101 | "display_name": "py36_pytorch", 102 | "language": "python", 103 | "name": "py36_pytorch" 104 | }, 105 | "language_info": { 106 | "codemirror_mode": { 107 | "name": "ipython", 108 | "version": 3 109 | }, 110 | "file_extension": ".py", 111 | "mimetype": "text/x-python", 112 | "name": "python", 113 | "nbconvert_exporter": "python", 114 | "pygments_lexer": "ipython3", 115 | "version": "3.6.7" 116 | } 117 | }, 118 | "nbformat": 4, 119 | "nbformat_minor": 2 120 | } 121 | -------------------------------------------------------------------------------- /notebooks/sort_kitti_dataset.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [ 8 | { 9 | "name": "stdout", 10 | "output_type": "stream", 11 | "text": [ 12 | "/home/yoyee/Documents/deepSfm\n" 13 | ] 14 | } 15 | ], 16 | "source": [ 17 | "import os\n", 18 | "import sys\n", 19 | "import numpy as np\n", 20 | "import subprocess\n", 21 | "subprocess.run([\"ls\", \"-l\"])\n", 22 | "\n", 23 | "module_path = os.path.abspath(os.path.join('..'))\n", 24 | "if module_path not in sys.path:\n", 25 | " sys.path.append(module_path)\n", 26 | "os.chdir('..')\n", 27 | "print(os.getcwd())\n", 28 | "\n", 29 | "\n" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": 4, 35 | "metadata": {}, 36 | "outputs": [ 37 | { 38 | "name": "stderr", 39 | "output_type": "stream", 40 | "text": [ 41 | "/home/yoyee/.local/lib/python3.6/site-packages/ipykernel_launcher.py:3: UserWarning: loadtxt: Empty input file: \"logs/magicpoint_synth20_homoAdapt100_kitti/predictions/val.txt\"\n", 42 | " This is separate from the ipykernel package so we can avoid doing imports until\n", 43 | "/home/yoyee/.local/lib/python3.6/site-packages/ipykernel_launcher.py:4: UserWarning: loadtxt: Empty input file: \"logs/magicpoint_synth20_homoAdapt100_kitti/predictions/val.txt\"\n", 44 | " after removing the cwd from sys.path.\n" 45 | ] 46 | } 47 | ], 48 | "source": [ 49 | "folder = 'logs/magicpoint_synth20_homoAdapt100_kitti/predictions'\n", 50 | "file = 'val.txt'\n", 
51 | "val = np.loadtxt(folder + '/' + file)\n", 52 | "val = np.loadtxt(folder + '/' + file)" 53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": 3, 58 | "metadata": {}, 59 | "outputs": [ 60 | { 61 | "data": { 62 | "text/plain": [ 63 | "array([], dtype=float64)" 64 | ] 65 | }, 66 | "execution_count": 3, 67 | "metadata": {}, 68 | "output_type": "execute_result" 69 | } 70 | ], 71 | "source": [ 72 | "val" 73 | ] 74 | }, 75 | { 76 | "cell_type": "code", 77 | "execution_count": null, 78 | "metadata": {}, 79 | "outputs": [], 80 | "source": [] 81 | } 82 | ], 83 | "metadata": { 84 | "kernelspec": { 85 | "display_name": "py36_pytorch", 86 | "language": "python", 87 | "name": "py36_pytorch" 88 | }, 89 | "language_info": { 90 | "codemirror_mode": { 91 | "name": "ipython", 92 | "version": 3 93 | }, 94 | "file_extension": ".py", 95 | "mimetype": "text/x-python", 96 | "name": "python", 97 | "nbconvert_exporter": "python", 98 | "pygments_lexer": "ipython3", 99 | "version": "3.6.7" 100 | } 101 | }, 102 | "nbformat": 4, 103 | "nbformat_minor": 2 104 | } 105 | -------------------------------------------------------------------------------- /notebooks/test_export.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "## Test exported data" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 1, 13 | "metadata": {}, 14 | "outputs": [ 15 | { 16 | "name": "stdout", 17 | "output_type": "stream", 18 | "text": [ 19 | "/home/yoyee/Documents/deepSfm\n" 20 | ] 21 | } 22 | ], 23 | "source": [ 24 | "import os\n", 25 | "import sys\n", 26 | "module_path = os.path.abspath(os.path.join('..'))\n", 27 | "if module_path not in sys.path:\n", 28 | " sys.path.append(module_path)\n", 29 | "os.chdir('../')\n", 30 | "print(os.getcwd())" 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": 2, 36 | "metadata": {}, 37 | "outputs": [ 38 | { 39 | "data": { 40 | "text/html": [ 41 | "" 42 | ], 43 | "text/plain": [ 44 | "" 45 | ] 46 | }, 47 | "metadata": {}, 48 | "output_type": "display_data" 49 | } 50 | ], 51 | "source": [ 52 | "import numpy as np\n", 53 | "import matplotlib.pyplot as plt\n", 54 | "import logging\n", 55 | "\n", 56 | "%load_ext autoreload\n", 57 | "%autoreload 2\n", 58 | "\n", 59 | "from IPython.core.display import display, HTML\n", 60 | "display(HTML(\"\"))" 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": 6, 66 | "metadata": {}, 67 | "outputs": [ 68 | { 69 | "name": "stdout", 70 | "output_type": "stream", 71 | "text": [ 72 | "data: ['image', 'prob', 'desc', 'warped_image', 'matches', 'warped_prob', 'warped_desc', 'homography']\n", 73 | "data: ['image', 'prob', 'desc', 'warped_image', 'matches', 'warped_prob', 'warped_desc', 'homography']\n", 74 | "data: ['image', 'prob', 'desc', 'warped_image', 'matches', 'warped_prob', 'warped_desc', 'homography']\n", 75 | "keypoints: [[109.17230684 283.03176337]\n", 76 | " [ 34.07878713 294.94217514]\n", 77 | " [129.87245847 91.09245099]\n", 78 | " [ 94.12839252 63.99860885]\n", 79 | " [ 64.90133487 262.08064939]]\n", 80 | "warped_keypoints: [[ 64.88634469 262.05720991]\n", 81 | " [181.17106545 129.98775617]\n", 82 | " [ 43.23742922 76.90757325]\n", 83 | " [ 15.12823653 125.97053776]\n", 84 | " [ 16.90259865 135.89217377]]\n" 85 | ] 86 | } 87 | ], 88 | "source": [ 89 | "file = 'logs/superpoint_coco_test/predictions/'\n", 90 | "for i in range(3):\n", 91 | " data = np.load(file + 
str(i) + '.npz')\n", 92 | " print(\"data: \", list(data))\n", 93 | " image = data['image']\n", 94 | " warped_image = data['warped_image']\n", 95 | " real_H = data['homography']\n", 96 | " keypoints = data['prob'][:,[1, 0]] # (y, x)\n", 97 | " desc = data['desc']\n", 98 | " warped_keypoints = data['warped_prob'][:,[1, 0]]\n", 99 | " \n", 100 | "print(\"keypoints: \", keypoints[:5])\n", 101 | "print(\"warped_keypoints: \", warped_keypoints[:5])" 102 | ] 103 | }, 104 | { 105 | "cell_type": "code", 106 | "execution_count": null, 107 | "metadata": {}, 108 | "outputs": [], 109 | "source": [] 110 | } 111 | ], 112 | "metadata": { 113 | "kernelspec": { 114 | "display_name": "py36_pytorch", 115 | "language": "python", 116 | "name": "py36_pytorch" 117 | }, 118 | "language_info": { 119 | "codemirror_mode": { 120 | "name": "ipython", 121 | "version": 3 122 | }, 123 | "file_extension": ".py", 124 | "mimetype": "text/x-python", 125 | "name": "python", 126 | "nbconvert_exporter": "python", 127 | "pygments_lexer": "ipython3", 128 | "version": "3.6.7" 129 | } 130 | }, 131 | "nbformat": 4, 132 | "nbformat_minor": 2 133 | } 134 | -------------------------------------------------------------------------------- /notebooks/torch_test.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# test torch\n", 8 | "- test if models are different (well trained)" 9 | ] 10 | }, 11 | { 12 | "cell_type": "code", 13 | "execution_count": 2, 14 | "metadata": {}, 15 | "outputs": [], 16 | "source": [ 17 | "import torch" 18 | ] 19 | }, 20 | { 21 | "cell_type": "code", 22 | "execution_count": 21, 23 | "metadata": {}, 24 | "outputs": [], 25 | "source": [ 26 | "mat = torch.zeros((80, 100))\n", 27 | "mat = mat\n", 28 | "Hc, Wc = mat.shape" 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": 22, 34 | "metadata": {}, 35 | "outputs": [ 36 | { 37 | "name": "stdout", 38 | "output_type": "stream", 39 | "text": [ 40 | "Hc: 80 , Wc: 100\n" 41 | ] 42 | } 43 | ], 44 | "source": [ 45 | "print(\"Hc: \", Hc, \", Wc: \", Wc)" 46 | ] 47 | }, 48 | { 49 | "cell_type": "code", 50 | "execution_count": 20, 51 | "metadata": {}, 52 | "outputs": [ 53 | { 54 | "name": "stdout", 55 | "output_type": "stream", 56 | "text": [ 57 | "mat_shaep: tensor([160, 200])\n" 58 | ] 59 | } 60 | ], 61 | "source": [ 62 | "cell_size = 2\n", 63 | "H, W = Hc*cell_size, Wc*cell_size \n", 64 | "mat_shape = torch.tensor([H, W])\n", 65 | "print(\"mat_shape: \", mat_shape)" 66 | ] 67 | }, 68 | { 69 | "cell_type": "code", 70 | "execution_count": 23, 71 | "metadata": {}, 72 | "outputs": [], 73 | "source": [ 74 | "a = torch.zeros((10, 10), dtype=torch.float32)" 75 | ] 76 | }, 77 | { 78 | "cell_type": "code", 79 | "execution_count": 25, 80 | "metadata": {}, 81 | "outputs": [ 82 | { 83 | "data": { 84 | "text/plain": [ 85 | "'torch.FloatTensor'" 86 | ] 87 | }, 88 | "execution_count": 25, 89 | "metadata": {}, 90 | "output_type": "execute_result" 91 | } 92 | ], 93 | "source": [ 94 | "a.type()" 95 | ] 96 | }, 97 | { 98 | "cell_type": "code", 99 | "execution_count": null, 100 | "metadata": {}, 101 | "outputs": [], 102 | "source": [] 103 | } 104 | ], 105 | "metadata": { 106 | "kernelspec": { 107 | "display_name": "py36-torch1.1-cuda10", 108 | "language": "python", 109 | "name": "py36-torch1.1-cuda10" 110 | }, 111 | "language_info": { 112 | "codemirror_mode": { 113 | "name": "ipython", 114 | "version": 3 115 | }, 116 | "file_extension": ".py", 
117 | "mimetype": "text/x-python", 118 | "name": "python", 119 | "nbconvert_exporter": "python", 120 | "pygments_lexer": "ipython3", 121 | "version": "3.6.8" 122 | } 123 | }, 124 | "nbformat": 4, 125 | "nbformat_minor": 4 126 | } 127 | -------------------------------------------------------------------------------- /notebooks/useful_modules.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "## Useful modules in jupyter notebook\n", 8 | "- This notebook includes some basic tools in notebook" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "metadata": {}, 14 | "source": [ 15 | "### module path, file path\n", 16 | "- add your module path (different from where the notebook is)\n", 17 | "- change your base path" 18 | ] 19 | }, 20 | { 21 | "cell_type": "code", 22 | "execution_count": 2, 23 | "metadata": {}, 24 | "outputs": [ 25 | { 26 | "name": "stdout", 27 | "output_type": "stream", 28 | "text": [ 29 | "/home/yoyee/Documents\n" 30 | ] 31 | } 32 | ], 33 | "source": [ 34 | "import os\n", 35 | "import sys\n", 36 | "# add your module path\n", 37 | "module_path = os.path.abspath(os.path.join('..'))\n", 38 | "if module_path not in sys.path:\n", 39 | " sys.path.append(module_path)\n", 40 | "# change your base path\n", 41 | "os.chdir('../')\n", 42 | "print(os.getcwd())" 43 | ] 44 | }, 45 | { 46 | "cell_type": "markdown", 47 | "metadata": {}, 48 | "source": [ 49 | "### auto-reload\n", 50 | "- when you change anything in python files, the notebook will reload automatically." 51 | ] 52 | }, 53 | { 54 | "cell_type": "code", 55 | "execution_count": null, 56 | "metadata": {}, 57 | "outputs": [], 58 | "source": [ 59 | "%load_ext autoreload\n", 60 | "%autoreload 2" 61 | ] 62 | }, 63 | { 64 | "cell_type": "markdown", 65 | "metadata": {}, 66 | "source": [ 67 | "### Display\n", 68 | "- fit your notebook as wide as the browser" 69 | ] 70 | }, 71 | { 72 | "cell_type": "code", 73 | "execution_count": null, 74 | "metadata": {}, 75 | "outputs": [], 76 | "source": [ 77 | "from IPython.core.display import display, HTML\n", 78 | "display(HTML(\"\"))" 79 | ] 80 | }, 81 | { 82 | "cell_type": "markdown", 83 | "metadata": {}, 84 | "source": [ 85 | "### Others" 86 | ] 87 | }, 88 | { 89 | "cell_type": "code", 90 | "execution_count": null, 91 | "metadata": {}, 92 | "outputs": [], 93 | "source": [ 94 | "import numpy as np\n", 95 | "import matplotlib.pyplot as plt" 96 | ] 97 | } 98 | ], 99 | "metadata": { 100 | "kernelspec": { 101 | "display_name": "py36_pytorch", 102 | "language": "python", 103 | "name": "py36_pytorch" 104 | }, 105 | "language_info": { 106 | "codemirror_mode": { 107 | "name": "ipython", 108 | "version": 3 109 | }, 110 | "file_extension": ".py", 111 | "mimetype": "text/x-python", 112 | "name": "python", 113 | "nbconvert_exporter": "python", 114 | "pygments_lexer": "ipython3", 115 | "version": "3.6.7" 116 | } 117 | }, 118 | "nbformat": 4, 119 | "nbformat_minor": 2 120 | } 121 | -------------------------------------------------------------------------------- /pretrained/superpoint_v1.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/pretrained/superpoint_v1.pth -------------------------------------------------------------------------------- /requirements.txt: 
-------------------------------------------------------------------------------- 1 | # torch >= 0.4.1 2 | # torchvision 3 | argparse 4 | scipy 5 | opencv-python==3.4.2.16 6 | opencv-contrib-python==3.4.2.16 7 | matplotlib 8 | imageio 9 | tqdm 10 | tensorflow==1.14.0 11 | tensorboardX 12 | tqdm 13 | pyyaml 14 | imageio 15 | imgaug 16 | jupyter 17 | scikit-learn 18 | 19 | torchgeometry 20 | torchsummary 21 | coloredlogs 22 | -------------------------------------------------------------------------------- /requirements_torch.txt: -------------------------------------------------------------------------------- 1 | torch 2 | torchvision 3 | -------------------------------------------------------------------------------- /run_export.sh: -------------------------------------------------------------------------------- 1 | ## quick script to run export and evaluation 2 | 3 | export_folder='superpoint_coco_heat2_0_170k_nms4_det0.015' 4 | # export_folder='superpoint_kitti_heat2_0' 5 | echo $export_folder 6 | # python3 export.py export_descriptor configs/magicpoint_repeatability_heatmap.yaml $export_folder 7 | python3 evaluation.py /home/yyjau/Documents/deepSfm_test/logs/$export_folder/predictions --repeatibility --homography --outputImg --plotMatching 8 | 9 | -------------------------------------------------------------------------------- /settings.py: -------------------------------------------------------------------------------- 1 | """ 2 | paths defined here are used in many places 3 | """ 4 | 5 | DATA_PATH = 'datasets' # path for datasets 6 | EXPER_PATH = 'logs' # path for saving checkpoints 7 | SYN_TMPDIR = './datasets/' # path for dumping synthetic data 8 | DEBUG = False # true: will make synthetic data only uses draw_checkboard and ignore other classes 9 | # DEBUG = False 10 | 11 | -------------------------------------------------------------------------------- /test/sample_homography.py: -------------------------------------------------------------------------------- 1 | """Testing file for homography (not sorted yet) 2 | """ 3 | 4 | import numpy as np 5 | import tensorflow as tf 6 | import torch 7 | # corner_img = np.array([(0, 0), (img_w, 0), (0, img_h), (img_w, img_h)]) 8 | import cv2 9 | from utils.utils import warp_points_np 10 | 11 | 12 | def sample_homography(inv_scale=3): 13 | corner_img = np.array([(-1, -1), (-1, 1), (1, -1), (1, 1)]) 14 | offset_r = 1 - 1/inv_scale 15 | img_offset = np.array([(-1, -1), (-1, offset_r), (offset_r, -1), (offset_r, offset_r)]) 16 | corner_map = np.random.rand(4,2)/inv_scale + img_offset 17 | matrix = cv2.getPerspectiveTransform(np.float32(corner_img), np.float32(corner_map)) 18 | return matrix 19 | 20 | 21 | 22 | import matplotlib.pyplot as plt 23 | 24 | def plot_points(matrix, ls='--', lw=1.2, colors=None): 25 | x_points, y_points = matrix[:,0], matrix[:,1] 26 | size = len(x_points) 27 | colors = ['red', 'blue', 'orange', 'green'] if not None else colors 28 | for i in range(size): 29 | plt.plot(x_points[i], y_points[i], color=colors[i], marker='o') 30 | # plt.plot(x_points[i], x_points[(i+1) % size],color=colors[i],linestyle=ls, linewidth=lw) 31 | # plt.plot(x_points[i], x_points[(i + 1) % size], linestyle=ls, linewidth=lw) 32 | # [y_points[i], y_points[(i+1) % size]], 33 | # color=colors[i], 34 | # linestyle=ls, linewidth=lw) 35 | 36 | def printCorners(corner_img, mat_homographies): 37 | points = warp_points_np(corner_img, mat_homographies) 38 | # plot 39 | plot_points(corner_img) 40 | for i in range(points.shape[0]): 41 | plot_points(points[i,:,:]) 
42 | plt.show() 43 | 44 | def test_sample_homography(): 45 | batch_size = 30 46 | filename = '../configs/superpoint_coco_train.yaml' 47 | import yaml 48 | with open(filename, 'r') as f: 49 | config = yaml.load(f) 50 | test_tf = False 51 | test_corner_def = True 52 | 53 | if test_tf == True: 54 | from utils.homographies import sample_homography as sample_homography 55 | boundary = 1 56 | # from utils.homographies import sample_homography_np as sample_homography 57 | # mat_homographies = matrix[np.newaxis, :,:] 58 | # mat_homographies = [sample_homography(tf.constant([boundary,boundary]), 59 | mat_homographies = [sample_homography(np.array([boundary,boundary]), 60 | **config['data']['warped_pair']['params']) for i in range(batch_size)] 61 | mat_homographies = np.stack(mat_homographies, axis=0) 62 | corner_img = np.array([[0., 0.], [0., boundary], [boundary, boundary], [boundary, 0.]]) 63 | printCorners(corner_img, mat_homographies) 64 | 65 | if test_corner_def: 66 | corner_img = np.array([(-1, -1), (-1, 1), (1, 1), (1, -1)]) 67 | from utils.homographies import sample_homography_np as sample_homography 68 | boundary = 2 69 | mat_homographies = [sample_homography(np.array([boundary,boundary]), shift=-1, 70 | **config['data']['warped_pair']['params']) for i in range(batch_size)] 71 | mat_homographies = np.stack(mat_homographies, axis=0) 72 | printCorners(corner_img, mat_homographies) 73 | 74 | 75 | else: 76 | from utils.utils import sample_homography 77 | mat_homographies = [sample_homography(1) for i in range(batch_size)] 78 | 79 | # sess = tf.Session() 80 | # with sess.as_default(): 81 | # m = mat_homographies[0].eval() 82 | 83 | print("end") 84 | 85 | def test_valid_mask(): 86 | from utils.utils import pltImshow 87 | batch_size = 1 88 | mat_homographies = [sample_homography(3) for i in range(batch_size)] 89 | mat_H = np.stack(mat_homographies, axis=0) 90 | 91 | 92 | corner_img = np.array([(-1, -1), (-1, 1), (1, -1), (1, 1)]) 93 | # printCorners(corner_img, mat_H) 94 | # points = warp_points_np(corner_img, mat_homographies) 95 | 96 | mat_H = torch.tensor(mat_H, dtype=torch.float32) 97 | mat_H_inv = torch.stack([torch.inverse(mat_H[i, :, :]) for i in range(batch_size)]) 98 | from utils.utils import compute_valid_mask, labels2Dto3D 99 | device = 'cpu' 100 | shape = torch.tensor([240, 320]) 101 | for i in range(1): 102 | r = 3 103 | mask_valid = compute_valid_mask(shape, inv_homography=mat_H_inv, device=device, erosion_radius=r) 104 | pltImshow(mask_valid[0,:,:]) 105 | cell_size = 8 106 | mask_valid = labels2Dto3D(mask_valid.view(batch_size, 1, mask_valid.shape[1], mask_valid.shape[2]), cell_size=cell_size) 107 | mask_valid = torch.prod(mask_valid[:,:cell_size*cell_size,:,:], dim=1) 108 | pltImshow(mask_valid[0,:,:].cpu().numpy()) 109 | 110 | mask = {} 111 | mask.update({'homographies': mat_H, 'masks': mask_valid}) 112 | np.savez_compressed('h2.npz', **mask) 113 | print("finish testing valid mask") 114 | 115 | if __name__ == '__main__': 116 | # test_sample_homography() 117 | test_valid_mask() 118 | 119 | 120 | ''' 121 | x_points = np.array([0, 0, 20, 20]) 122 | y_points = np.array([0, 20, 20, 0]) 123 | matrix = np.array([x_points, y_points]) 124 | # colors = ['red', 'blue', 'magenta', 'green'] 125 | colors = ['r', 'b', 'm', 'g'] 126 | size = len(x_points) 127 | plot_points(matrix, colors) 128 | plt.ylim([-5,25]) 129 | plt.xlim([-5,25]) 130 | plt.axes().set_aspect('equal') 131 | plt.show() 132 | ''' -------------------------------------------------------------------------------- 
/test/visualize_warping.py: -------------------------------------------------------------------------------- 1 | """Testing file (not sorted yet) 2 | 3 | """ 4 | 5 | import torch 6 | import numpy as np 7 | 8 | 9 | from utils.utils import inv_warp_image_batch 10 | from numpy.linalg import inv 11 | import cv2 12 | import matplotlib.pyplot as plt 13 | from utils.draw import plot_imgs 14 | 15 | from utils.utils import pltImshow 16 | path = '/home/yoyee/Documents/deepSfm/logs/superpoint_hpatches_pretrained/predictions/' 17 | for i in range(10): 18 | data = np.load(path + str(i) + '.npz') 19 | # p1 = '/home/yoyee/Documents/deepSfm/datasets/HPatches/v_abstract/1.ppm' 20 | # p2 = '/home/yoyee/Documents/deepSfm/datasets/HPatches/v_abstract/2.ppm' 21 | # H = '/home/yoyee/Documents/deepSfm/datasets/HPatches/v_abstract/H_1_2' 22 | # img = np.load(p1) 23 | # warped_img = np.load(p2) 24 | 25 | H = data['homography'] 26 | img1 = data['image'][:,:,np.newaxis] 27 | img2 = data['warped_image'][:,:,np.newaxis] 28 | # warped_img_H = inv_warp_image_batch(torch.tensor(img), torch.tensor(inv(H))) 29 | warped_img1 = cv2.warpPerspective(img1, H, (img1.shape[1], img1.shape[0])) 30 | 31 | 32 | # img_cat = np.concatenate((img, warped_img, warped_img_H), axis=1) 33 | # pltImshow(img_cat) 34 | 35 | # from numpy.linalg import inv 36 | # warped_img1 = cv2.warpPerspective(img1, inv(H), (img2.shape[1], img2.shape[0])) 37 | img1 = np.concatenate([img1, img1, img1], axis=2) 38 | warped_img1 = np.stack([warped_img1, warped_img1, warped_img1], axis=2) 39 | img2 = np.concatenate([img2, img2, img2], axis=2) 40 | plot_imgs([img1, img2, warped_img1], titles=['img1', 'img2', 'warped_img1'], dpi=200) 41 | plt.savefig( 'test' + str(i) + '.png') -------------------------------------------------------------------------------- /train4.py: -------------------------------------------------------------------------------- 1 | """Training script 2 | This is the training script for superpoint detector and descriptor. 
3 | 4 | Author: You-Yi Jau, Rui Zhu 5 | Date: 2019/12/12 6 | """ 7 | 8 | import argparse 9 | import yaml 10 | import os 11 | import logging 12 | 13 | import torch 14 | import torch.optim 15 | import torch.utils.data 16 | 17 | from tensorboardX import SummaryWriter 18 | 19 | # from utils.utils import tensor2array, save_checkpoint, load_checkpoint, save_path_formatter 20 | from utils.utils import getWriterPath 21 | from settings import EXPER_PATH 22 | 23 | ## loaders: data, model, pretrained model 24 | from utils.loader import dataLoader, modelLoader, pretrainedLoader 25 | from utils.logging import * 26 | # from models.model_wrap import SuperPointFrontend_torch, PointTracker 27 | 28 | ###### util functions ###### 29 | def datasize(train_loader, config, tag='train'): 30 | logging.info('== %s split size %d in %d batches'%\ 31 | (tag, len(train_loader)*config['model']['batch_size'], len(train_loader))) 32 | pass 33 | 34 | from utils.loader import get_save_path 35 | 36 | ###### util functions end ###### 37 | 38 | 39 | ###### train script ###### 40 | def train_base(config, output_dir, args): 41 | return train_joint(config, output_dir, args) 42 | pass 43 | 44 | # def train_joint_dsac(): 45 | # pass 46 | 47 | def train_joint(config, output_dir, args): 48 | assert 'train_iter' in config 49 | 50 | # config 51 | # from utils.utils import pltImshow 52 | # from utils.utils import saveImg 53 | torch.set_default_tensor_type(torch.FloatTensor) 54 | task = config['data']['dataset'] 55 | 56 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 57 | logging.info('train on device: %s', device) 58 | with open(os.path.join(output_dir, 'config.yml'), 'w') as f: 59 | yaml.dump(config, f, default_flow_style=False) 60 | # writer = SummaryWriter(getWriterPath(task=args.command, date=True)) 61 | writer = SummaryWriter(getWriterPath(task=args.command, 62 | exper_name=args.exper_name, date=True)) 63 | ## save data 64 | save_path = get_save_path(output_dir) 65 | 66 | # data loading 67 | # data = dataLoader(config, dataset='syn', warp_input=True) 68 | data = dataLoader(config, dataset=task, warp_input=True) 69 | train_loader, val_loader = data['train_loader'], data['val_loader'] 70 | 71 | datasize(train_loader, config, tag='train') 72 | datasize(val_loader, config, tag='val') 73 | # init the training agent using config file 74 | # from train_model_frontend import Train_model_frontend 75 | from utils.loader import get_module 76 | train_model_frontend = get_module('', config['front_end_model']) 77 | 78 | train_agent = train_model_frontend(config, save_path=save_path, device=device) 79 | 80 | # writer from tensorboard 81 | train_agent.writer = writer 82 | 83 | # feed the data into the agent 84 | train_agent.train_loader = train_loader 85 | train_agent.val_loader = val_loader 86 | 87 | # load model initiates the model and load the pretrained model (if any) 88 | train_agent.loadModel() 89 | train_agent.dataParallel() 90 | 91 | try: 92 | # train function takes care of training and evaluation 93 | train_agent.train() 94 | except KeyboardInterrupt: 95 | print ("press ctrl + c, save model!") 96 | train_agent.saveModel() 97 | pass 98 | 99 | if __name__ == '__main__': 100 | # global var 101 | torch.set_default_tensor_type(torch.FloatTensor) 102 | logging.basicConfig(format='[%(asctime)s %(levelname)s] %(message)s', 103 | datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) 104 | 105 | # add parser 106 | parser = argparse.ArgumentParser() 107 | subparsers = parser.add_subparsers(dest='command') 108 | 109 | # Training 
command 110 | p_train = subparsers.add_parser('train_base') 111 | p_train.add_argument('config', type=str) 112 | p_train.add_argument('exper_name', type=str) 113 | p_train.add_argument('--eval', action='store_true') 114 | p_train.add_argument('--debug', action='store_true', default=False, 115 | help='turn on debugging mode') 116 | p_train.set_defaults(func=train_base) 117 | 118 | # Training command 119 | p_train = subparsers.add_parser('train_joint') 120 | p_train.add_argument('config', type=str) 121 | p_train.add_argument('exper_name', type=str) 122 | p_train.add_argument('--eval', action='store_true') 123 | p_train.add_argument('--debug', action='store_true', default=False, 124 | help='turn on debugging mode') 125 | p_train.set_defaults(func=train_joint) 126 | 127 | args = parser.parse_args() 128 | 129 | if args.debug: 130 | logging.basicConfig(format='[%(asctime)s %(levelname)s] %(message)s', 131 | datefmt='%m/%d/%Y %H:%M:%S', level=logging.DEBUG) 132 | 133 | with open(args.config, 'r') as f: 134 | config = yaml.safe_load(f) 135 | # EXPER_PATH from settings.py 136 | output_dir = os.path.join(EXPER_PATH, args.exper_name) 137 | os.makedirs(output_dir, exist_ok=True) 138 | 139 | # with capture_outputs(os.path.join(output_dir, 'log')): 140 | logging.info('Running command {}'.format(args.command.upper())) 141 | args.func(config, output_dir, args) 142 | 143 | 144 | -------------------------------------------------------------------------------- /train_tutorial.md: -------------------------------------------------------------------------------- 1 | # Training tutorial 2 | - This tutorial walks through how to load a pretrained model and run the training script. 3 | 4 | ## environment 5 | ``` 6 | conda create --name py36-torch python=3.6 7 | pip install -r requirements.txt 8 | ``` 9 | 10 | ## Required package 11 | - roi_pool: 12 | - install the package: https://github.com/open-mmlab/mmdetection/blob/master/INSTALL.md 13 | - https://github.com/open-mmlab/mmdetection/tree/master/mmdet/ops/roi_pool 14 | - put 'roi_pool_cuda.cpython-36m-x86_64-linux-gnu.so' in 'utils/roi_pool/' 15 | 16 | ## Required settings 17 | - check the config file 18 | - check the model path 19 | - set in 'pretrained' 20 | - set 'retrain' to false 21 | - (set 'reset_iter' to false) 22 | - check the data path (a symlink created with ln -s works) 23 | - put the data in the path 'datasets' 24 | - kitti: 'datasets/kitti_wVal' (default in settings.py) 25 | - the folder name should match the one listed under ['data']['dataset'] 26 | - copy the files from 'datasets/kitti_split' to 'datasets/kitti_wVal' 27 | ``` 28 | cp datasets/kitti_split/train.txt datasets/kitti_wVal/ 29 | cp datasets/kitti_split/val.txt datasets/kitti_wVal/ 30 | ``` 31 | - check the labels path 32 | - check the path: ['data']['labels'] 33 | - kitti: logs/magicpoint_synth20_homoAdapt100_kitti_h384/predictions (default) 34 | - the path uses the base path EXPER_PATH (listed in settings.py) 35 | 36 | ## Run the code 37 | ``` 38 | python train4.py 39 | python train4.py train_joint --debug configs/superpoint_kitti_train_heatmap.yaml superpoint_kitti --eval 40 | 41 | ``` 42 | 43 | ## Related files 44 | - train4.py: training script (loads 'train_model_frontend') 45 | - train_model_frontend.py: class for training 46 | - configs/superpoint_coco_train.yaml: path and parameter settings 47 | 48 | ## Code logic 49 | 50 | ## testing log 51 | - 2019/7/11 52 | - test python train4.py train_joint --debug configs/superpoint_kitti_train_heatmap.yaml superpoint_kitti --eval 53 | - environment: python: 3.6,
pytorch: 1.1, cuda:10 54 | -------------------------------------------------------------------------------- /utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/utils/__init__.py -------------------------------------------------------------------------------- /utils/correspondence_tools/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/utils/correspondence_tools/__init__.py -------------------------------------------------------------------------------- /utils/correspondence_tools/correspondence_plotter.py: -------------------------------------------------------------------------------- 1 | import matplotlib.image as mpimg 2 | import matplotlib.pyplot as plt 3 | from matplotlib.patches import Circle 4 | 5 | def plot_correspondences(images, uv_a, uv_b, use_previous_plot=None, circ_color='g', show=True): 6 | if use_previous_plot is None: 7 | fig, axes = plt.subplots(nrows=2, ncols=2) 8 | else: 9 | fig, axes = use_previous_plot[0], use_previous_plot[1] 10 | 11 | fig.set_figheight(10) 12 | fig.set_figwidth(15) 13 | pixel_locs = [uv_a, uv_b, uv_a, uv_b] 14 | axes = axes.flat[0:] 15 | if use_previous_plot is not None: 16 | axes = [axes[1], axes[3]] 17 | images = [images[1], images[3]] 18 | pixel_locs = [pixel_locs[1], pixel_locs[3]] 19 | for ax, img, pixel_loc in zip(axes[0:], images, pixel_locs): 20 | ax.set_aspect('equal') 21 | if isinstance(pixel_loc[0], int) or isinstance(pixel_loc[0], float): 22 | circ = Circle(pixel_loc, radius=10, facecolor=circ_color, edgecolor='white', fill=True ,linewidth = 2.0, linestyle='solid') 23 | ax.add_patch(circ) 24 | else: 25 | for x,y in zip(pixel_loc[0],pixel_loc[1]): 26 | circ = Circle((x,y), radius=10, facecolor=circ_color, edgecolor='white', fill=True ,linewidth = 2.0, linestyle='solid') 27 | ax.add_patch(circ) 28 | ax.imshow(img) 29 | if show: 30 | plt.show() 31 | return None 32 | else: 33 | return fig, axes 34 | 35 | def plot_correspondences_from_dir(log_dir, img_a, img_b, uv_a, uv_b, use_previous_plot=None, circ_color='g', show=True): 36 | img1_filename = log_dir+"/images/"+img_a+"_rgb.png" 37 | img2_filename = log_dir+"/images/"+img_b+"_rgb.png" 38 | img1_depth_filename = log_dir+"/images/"+img_a+"_depth.png" 39 | img2_depth_filename = log_dir+"/images/"+img_b+"_depth.png" 40 | images = [img1_filename, img2_filename, img1_depth_filename, img2_depth_filename] 41 | images = [mpimg.imread(x) for x in images] 42 | return plot_correspondences(images, uv_a, uv_b, use_previous_plot=use_previous_plot, circ_color=circ_color, show=show) 43 | 44 | def plot_correspondences_direct(img_a_rgb, img_a_depth, img_b_rgb, img_b_depth, uv_a, uv_b, use_previous_plot=None, circ_color='g', show=True): 45 | """ 46 | 47 | Plots rgb and depth image pair along with circles at pixel locations 48 | :param img_a_rgb: PIL.Image.Image 49 | :param img_a_depth: PIL.Image.Image 50 | :param img_b_rgb: PIL.Image.Image 51 | :param img_b_depth: PIL.Image.Image 52 | :param uv_a: (u,v) pixel location, or list of pixel locations 53 | :param uv_b: (u,v) pixel location, or list of pixel locations 54 | :param use_previous_plot: 55 | :param circ_color: str 56 | :param show: 57 | :return: 58 | """ 59 | images = [img_a_rgb, img_b_rgb, img_a_depth, img_b_depth] 60 | return plot_correspondences(images, 
uv_a, uv_b, use_previous_plot=use_previous_plot, circ_color=circ_color, show=show) 61 | 62 | -------------------------------------------------------------------------------- /utils/cp_labels.py: -------------------------------------------------------------------------------- 1 | """copy labels out of images (step 2) 2 | """ 3 | 4 | import subprocess 5 | from glob import glob 6 | import os 7 | 8 | source_folder = 'magicpoint_synth20_homoAdapt100_kitti_h384' 9 | target_folder = f"{source_folder}_labels" 10 | base_path = '/data/kitti' 11 | middle_path = 'predictions/' 12 | final_folder = 'train' 13 | folders = glob(f'{base_path}/{source_folder}/{middle_path}/{final_folder}/*') 14 | 15 | # print(f"folders: {folders}") 16 | for f in folders: 17 | if os.path.isdir(f) == False: 18 | continue 19 | f_target = str(f).replace(source_folder, target_folder) 20 | command = f'rsync -rh {f}/*.npz {f_target}' 21 | print(f"command: {command}") 22 | subprocess.run(f"{command}", shell=True, check=True) 23 | 24 | print(f"total folders: {len(folders)}") 25 | -------------------------------------------------------------------------------- /utils/d2s.py: -------------------------------------------------------------------------------- 1 | """Module used to change 2D labels to 3D labels and vise versa. 2 | Mimic function from tensorflow. 3 | 4 | """ 5 | 6 | import torch 7 | import torch.nn as nn 8 | class DepthToSpace(nn.Module): 9 | def __init__(self, block_size): 10 | super(DepthToSpace, self).__init__() 11 | self.block_size = block_size 12 | self.block_size_sq = block_size*block_size 13 | 14 | def forward(self, input): 15 | output = input.permute(0, 2, 3, 1) 16 | (batch_size, d_height, d_width, d_depth) = output.size() 17 | s_depth = int(d_depth / self.block_size_sq) 18 | s_width = int(d_width * self.block_size) 19 | s_height = int(d_height * self.block_size) 20 | t_1 = output.reshape(batch_size, d_height, d_width, self.block_size_sq, s_depth) 21 | spl = t_1.split(self.block_size, 3) 22 | stack = [t_t.reshape(batch_size, d_height, s_width, s_depth) for t_t in spl] 23 | output = torch.stack(stack,0).transpose(0,1).permute(0,2,1,3,4).reshape(batch_size, s_height, s_width, s_depth) 24 | output = output.permute(0, 3, 1, 2) 25 | return output 26 | 27 | class SpaceToDepth(nn.Module): 28 | def __init__(self, block_size): 29 | super(SpaceToDepth, self).__init__() 30 | self.block_size = block_size 31 | self.block_size_sq = block_size*block_size 32 | 33 | def forward(self, input): 34 | output = input.permute(0, 2, 3, 1) 35 | (batch_size, s_height, s_width, s_depth) = output.size() 36 | d_depth = s_depth * self.block_size_sq 37 | d_width = int(s_width / self.block_size) 38 | d_height = int(s_height / self.block_size) 39 | t_1 = output.split(self.block_size, 2) 40 | stack = [t_t.reshape(batch_size, d_height, d_depth) for t_t in t_1] 41 | output = torch.stack(stack, 1) 42 | output = output.permute(0, 2, 1, 3) 43 | output = output.permute(0, 3, 1, 2) 44 | return output 45 | -------------------------------------------------------------------------------- /utils/draw.py: -------------------------------------------------------------------------------- 1 | """util functions for visualization 2 | 3 | """ 4 | 5 | import argparse 6 | import time 7 | import csv 8 | import yaml 9 | import os 10 | import logging 11 | from pathlib import Path 12 | 13 | import numpy as np 14 | from tqdm import tqdm 15 | 16 | from tensorboardX import SummaryWriter 17 | import cv2 18 | import matplotlib.pyplot as plt 19 | 20 | 21 | def plot_imgs(imgs, 
titles=None, cmap='brg', ylabel='', normalize=False, ax=None, dpi=100): 22 | n = len(imgs) 23 | if not isinstance(cmap, list): 24 | cmap = [cmap]*n 25 | if ax is None: 26 | fig, ax = plt.subplots(1, n, figsize=(6*n, 6), dpi=dpi) 27 | if n == 1: 28 | ax = [ax] 29 | else: 30 | if not isinstance(ax, list): 31 | ax = [ax] 32 | assert len(ax) == len(imgs) 33 | for i in range(n): 34 | if imgs[i].shape[-1] == 3: 35 | imgs[i] = imgs[i][..., ::-1] # BGR to RGB 36 | ax[i].imshow(imgs[i], cmap=plt.get_cmap(cmap[i]), 37 | vmin=None if normalize else 0, 38 | vmax=None if normalize else 1) 39 | if titles: 40 | ax[i].set_title(titles[i]) 41 | ax[i].get_yaxis().set_ticks([]) 42 | ax[i].get_xaxis().set_ticks([]) 43 | for spine in ax[i].spines.values(): # remove frame 44 | spine.set_visible(False) 45 | ax[0].set_ylabel(ylabel) 46 | plt.tight_layout() 47 | 48 | 49 | # from utils.draw import img_overlap 50 | def img_overlap(img_r, img_g, img_gray): # img_b repeat 51 | img = np.concatenate((img_gray, img_gray, img_gray), axis=0) 52 | img[0, :, :] += img_r[0, :, :] 53 | img[1, :, :] += img_g[0, :, :] 54 | img[img > 1] = 1 55 | img[img < 0] = 0 56 | return img 57 | 58 | def draw_keypoints(img, corners, color=(0, 255, 0), radius=3, s=3): 59 | ''' 60 | 61 | :param img: 62 | image: 63 | numpy [H, W] 64 | :param corners: 65 | Points 66 | numpy [N, 2] 67 | :param color: 68 | :param radius: 69 | :param s: 70 | :return: 71 | overlaying image 72 | numpy [H, W] 73 | ''' 74 | img = np.repeat(cv2.resize(img, None, fx=s, fy=s)[..., np.newaxis], 3, -1) 75 | for c in np.stack(corners).T: 76 | # cv2.circle(img, tuple(s * np.flip(c, 0)), radius, color, thickness=-1) 77 | cv2.circle(img, tuple((s * c[:2]).astype(int)), radius, color, thickness=-1) 78 | return img 79 | 80 | # def draw_keypoints(img, corners, color=(0, 255, 0), radius=3, s=3): 81 | # ''' 82 | 83 | # :param img: 84 | # np (H, W) 85 | # :param corners: 86 | # np (3, N) 87 | # :param color: 88 | # :param radius: 89 | # :param s: 90 | # :return: 91 | # ''' 92 | # img = np.repeat(cv2.resize(img, None, fx=s, fy=s)[..., np.newaxis], 3, -1) 93 | # for c in np.stack(corners).T: 94 | # # cv2.circle(img, tuple(s * np.flip(c, 0)), radius, color, thickness=-1) 95 | # cv2.circle(img, tuple((s*c[:2]).astype(int)), radius, color, thickness=-1) 96 | # return img 97 | 98 | def draw_matches(rgb1, rgb2, match_pairs, lw = 0.5, color='g', if_fig=True, 99 | filename='matches.png', show=False): 100 | ''' 101 | 102 | :param rgb1: 103 | image1 104 | numpy (H, W) 105 | :param rgb2: 106 | image2 107 | numpy (H, W) 108 | :param match_pairs: 109 | numpy (keypoiny1 x, keypoint1 y, keypoint2 x, keypoint 2 y) 110 | :return: 111 | None 112 | ''' 113 | from matplotlib import pyplot as plt 114 | 115 | h1, w1 = rgb1.shape[:2] 116 | h2, w2 = rgb2.shape[:2] 117 | canvas = np.zeros((max(h1, h2), w1 + w2, 3), dtype=rgb1.dtype) 118 | canvas[:h1, :w1] = rgb1[:,:,np.newaxis] 119 | canvas[:h2, w1:] = rgb2[:,:,np.newaxis] 120 | # fig = plt.figure(frameon=False) 121 | if if_fig: 122 | fig = plt.figure(figsize=(15,5)) 123 | plt.axis("off") 124 | plt.imshow(canvas, zorder=1) 125 | 126 | xs = match_pairs[:, [0, 2]] 127 | xs[:, 1] += w1 128 | ys = match_pairs[:, [1, 3]] 129 | 130 | alpha = 1 131 | sf = 5 132 | # lw = 0.5 133 | # markersize = 1 134 | markersize = 2 135 | 136 | plt.plot( 137 | xs.T, ys.T, 138 | alpha=alpha, 139 | linestyle="-", 140 | linewidth=lw, 141 | aa=False, 142 | marker='o', 143 | markersize=markersize, 144 | fillstyle='none', 145 | color=color, 146 | zorder=2, 147 | # color=[0.0, 0.8, 0.0], 
148 | ); 149 | plt.tight_layout() 150 | if filename is not None: 151 | plt.savefig(filename, dpi=300, bbox_inches='tight') 152 | print('#Matches = {}'.format(len(match_pairs))) 153 | if show: 154 | plt.show() 155 | 156 | 157 | 158 | # from utils.draw import draw_matches_cv 159 | def draw_matches_cv(data): 160 | keypoints1 = [cv2.KeyPoint(p[1], p[0], 1) for p in data['keypoints1']] 161 | keypoints2 = [cv2.KeyPoint(p[1], p[0], 1) for p in data['keypoints2']] 162 | inliers = data['inliers'].astype(bool) 163 | matches = np.array(data['matches'])[inliers].tolist() 164 | def to3dim(img): 165 | if img.ndim == 2: 166 | img = img[:, :, np.newaxis] 167 | return img 168 | img1 = to3dim(data['image1']) 169 | img2 = to3dim(data['image2']) 170 | img1 = np.concatenate([img1, img1, img1], axis=2) 171 | img2 = np.concatenate([img2, img2, img2], axis=2) 172 | return cv2.drawMatches(img1, keypoints1, img2, keypoints2, matches, 173 | None, matchColor=(0,255,0), singlePointColor=(0, 0, 255)) 174 | 175 | 176 | def drawBox(points, img, offset=np.array([0,0]), color=(0,255,0)): 177 | # print("origin", points) 178 | offset = offset[::-1] 179 | points = points + offset 180 | points = points.astype(int) 181 | for i in range(len(points)): 182 | img = img + cv2.line(np.zeros_like(img),tuple(points[-1+i]), tuple(points[i]), color,5) 183 | return img 184 | 185 | -------------------------------------------------------------------------------- /utils/logging.py: -------------------------------------------------------------------------------- 1 | """colorful logging 2 | # import the whole file 3 | """ 4 | 5 | import coloredlogs, logging 6 | logging.basicConfig() 7 | logger = logging.getLogger() 8 | coloredlogs.install(level='INFO', logger=logger) 9 | 10 | from termcolor import colored, cprint 11 | # from sty import fg, bg, ef, rs 12 | 13 | def toRed(text): 14 | return colored(text, 'red', attrs=['reverse']) 15 | 16 | def toCyan(text): 17 | return colored(text, 'cyan', attrs=['reverse']) 18 | -------------------------------------------------------------------------------- /utils/loss_functions/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/eric-yyjau/pytorch-superpoint/5eb75d74df27c07f6e7311df8f167e2a9c01a798/utils/loss_functions/__init__.py -------------------------------------------------------------------------------- /utils/photometric_augmentation.py: -------------------------------------------------------------------------------- 1 | """ deprecated: photometric augmentation from tensorflow implementation 2 | # not used in our pipeline 3 | # need to verify if synthetic generation uses it. 4 | """ 5 | import cv2 as cv 6 | import numpy as np 7 | import tensorflow as tf 8 | 9 | 10 | augmentations = [ 11 | 'additive_gaussian_noise', 12 | 'additive_speckle_noise', 13 | 'random_brightness', 14 | 'random_contrast', 15 | 'additive_shade', 16 | 'motion_blur' 17 | ] 18 | 19 | 20 | def additive_gaussian_noise(image, stddev_range=[5, 95]): 21 | stddev = tf.random_uniform((), *stddev_range) 22 | noise = tf.random_normal(tf.shape(image), stddev=stddev) 23 | noisy_image = tf.clip_by_value(image + noise, 0, 255) 24 | return noisy_image 25 | 26 | 27 | def additive_speckle_noise(image, prob_range=[0.0, 0.005]): 28 | prob = tf.random_uniform((), *prob_range) 29 | sample = tf.random_uniform(tf.shape(image)) 30 | noisy_image = tf.where(sample <= prob, tf.zeros_like(image), image) 31 | noisy_image = tf.where(sample >= (1. 
- prob), 255.*tf.ones_like(image), noisy_image) 32 | return noisy_image 33 | 34 | 35 | def random_brightness(image, max_abs_change=50): 36 | return tf.clip_by_value(tf.image.random_brightness(image, max_abs_change), 0, 255) 37 | 38 | 39 | def random_contrast(image, strength_range=[0.5, 1.5]): 40 | return tf.clip_by_value(tf.image.random_contrast(image, *strength_range), 0, 255) 41 | 42 | 43 | def additive_shade(image, nb_ellipses=20, transparency_range=[-0.5, 0.8], 44 | kernel_size_range=[250, 350]): 45 | 46 | def _py_additive_shade(img): 47 | min_dim = min(img.shape[:2]) / 4 48 | mask = np.zeros(img.shape[:2], np.uint8) 49 | for i in range(nb_ellipses): 50 | ax = int(max(np.random.rand() * min_dim, min_dim / 5)) 51 | ay = int(max(np.random.rand() * min_dim, min_dim / 5)) 52 | max_rad = max(ax, ay) 53 | x = np.random.randint(max_rad, img.shape[1] - max_rad) # center 54 | y = np.random.randint(max_rad, img.shape[0] - max_rad) 55 | angle = np.random.rand() * 90 56 | cv.ellipse(mask, (x, y), (ax, ay), angle, 0, 360, 255, -1) 57 | 58 | transparency = np.random.uniform(*transparency_range) 59 | kernel_size = np.random.randint(*kernel_size_range) 60 | if (kernel_size % 2) == 0: # kernel_size has to be odd 61 | kernel_size += 1 62 | mask = cv.GaussianBlur(mask.astype(np.float32), (kernel_size, kernel_size), 0) 63 | shaded = img * (1 - transparency * mask[..., np.newaxis]/255.) 64 | return np.clip(shaded, 0, 255) 65 | 66 | shaded = tf.py_func(_py_additive_shade, [image], tf.float32) 67 | res = tf.reshape(shaded, tf.shape(image)) 68 | return res 69 | 70 | 71 | def motion_blur(image, max_kernel_size=10): 72 | 73 | def _py_motion_blur(img): 74 | # Either vertial, hozirontal or diagonal blur 75 | mode = np.random.choice(['h', 'v', 'diag_down', 'diag_up']) 76 | ksize = np.random.randint(0, (max_kernel_size+1)/2)*2 + 1 # make sure is odd 77 | center = int((ksize-1)/2) 78 | kernel = np.zeros((ksize, ksize)) 79 | if mode == 'h': 80 | kernel[center, :] = 1. 81 | elif mode == 'v': 82 | kernel[:, center] = 1. 83 | elif mode == 'diag_down': 84 | kernel = np.eye(ksize) 85 | elif mode == 'diag_up': 86 | kernel = np.flip(np.eye(ksize), 0) 87 | var = ksize * ksize / 16. 
88 | grid = np.repeat(np.arange(ksize)[:, np.newaxis], ksize, axis=-1) 89 | gaussian = np.exp(-(np.square(grid-center)+np.square(grid.T-center))/(2.*var)) 90 | kernel *= gaussian 91 | kernel /= np.sum(kernel) 92 | img = cv.filter2D(img, -1, kernel) 93 | return img 94 | 95 | blurred = tf.py_func(_py_motion_blur, [image], tf.float32) 96 | return tf.reshape(blurred, tf.shape(image)) 97 | -------------------------------------------------------------------------------- /utils/print_tool.py: -------------------------------------------------------------------------------- 1 | """tools to print object shape or type 2 | 3 | """ 4 | 5 | 6 | # from utils.print_tool import print_config 7 | def print_config(config, file=None): 8 | print('='*10, ' important config: ', '='*10, file=file) 9 | for item in list(config): 10 | print(item, ": ", config[item], file=file) 11 | 12 | print('='*32) 13 | 14 | # from utils.print_tool import print_dict_attr 15 | def print_dict_attr(dictionary, attr=None, file=None): 16 | for item in list(dictionary): 17 | d = dictionary[item] 18 | if attr == None: 19 | print(item, ": ", d, file=file) 20 | else: 21 | if hasattr(d, attr): 22 | print(item, ": ", getattr(d, attr), file=file) 23 | else: 24 | print(item, ": ", len(d), file=file) 25 | 26 | import logging 27 | # from utils.print_tool import datasize 28 | def datasize(train_loader, config, tag='train'): 29 | logging.info('== %s split size %d in %d batches'%\ 30 | (tag, len(train_loader)*config['model']['batch_size'], len(train_loader))) 31 | pass -------------------------------------------------------------------------------- /utils/tools.py: -------------------------------------------------------------------------------- 1 | """tools to combine dictionary 2 | 3 | """ 4 | import collections 5 | 6 | 7 | def dict_update(d, u): 8 | """Improved update for nested dictionaries. 9 | 10 | Arguments: 11 | d: The dictionary to be updated. 12 | u: The update dictionary. 13 | 14 | Returns: 15 | The updated dictionary. 16 | """ 17 | for k, v in u.items(): 18 | if isinstance(v, collections.Mapping): 19 | d[k] = dict_update(d.get(k, {}), v) 20 | else: 21 | d[k] = v 22 | return d 23 | -------------------------------------------------------------------------------- /utils/var_dim.py: -------------------------------------------------------------------------------- 1 | """change the dimension of tensor/ numpy array 2 | """ 3 | 4 | import numpy as np 5 | import torch 6 | 7 | 8 | # from utils.var_dim import to3dim 9 | def to3dim(img): 10 | if img.ndim == 2: 11 | img = img[:, :, np.newaxis] 12 | return img 13 | 14 | 15 | # torch 16 | # from utils.var_dim import tensorto4d 17 | def tensorto4d(inp): 18 | if len(inp.shape) == 2: 19 | inp = inp.view(1, 1, inp.shape[0], inp.shape[1]) 20 | elif len(inp.shape) == 3: 21 | inp = inp.view(1, inp.shape[0], inp.shape[1], inp.shape[2]) 22 | return inp 23 | 24 | # torch 25 | # from utils.var_dim import squeezeToNumpy 26 | def squeezeToNumpy(tensor_arr): 27 | return tensor_arr.detach().cpu().numpy().squeeze() 28 | 29 | # from utils.var_dim import toNumpy 30 | def toNumpy(tensor): 31 | return tensor.detach().cpu().numpy() --------------------------------------------------------------------------------
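As a quick sanity check of the small utility modules above, here is a short usage sketch (not part of the repository; the config values are made up for illustration). It assumes the repo's Python 3.6 environment, since utils/tools.py relies on collections.Mapping (removed in Python 3.10), and that it is run from the repository root so that `utils` is importable.

```python
import torch

from utils.tools import dict_update
from utils.var_dim import tensorto4d, toNumpy, squeezeToNumpy

# dict_update merges nested dicts key by key instead of clobbering whole sub-dicts
base = {'model': {'batch_size': 8, 'name': 'SuperPointNet_gauss2'}}
override = {'model': {'batch_size': 2}}
print(dict_update(base, override))  # {'model': {'batch_size': 2, 'name': 'SuperPointNet_gauss2'}}

# var_dim helpers: promote an (H, W) tensor to (N, C, H, W), then back to numpy
img = torch.rand(240, 320)
img4d = tensorto4d(img)             # torch.Size([1, 1, 240, 320])
print(toNumpy(img4d).shape)         # (1, 1, 240, 320)
print(squeezeToNumpy(img4d).shape)  # (240, 320)
```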