├── .gitmodules ├── LICENSE ├── README.md ├── __init__.py ├── assets ├── grid_random_5_10_hist.png └── grid_random_5_10_roc.png ├── cache ├── test_128_pairs.pth ├── test_256_pairs.pth ├── test_512_pairs.pth ├── test_STGL_pairs.pth ├── val_128_pairs.pth ├── val_256_pairs.pth ├── val_512_pairs.pth └── vocabulary │ └── dinov2_vitg14 │ └── l31_value_c32 │ └── thermal │ └── c_centers.pt ├── env.yml ├── eval_global.sh ├── eval_local_ue.sh ├── generate_pairs.sh ├── global_pipeline ├── LICENSE ├── LICENSE_FOR_REFERENCE ├── README.md ├── __init__.py ├── anyloc │ ├── LICENSE │ ├── __init__.py │ └── utilities.py ├── commons.py ├── datasets_ws.py ├── eval.py ├── eval_anyloc.py ├── eval_pix2pix.py ├── eval_pix2pix_generate_h5.py ├── eval_pix2pix_generate_h5_exclude.py ├── folder_config.yml ├── h5_merger.py ├── h5_transformer.py ├── model │ ├── Deit.py │ ├── __init__.py │ ├── aggregation.py │ ├── cct │ │ ├── __init__.py │ │ ├── cct.py │ │ ├── embedder.py │ │ ├── helpers.py │ │ ├── stochastic_depth.py │ │ ├── tokenizer.py │ │ └── transformers.py │ ├── functional.py │ ├── network.py │ ├── non_local.py │ ├── normalization.py │ ├── pix2pix_networks │ │ ├── LICENSE │ │ ├── __init__.py │ │ └── networks.py │ ├── pos_embed.py │ ├── r2former.py │ └── sync_batchnorm │ │ ├── __init__.py │ │ ├── batchnorm.py │ │ ├── batchnorm_reimpl.py │ │ ├── comm.py │ │ ├── replicate.py │ │ └── unittest.py ├── parser.py ├── plotting.py ├── test.py ├── test_anyloc.py ├── train.py ├── train_pix2pix.py └── util.py ├── keypoint_pipeline ├── myloftr │ ├── commons.py │ ├── datasets_4cor_img.py │ ├── model │ │ ├── network.py │ │ └── sync_batchnorm │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-39.pyc │ │ │ ├── batchnorm.cpython-39.pyc │ │ │ ├── comm.cpython-39.pyc │ │ │ └── replicate.cpython-39.pyc │ │ │ ├── batchnorm.py │ │ │ ├── batchnorm_reimpl.py │ │ │ ├── comm.py │ │ │ ├── replicate.py │ │ │ └── unittest.py │ ├── myevaluate.py │ ├── parser.py │ ├── plot_hist.py │ └── utils.py └── myr2d2 │ ├── commons.py │ ├── datasets_4cor_img.py │ ├── evaluate.py │ ├── extract.py │ ├── model │ ├── ap_loss.py │ ├── losses.py │ ├── network.py │ ├── patchnet.py │ ├── reliability_loss.py │ ├── repeatability_loss.py │ ├── sampler.py │ └── sync_batchnorm │ │ ├── __init__.py │ │ ├── batchnorm.py │ │ ├── batchnorm_reimpl.py │ │ ├── comm.py │ │ ├── replicate.py │ │ └── unittest.py │ ├── myevaluate.py │ ├── parser.py │ ├── plot_hist.py │ ├── tools │ ├── common.py │ ├── dataloader.py │ ├── trainer.py │ ├── transforms.py │ ├── transforms_tools.py │ └── viz.py │ ├── train_key.py │ └── utils.py ├── local_pipeline ├── LICENSE ├── __init__.py ├── commons.py ├── corr.py ├── datasets_4cor_img.py ├── ensembles │ ├── ensemble_128_DHN.txt │ ├── ensemble_128_IHN.txt │ ├── ensemble_128_STHN.txt │ ├── ensemble_256_DHN.txt │ ├── ensemble_256_IHN.txt │ ├── ensemble_256_STHN.txt │ ├── ensemble_512_DHN.txt │ ├── ensemble_512_IHN.txt │ └── ensemble_512_STHN.txt ├── evaluate.py ├── extractor.py ├── model │ ├── ATT │ │ ├── attention_layer.py │ │ ├── attention_package.egg-info │ │ │ ├── PKG-INFO │ │ │ ├── SOURCES.txt │ │ │ ├── dependency_links.txt │ │ │ └── top_level.txt │ │ ├── dist │ │ │ └── attention_package-0.2-py3.9-linux-x86_64.egg │ │ ├── pybind11-master │ │ │ ├── .appveyor.yml │ │ │ ├── .gitignore │ │ │ ├── .gitmodules │ │ │ ├── .readthedocs.yml │ │ │ ├── .travis.yml │ │ │ ├── CMakeLists.txt │ │ │ ├── CONTRIBUTING.md │ │ │ ├── ISSUE_TEMPLATE.md │ │ │ ├── LICENSE │ │ │ ├── MANIFEST.in │ │ │ ├── README.md │ │ │ ├── docs │ │ │ │ ├── Doxyfile │ │ │ │ 
├── _static │ │ │ │ │ └── theme_overrides.css │ │ │ │ ├── advanced │ │ │ │ │ ├── cast │ │ │ │ │ │ ├── chrono.rst │ │ │ │ │ │ ├── custom.rst │ │ │ │ │ │ ├── eigen.rst │ │ │ │ │ │ ├── functional.rst │ │ │ │ │ │ ├── index.rst │ │ │ │ │ │ ├── overview.rst │ │ │ │ │ │ ├── stl.rst │ │ │ │ │ │ └── strings.rst │ │ │ │ │ ├── classes.rst │ │ │ │ │ ├── embedding.rst │ │ │ │ │ ├── exceptions.rst │ │ │ │ │ ├── functions.rst │ │ │ │ │ ├── misc.rst │ │ │ │ │ ├── pycpp │ │ │ │ │ │ ├── index.rst │ │ │ │ │ │ ├── numpy.rst │ │ │ │ │ │ ├── object.rst │ │ │ │ │ │ └── utilities.rst │ │ │ │ │ └── smart_ptrs.rst │ │ │ │ ├── basics.rst │ │ │ │ ├── benchmark.py │ │ │ │ ├── benchmark.rst │ │ │ │ ├── changelog.rst │ │ │ │ ├── classes.rst │ │ │ │ ├── compiling.rst │ │ │ │ ├── conf.py │ │ │ │ ├── faq.rst │ │ │ │ ├── index.rst │ │ │ │ ├── intro.rst │ │ │ │ ├── limitations.rst │ │ │ │ ├── pybind11-logo.png │ │ │ │ ├── pybind11_vs_boost_python1.png │ │ │ │ ├── pybind11_vs_boost_python1.svg │ │ │ │ ├── pybind11_vs_boost_python2.png │ │ │ │ ├── pybind11_vs_boost_python2.svg │ │ │ │ ├── reference.rst │ │ │ │ ├── release.rst │ │ │ │ ├── requirements.txt │ │ │ │ └── upgrade.rst │ │ │ ├── include │ │ │ │ └── pybind11 │ │ │ │ │ ├── attr.h │ │ │ │ │ ├── buffer_info.h │ │ │ │ │ ├── cast.h │ │ │ │ │ ├── chrono.h │ │ │ │ │ ├── common.h │ │ │ │ │ ├── complex.h │ │ │ │ │ ├── detail │ │ │ │ │ ├── class.h │ │ │ │ │ ├── common.h │ │ │ │ │ ├── descr.h │ │ │ │ │ ├── init.h │ │ │ │ │ ├── internals.h │ │ │ │ │ └── typeid.h │ │ │ │ │ ├── eigen.h │ │ │ │ │ ├── embed.h │ │ │ │ │ ├── eval.h │ │ │ │ │ ├── functional.h │ │ │ │ │ ├── iostream.h │ │ │ │ │ ├── numpy.h │ │ │ │ │ ├── operators.h │ │ │ │ │ ├── options.h │ │ │ │ │ ├── pybind11.h │ │ │ │ │ ├── pytypes.h │ │ │ │ │ ├── stl.h │ │ │ │ │ └── stl_bind.h │ │ │ ├── pybind11 │ │ │ │ ├── __init__.py │ │ │ │ ├── __main__.py │ │ │ │ └── _version.py │ │ │ ├── setup.cfg │ │ │ ├── setup.py │ │ │ ├── tests │ │ │ │ ├── CMakeLists.txt │ │ │ │ ├── conftest.py │ │ │ │ ├── constructor_stats.h │ │ │ │ ├── cross_module_gil_utils.cpp │ │ │ │ ├── local_bindings.h │ │ │ │ ├── object.h │ │ │ │ ├── pybind11_cross_module_tests.cpp │ │ │ │ ├── pybind11_tests.cpp │ │ │ │ ├── pybind11_tests.h │ │ │ │ ├── pytest.ini │ │ │ │ ├── test_async.cpp │ │ │ │ ├── test_async.py │ │ │ │ ├── test_buffers.cpp │ │ │ │ ├── test_buffers.py │ │ │ │ ├── test_builtin_casters.cpp │ │ │ │ ├── test_builtin_casters.py │ │ │ │ ├── test_call_policies.cpp │ │ │ │ ├── test_call_policies.py │ │ │ │ ├── test_callbacks.cpp │ │ │ │ ├── test_callbacks.py │ │ │ │ ├── test_chrono.cpp │ │ │ │ ├── test_chrono.py │ │ │ │ ├── test_class.cpp │ │ │ │ ├── test_class.py │ │ │ │ ├── test_cmake_build │ │ │ │ │ ├── CMakeLists.txt │ │ │ │ │ ├── embed.cpp │ │ │ │ │ ├── installed_embed │ │ │ │ │ │ └── CMakeLists.txt │ │ │ │ │ ├── installed_function │ │ │ │ │ │ └── CMakeLists.txt │ │ │ │ │ ├── installed_target │ │ │ │ │ │ └── CMakeLists.txt │ │ │ │ │ ├── main.cpp │ │ │ │ │ ├── subdirectory_embed │ │ │ │ │ │ └── CMakeLists.txt │ │ │ │ │ ├── subdirectory_function │ │ │ │ │ │ └── CMakeLists.txt │ │ │ │ │ ├── subdirectory_target │ │ │ │ │ │ └── CMakeLists.txt │ │ │ │ │ └── test.py │ │ │ │ ├── test_constants_and_functions.cpp │ │ │ │ ├── test_constants_and_functions.py │ │ │ │ ├── test_copy_move.cpp │ │ │ │ ├── test_copy_move.py │ │ │ │ ├── test_custom_type_casters.cpp │ │ │ │ ├── test_custom_type_casters.py │ │ │ │ ├── test_docstring_options.cpp │ │ │ │ ├── test_docstring_options.py │ │ │ │ ├── test_eigen.cpp │ │ │ │ ├── test_eigen.py │ │ │ │ ├── test_embed │ │ │ │ │ ├── 
CMakeLists.txt │ │ │ │ │ ├── catch.cpp │ │ │ │ │ ├── external_module.cpp │ │ │ │ │ ├── test_interpreter.cpp │ │ │ │ │ └── test_interpreter.py │ │ │ │ ├── test_enum.cpp │ │ │ │ ├── test_enum.py │ │ │ │ ├── test_eval.cpp │ │ │ │ ├── test_eval.py │ │ │ │ ├── test_eval_call.py │ │ │ │ ├── test_exceptions.cpp │ │ │ │ ├── test_exceptions.py │ │ │ │ ├── test_factory_constructors.cpp │ │ │ │ ├── test_factory_constructors.py │ │ │ │ ├── test_gil_scoped.cpp │ │ │ │ ├── test_gil_scoped.py │ │ │ │ ├── test_iostream.cpp │ │ │ │ ├── test_iostream.py │ │ │ │ ├── test_kwargs_and_defaults.cpp │ │ │ │ ├── test_kwargs_and_defaults.py │ │ │ │ ├── test_local_bindings.cpp │ │ │ │ ├── test_local_bindings.py │ │ │ │ ├── test_methods_and_attributes.cpp │ │ │ │ ├── test_methods_and_attributes.py │ │ │ │ ├── test_modules.cpp │ │ │ │ ├── test_modules.py │ │ │ │ ├── test_multiple_inheritance.cpp │ │ │ │ ├── test_multiple_inheritance.py │ │ │ │ ├── test_numpy_array.cpp │ │ │ │ ├── test_numpy_array.py │ │ │ │ ├── test_numpy_dtypes.cpp │ │ │ │ ├── test_numpy_dtypes.py │ │ │ │ ├── test_numpy_vectorize.cpp │ │ │ │ ├── test_numpy_vectorize.py │ │ │ │ ├── test_opaque_types.cpp │ │ │ │ ├── test_opaque_types.py │ │ │ │ ├── test_operator_overloading.cpp │ │ │ │ ├── test_operator_overloading.py │ │ │ │ ├── test_pickling.cpp │ │ │ │ ├── test_pickling.py │ │ │ │ ├── test_pytypes.cpp │ │ │ │ ├── test_pytypes.py │ │ │ │ ├── test_sequences_and_iterators.cpp │ │ │ │ ├── test_sequences_and_iterators.py │ │ │ │ ├── test_smart_ptr.cpp │ │ │ │ ├── test_smart_ptr.py │ │ │ │ ├── test_stl.cpp │ │ │ │ ├── test_stl.py │ │ │ │ ├── test_stl_binders.cpp │ │ │ │ ├── test_stl_binders.py │ │ │ │ ├── test_tagbased_polymorphic.cpp │ │ │ │ ├── test_tagbased_polymorphic.py │ │ │ │ ├── test_union.cpp │ │ │ │ ├── test_union.py │ │ │ │ ├── test_virtual_functions.cpp │ │ │ │ └── test_virtual_functions.py │ │ │ └── tools │ │ │ │ ├── FindCatch.cmake │ │ │ │ ├── FindEigen3.cmake │ │ │ │ ├── FindPythonLibsNew.cmake │ │ │ │ ├── check-style.sh │ │ │ │ ├── libsize.py │ │ │ │ ├── mkdoc.py │ │ │ │ ├── pybind11Config.cmake.in │ │ │ │ └── pybind11Tools.cmake │ │ ├── setup.py │ │ └── src │ │ │ ├── attention_cuda.cpp │ │ │ ├── attention_kernel.cu │ │ │ └── attention_kernel.h │ ├── __init__.py │ ├── baseline.py │ ├── network.py │ └── sync_batchnorm │ │ ├── __init__.py │ │ ├── batchnorm.py │ │ ├── batchnorm_reimpl.py │ │ ├── comm.py │ │ ├── replicate.py │ │ └── unittest.py ├── myevaluate.py ├── parser.py ├── plot_hist.py ├── train_4cor.py ├── update.py └── utils.py ├── scripts ├── global │ ├── eval.sbatch │ ├── eval_anyloc.sbatch │ ├── eval_satellite_translation_exclude_dense.sbatch │ ├── train_bing_thermal_partial_resnet50_gem_extended.sbatch │ ├── train_bing_thermal_partial_resnet50_gem_extended_DANN.sbatch │ ├── train_bing_thermal_partial_resnet50_netvlad_extended.sbatch │ ├── train_bing_thermal_partial_resnet50_netvlad_extended_DANN.sbatch │ ├── train_bing_thermal_translation_100.sbatch │ └── train_bing_thermal_translation_100_nocontrast.sbatch ├── keypoint │ ├── eval_loftr.sbatch │ ├── eval_r2d2.sbatch │ ├── train_50_r2d2.sbatch │ └── train_50_r2d2_loss.sbatch └── local_largest_1536 │ ├── eval_local_sparse_extended_2_ihn.sbatch │ ├── eval_local_sparse_extended_2_sthn.sbatch │ ├── eval_local_sparse_extended_2_ue1_c_ihn.sbatch │ ├── eval_local_sparse_extended_2_ue1_c_sthn.sbatch │ ├── eval_local_sparse_extended_2_ue1_c_sthn_val.sbatch │ ├── eval_local_sparse_extended_2_ue1_ce_ihn.sbatch │ ├── eval_local_sparse_extended_2_ue1_ce_sthn.sbatch │ ├── 
eval_local_sparse_extended_2_ue1_ce_val.sbatch │ ├── eval_local_sparse_extended_2_ue1_d_ihn.sbatch │ ├── eval_local_sparse_extended_2_ue1_d_sthn.sbatch │ ├── eval_local_sparse_extended_2_ue1_e_ihn.sbatch │ ├── eval_local_sparse_extended_2_ue1_e_sthn.sbatch │ ├── eval_local_sparse_extended_2_ue1_e_sthn_val.sbatch │ ├── eval_local_sparse_extended_2_val_ihn.sbatch │ ├── eval_local_sparse_extended_2_val_sthn.sbatch │ ├── train_local_sparse_extended_long_ihn.sbatch │ ├── train_local_sparse_extended_long_ihn_c_ue1r32.sbatch │ ├── train_local_sparse_extended_long_ihn_c_ue1r32_aug.sbatch │ ├── train_local_sparse_extended_long_ihn_d.sbatch │ ├── train_local_sparse_extended_long_load_f_aug64_c_ue1r32.sbatch │ ├── train_local_sparse_extended_long_load_f_aug64_c_ue1r32_aug.sbatch │ ├── train_local_sparse_extended_long_load_f_aug64_sthn.sbatch │ └── train_local_sparse_extended_long_load_f_aug64_sthn_d.sbatch ├── train_global.sh ├── train_local_ue.sh ├── transform_dataset.sh ├── utils ├── compare.py └── plotting.py ├── visualization_h5.ipynb └── visualization_map.ipynb /.gitmodules: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arplaboratory/UASTHN/1b27df8b5b4305f8f41936736a05e8117f7f20db/.gitmodules -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 ARPL 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arplaboratory/UASTHN/1b27df8b5b4305f8f41936736a05e8117f7f20db/__init__.py -------------------------------------------------------------------------------- /assets/grid_random_5_10_hist.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arplaboratory/UASTHN/1b27df8b5b4305f8f41936736a05e8117f7f20db/assets/grid_random_5_10_hist.png -------------------------------------------------------------------------------- /assets/grid_random_5_10_roc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arplaboratory/UASTHN/1b27df8b5b4305f8f41936736a05e8117f7f20db/assets/grid_random_5_10_roc.png -------------------------------------------------------------------------------- /cache/test_128_pairs.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arplaboratory/UASTHN/1b27df8b5b4305f8f41936736a05e8117f7f20db/cache/test_128_pairs.pth -------------------------------------------------------------------------------- /cache/test_256_pairs.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arplaboratory/UASTHN/1b27df8b5b4305f8f41936736a05e8117f7f20db/cache/test_256_pairs.pth -------------------------------------------------------------------------------- /cache/test_512_pairs.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arplaboratory/UASTHN/1b27df8b5b4305f8f41936736a05e8117f7f20db/cache/test_512_pairs.pth -------------------------------------------------------------------------------- /cache/test_STGL_pairs.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arplaboratory/UASTHN/1b27df8b5b4305f8f41936736a05e8117f7f20db/cache/test_STGL_pairs.pth -------------------------------------------------------------------------------- /cache/val_128_pairs.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arplaboratory/UASTHN/1b27df8b5b4305f8f41936736a05e8117f7f20db/cache/val_128_pairs.pth -------------------------------------------------------------------------------- /cache/val_256_pairs.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arplaboratory/UASTHN/1b27df8b5b4305f8f41936736a05e8117f7f20db/cache/val_256_pairs.pth -------------------------------------------------------------------------------- /cache/val_512_pairs.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arplaboratory/UASTHN/1b27df8b5b4305f8f41936736a05e8117f7f20db/cache/val_512_pairs.pth -------------------------------------------------------------------------------- /cache/vocabulary/dinov2_vitg14/l31_value_c32/thermal/c_centers.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arplaboratory/UASTHN/1b27df8b5b4305f8f41936736a05e8117f7f20db/cache/vocabulary/dinov2_vitg14/l31_value_c32/thermal/c_centers.pt 
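The cache/ directory above ships precomputed query-database pair files (produced by the --generate_test_pairs runs in generate_pairs.sh below) plus DINOv2 cluster centers for the AnyLoc-style retrieval path. A minimal inspection sketch, assuming only that the .pth files are readable with torch.load; the internal layout of the loaded object is an assumption, not documented here:

import torch

# Hypothetical quick look at one cached pair file; the structure of `pairs`
# (tensor, list, or dict of query/database indices) is an assumption.
pairs = torch.load("cache/test_512_pairs.pth", map_location="cpu")
print(type(pairs))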
-------------------------------------------------------------------------------- /env.yml: -------------------------------------------------------------------------------- 1 | name: UASTHN 2 | channels: 3 | - defaults 4 | - xformers 5 | - pytorch 6 | - nvidia 7 | - conda-forge 8 | dependencies: 9 | - python=3.10 10 | - pytorch::pytorch=2.4 11 | - pytorch::pytorch-cuda=12.1 12 | - pytorch::torchvision=0.19 13 | - xformers 14 | - pip 15 | - matplotlib 16 | - ipywidgets 17 | - h5py 18 | - kornia 19 | - scikit-image 20 | - pip: 21 | - faiss-gpu==1.7.2 22 | - pandas==2.1.3 23 | - prettytable==3.9.0 24 | - pytorch-lightning==2.1.2 25 | - pytorch-metric-learning==2.3.0 26 | - torchmetrics==1.2.0 27 | - wandb 28 | - opencv-python-headless 29 | - transformers 30 | - googledrivedownloader 31 | - timm 32 | - einops -------------------------------------------------------------------------------- /eval_global.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | eval "$(conda shell.bash hook)" 4 | conda activate UAGL 5 | 6 | # # netvlad DANN 7 | # python3 global_pipeline/eval.py --resume='logs/global_retrieval/satellite_0_thermalmapping_135_contrast_dense_exclusion-2024-02-19_12-10-07-dd8b1b8b-d529-4277-b96c-2480b813eb69/best_model.pth' --dataset_name=satellite_0_thermalmapping_135 --datasets_folder ./datasets --aggregation netvlad --infer_batch_size 16 --prior_location_threshold=512 --backbone resnet50conv4 --fc_output_dim 4096 --G_contrast manual 8 | 9 | # # netvlad 10 | # python3 global_pipeline/eval.py --resume='logs/global_retrieval/satellite_0_thermalmapping_135_contrast_dense_exclusion-2024-02-19_12-37-10-32caa09a-06c0-4549-a30e-f1e99424ed16/best_model.pth' --dataset_name=satellite_0_thermalmapping_135 --datasets_folder ./datasets --aggregation netvlad --infer_batch_size 16 --prior_location_threshold=512 --backbone resnet50conv4 --fc_output_dim 4096 --G_contrast manual 11 | 12 | # # gem DANN 13 | # python3 global_pipeline/eval.py --resume='logs/global_retrieval/satellite_0_thermalmapping_135_contrast_dense_exclusion-2024-02-14_23-02-31-91400d55-5881-48e5-b6cb-cecff4f47a3f/best_model.pth' --dataset_name=satellite_0_thermalmapping_135 --datasets_folder ./datasets --aggregation gem --infer_batch_size 16 --prior_location_threshold=512 --backbone resnet50conv4 --fc_output_dim 4096 --G_contrast manual 14 | 15 | # # gem 16 | # python3 global_pipeline/eval.py --resume='logs/global_retrieval/satellite_0_thermalmapping_135_contrast_dense_exclusion-2024-02-14_23-05-05-be2c36a5-1841-4667-a95d-05d7cc0a7472/best_model.pth' --dataset_name=satellite_0_thermalmapping_135 --datasets_folder ./datasets --aggregation gem --infer_batch_size 16 --prior_location_threshold=512 --backbone resnet50conv4 --fc_output_dim 4096 --G_contrast manual -------------------------------------------------------------------------------- /generate_pairs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 128 4 | python3 ./local_pipeline/myevaluate.py --dataset_name satellite_0_thermalmapping_135_largest_ori_train --identity --lev0 --database_size 1536 --corr_level 4 --generate_test_pairs --val_positive_dist_threshold 128 5 | 6 | python3 ./local_pipeline/myevaluate.py --dataset_name satellite_0_thermalmapping_135_largest_ori_train --identity --database_size 1536 --test --val_positive_dist_threshold 128 7 | 8 | # 256 9 | python3 ./local_pipeline/myevaluate.py --dataset_name satellite_0_thermalmapping_135_largest_ori_train 
--identity --lev0 --database_size 1536 --corr_level 4 --generate_test_pairs --val_positive_dist_threshold 256 10 | 11 | python3 ./local_pipeline/myevaluate.py --dataset_name satellite_0_thermalmapping_135_largest_ori_train --identity --database_size 1536 --test --val_positive_dist_threshold 256 12 | 13 | # 512 14 | python3 ./local_pipeline/myevaluate.py --dataset_name satellite_0_thermalmapping_135_largest_ori_train --identity --lev0 --database_size 1536 --corr_level 4 --generate_test_pairs --val_positive_dist_threshold 512 15 | 16 | python3 ./local_pipeline/myevaluate.py --dataset_name satellite_0_thermalmapping_135_largest_ori_train --identity --lev0 --database_size 1536 --corr_level 4 --test --generate_test_pairs --val_positive_dist_threshold 512 17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /global_pipeline/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 ARPL 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /global_pipeline/LICENSE_FOR_REFERENCE: -------------------------------------------------------------------------------- 1 | --------------------------- LICENSE FOR deep-visual-geo-localization-benchmark -------------------------------- 2 | MIT License 3 | 4 | Copyright (c) 2016-2019 VRG, CTU Prague 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | 24 | --------------------------- LICENSE FOR DANN -------------------------------- 25 | MIT License 26 | 27 | Copyright (c) 2019 fungtion 28 | 29 | Permission is hereby granted, free of charge, to any person obtaining a copy 30 | of this software and associated documentation files (the "Software"), to deal 31 | in the Software without restriction, including without limitation the rights 32 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 33 | copies of the Software, and to permit persons to whom the Software is 34 | furnished to do so, subject to the following conditions: 35 | 36 | The above copyright notice and this permission notice shall be included in all 37 | copies or substantial portions of the Software. 38 | 39 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 40 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 41 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 42 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 43 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 44 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 45 | SOFTWARE. -------------------------------------------------------------------------------- /global_pipeline/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arplaboratory/UASTHN/1b27df8b5b4305f8f41936736a05e8117f7f20db/global_pipeline/__init__.py -------------------------------------------------------------------------------- /global_pipeline/anyloc/LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2023, AnyLoc 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | 1. Redistributions of source code must retain the above copyright notice, this 9 | list of conditions and the following disclaimer. 10 | 11 | 2. Redistributions in binary form must reproduce the above copyright notice, 12 | this list of conditions and the following disclaimer in the documentation 13 | and/or other materials provided with the distribution. 14 | 15 | 3. Neither the name of the copyright holder nor the names of its 16 | contributors may be used to endorse or promote products derived from 17 | this software without specific prior written permission. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 23 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 25 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 26 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 27 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- /global_pipeline/anyloc/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arplaboratory/UASTHN/1b27df8b5b4305f8f41936736a05e8117f7f20db/global_pipeline/anyloc/__init__.py -------------------------------------------------------------------------------- /global_pipeline/commons.py: -------------------------------------------------------------------------------- 1 | """ 2 | This file contains some functions and classes which can be useful in very diverse projects. 3 | """ 4 | 5 | import os 6 | import sys 7 | import torch 8 | import random 9 | import logging 10 | import traceback 11 | import numpy as np 12 | from os.path import join 13 | 14 | 15 | def make_deterministic(seed=0): 16 | """Make results deterministic. If seed == -1, do not make deterministic. 17 | Running the script in a deterministic way might slow it down. 18 | """ 19 | if seed == -1: 20 | return 21 | random.seed(seed) 22 | np.random.seed(seed) 23 | torch.manual_seed(seed) 24 | torch.cuda.manual_seed(seed) 25 | torch.backends.cudnn.deterministic = True 26 | torch.backends.cudnn.benchmark = False 27 | 28 | 29 | def setup_logging( 30 | save_dir, console="debug", info_filename="info.log", debug_filename="debug.log" 31 | ): 32 | """Set up logging files and console output. 33 | Creates one file for INFO logs and one for DEBUG logs. 34 | Args: 35 | save_dir (str): the folder to create, in which the log files are saved. 36 | console (str): 37 | if == "debug" prints on console debug messages and higher 38 | if == "info" prints on console info messages and higher 39 | if == None does not use console (useful when a logger has already been set) 40 | info_filename (str): the name of the info file. if None, don't create info file 41 | debug_filename (str): the name of the debug file.
if None, don't create debug file 42 | """ 43 | if os.path.exists(save_dir): 44 | raise FileExistsError(f"{save_dir} already exists!") 45 | os.makedirs(save_dir, exist_ok=True) 46 | # logging.Logger.manager.loggerDict.keys() to check which loggers are in use 47 | base_formatter = logging.Formatter( 48 | "%(asctime)s %(message)s", "%Y-%m-%d %H:%M:%S") 49 | logger = logging.getLogger("") 50 | logger.setLevel(logging.DEBUG) 51 | 52 | if info_filename is not None: 53 | info_file_handler = logging.FileHandler(join(save_dir, info_filename)) 54 | info_file_handler.setLevel(logging.INFO) 55 | info_file_handler.setFormatter(base_formatter) 56 | logger.addHandler(info_file_handler) 57 | 58 | if debug_filename is not None: 59 | debug_file_handler = logging.FileHandler( 60 | join(save_dir, debug_filename)) 61 | debug_file_handler.setLevel(logging.DEBUG) 62 | debug_file_handler.setFormatter(base_formatter) 63 | logger.addHandler(debug_file_handler) 64 | 65 | if console is not None: 66 | console_handler = logging.StreamHandler() 67 | if console == "debug": 68 | console_handler.setLevel(logging.DEBUG) 69 | if console == "info": 70 | console_handler.setLevel(logging.INFO) 71 | console_handler.setFormatter(base_formatter) 72 | logger.addHandler(console_handler) 73 | 74 | def exception_handler(type_, value, tb): 75 | logger.info("\n" + "".join(traceback.format_exception(type_, value, tb))) 76 | 77 | sys.excepthook = exception_handler 78 | -------------------------------------------------------------------------------- /global_pipeline/eval_pix2pix.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | import parser 5 | import logging 6 | import sklearn 7 | from os.path import join 8 | from datetime import datetime 9 | from torch.utils.model_zoo import load_url 10 | from google_drive_downloader import GoogleDriveDownloader as gdd 11 | import copy 12 | 13 | import test 14 | import util 15 | import commons 16 | import datasets_ws 17 | from model import network 18 | 19 | ######################################### SETUP ######################################### 20 | args = parser.parse_arguments() 21 | start_time = datetime.now() 22 | args.save_dir = join( 23 | "test", 24 | args.save_dir, 25 | f"{args.dataset_name}-{start_time.strftime('%Y-%m-%d_%H-%M-%S')}", 26 | ) 27 | commons.setup_logging(args.save_dir) 28 | commons.make_deterministic(args.seed) 29 | logging.info(f"Arguments: {args}") 30 | logging.info(f"The outputs are being saved in {args.save_dir}") 31 | 32 | ######################################### MODEL ######################################### 33 | model = network.pix2pix(args, 3, 1) 34 | 35 | if args.resume is not None: 36 | logging.info(f"Resuming model from {args.resume}") 37 | model = util.resume_model_pix2pix(args, model) 38 | # Enable DataParallel after loading checkpoint, otherwise doing it before 39 | # would append "module."
in front of the keys of the state dict triggering errors 40 | 41 | model.setup() 42 | 43 | ######################################### DATASETS ######################################### 44 | test_ds = datasets_ws.TranslationDataset( 45 | args, args.datasets_folder, args.dataset_name, "test") 46 | logging.info(f"Test set: {test_ds}") 47 | 48 | ######################################### TEST on TEST SET ######################################### 49 | recalls, recalls_str = test.test_translation_pix2pix(args, test_ds, model) 50 | logging.info(f"PSNR on {test_ds}: {recalls_str}") 51 | 52 | logging.info(f"Finished in {str(datetime.now() - start_time)[:-7]}") 53 | -------------------------------------------------------------------------------- /global_pipeline/eval_pix2pix_generate_h5.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | import parser 5 | import logging 6 | import sklearn 7 | from os.path import join 8 | from datetime import datetime 9 | from torch.utils.model_zoo import load_url 10 | from google_drive_downloader import GoogleDriveDownloader as gdd 11 | import copy 12 | 13 | import test 14 | import util 15 | import commons 16 | import datasets_ws 17 | from model import network 18 | 19 | ######################################### SETUP ######################################### 20 | args = parser.parse_arguments() 21 | start_time = datetime.now() 22 | args.save_dir = join( 23 | "test", 24 | args.save_dir, 25 | f"{args.dataset_name}-{start_time.strftime('%Y-%m-%d_%H-%M-%S')}", 26 | ) 27 | commons.setup_logging(args.save_dir) 28 | commons.make_deterministic(args.seed) 29 | logging.info(f"Arguments: {args}") 30 | logging.info(f"The outputs are being saved in {args.save_dir}") 31 | 32 | ######################################### MODEL ######################################### 33 | model = network.pix2pix(args, 3, 1) 34 | 35 | if args.resume is not None: 36 | logging.info(f"Resuming model from {args.resume}") 37 | model = util.resume_model_pix2pix(args, model) 38 | # Enable DataParallel after loading checkpoint, otherwise doing it before 39 | # would append "module." 
in front of the keys of the state dict triggering errors 40 | 41 | model.setup() 42 | 43 | ######################################### DATASETS ######################################### 44 | train_ds = datasets_ws.TranslationDataset( 45 | args, args.datasets_folder, args.dataset_name, "train", loading_queries=False) 46 | logging.info(f"Train set: {train_ds}") 47 | 48 | ######################################### TEST on TEST SET ######################################### 49 | test.test_translation_pix2pix_generate_h5(args, train_ds, model) 50 | 51 | logging.info(f"Finished in {str(datetime.now() - start_time)[:-7]}") 52 | -------------------------------------------------------------------------------- /global_pipeline/eval_pix2pix_generate_h5_exclude.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | import parser 5 | import logging 6 | import sklearn 7 | from os.path import join 8 | from datetime import datetime 9 | from torch.utils.model_zoo import load_url 10 | from google_drive_downloader import GoogleDriveDownloader as gdd 11 | import copy 12 | 13 | import test 14 | import util 15 | import commons 16 | import datasets_ws 17 | from model import network 18 | 19 | ######################################### SETUP ######################################### 20 | args = parser.parse_arguments() 21 | start_time = datetime.now() 22 | args.save_dir = join( 23 | "test", 24 | args.save_dir, 25 | f"{args.dataset_name}-{start_time.strftime('%Y-%m-%d_%H-%M-%S')}", 26 | ) 27 | commons.setup_logging(args.save_dir) 28 | commons.make_deterministic(args.seed) 29 | logging.info(f"Arguments: {args}") 30 | logging.info(f"The outputs are being saved in {args.save_dir}") 31 | 32 | ######################################### MODEL ######################################### 33 | model = network.pix2pix(args, 3, 1) 34 | 35 | if args.resume is not None: 36 | logging.info(f"Resuming model from {args.resume}") 37 | model = util.resume_model_pix2pix(args, model) 38 | # Enable DataParallel after loading checkpoint, otherwise doing it before 39 | # would append "module." 
in front of the keys of the state dict triggering errors 40 | 41 | model.setup() 42 | 43 | ######################################### DATASETS ######################################### 44 | train_ds = datasets_ws.TranslationDataset( 45 | args, args.datasets_folder, args.dataset_name, "train", loading_queries=False) 46 | logging.info(f"Train set: {train_ds}") 47 | 48 | TB_test_region = [270, 1400, 2710, 9400] 49 | ######################################### TEST on TEST SET ######################################### 50 | test.test_translation_pix2pix_generate_h5(args, train_ds, model, exclude_test_region=TB_test_region) 51 | 52 | logging.info(f"Finished in {str(datetime.now() - start_time)[:-7]}") -------------------------------------------------------------------------------- /global_pipeline/folder_config.yml: -------------------------------------------------------------------------------- 1 | satellite: 2 | name: 20201117_west_of_rimah 3 | maps: 4 | - 20201117_west_of_rimah_BingSatellite.tif 5 | - 20201117_west_of_rimah_ESRI Satellite.tif 6 | - 20201117_west_of_rimah_ESRI_Satellite.tif 7 | - 20201117_west_of_rimah_GoogleSatellite.tif 8 | - 20201117_west_of_rimah_Yandex Satellite.tif 9 | - 20210609_west_of_rimah_first_night_mapping_flight.tif 10 | valid_regions: 11 | - [0, 0, 9088, 23744] 12 | - [0, 0, 9088, 23744] 13 | - [0, 0, 9088, 23744] 14 | - [0, 0, 9088, 23744] 15 | sirmionemapping: 16 | name: 20210519_west_of_rimah_SirmioneMapping_CROP 17 | maps: 18 | - 20210519_12-35-36_west_of_rimah_SirmioneMapping_CROP.tif 19 | - 20210519_14-28-31_west_of_rimah_SirmioneMapping_CROP.tif 20 | thermalmapping: 21 | name: 20210620_west_of_rimah_thermal_mapping 22 | maps: 23 | - 20210620_west_of_rimah_thermal_mapping_flight1_21h22m.tif 24 | - 20210620_west_of_rimah_thermal_mapping_flight2_22h50m.tif 25 | - 20210620_west_of_rimah_thermal_mapping_flight3_00h29m.tif 26 | - 20210620_west_of_rimah_thermal_mapping_flight4_01h43m.tif 27 | - 20210620_west_of_rimah_thermal_mapping_flight5_02h45m.tif 28 | - 20210620_west_of_rimah_thermal_mapping_flight6_03h42m.tif 29 | - 20210609_west_of_rimah_first_night_mapping_flight.tif 30 | valid_regions: 31 | - [750, 1500, 2500, 9400] 32 | - [2700, 1800, 5000, 9500] 33 | - [270, 1500, 2700, 9400] 34 | - [2700, 1800, 4200, 9450] 35 | - [1040, 1400, 2710, 9400] 36 | - [2650, 1800, 5100, 9350] 37 | thermalmappingDJI: 38 | name: 202302_2023_03_west_of_rimah_DJI 39 | maps: 40 | - 202302_west_of_rimah_daytime.tif 41 | - 202303_west_of_rimah_nighttime.tif 42 | - 202303_west_of_rimah_RGB_old.tif 43 | - 202303_west_of_rimah_RGB.tif 44 | valid_regions: 45 | - [650, 1700, 4500, 9250] 46 | - [600, 1900, 4500, 9250] 47 | - [600, 1900, 4500, 9250] 48 | - [600, 1900, 4500, 9250] 49 | foxtechmapping: 50 | name: 20220603_west_of_rimah_foxtech_mapping 51 | maps: 52 | - 20220603_west_of_rimah_foxtech_mapping_2h30pm.tif 53 | - 20220603_west_of_rimah_foxtech_mapping_9h00am.tif 54 | - 20220603_west_of_rimah_foxtech_mapping_11h30am.tif 55 | valid_regions: 56 | - [750, 550, 4600, 6600] 57 | ADASI: 58 | name: 20221216_ADASI_weekday_notam 59 | maps: 60 | - ADASI_weekday_notam_Bing Aerial_rotated.tif 61 | valid_regions: 62 | - [950, 7350, 10200, 9225] 63 | ADASIthermal: 64 | name: 20221216_ADASI_weekday_notam_thermal_mapping 65 | maps: 66 | - ADASI_weekday_notam_DJIMapping_16-12-2022-14PM_north_part_rotated.tif 67 | valid_regions: 68 | - [950, 7350, 10200, 9225] -------------------------------------------------------------------------------- /global_pipeline/model/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/arplaboratory/UASTHN/1b27df8b5b4305f8f41936736a05e8117f7f20db/global_pipeline/model/__init__.py -------------------------------------------------------------------------------- /global_pipeline/model/cct/__init__.py: -------------------------------------------------------------------------------- 1 | from .cct import cct_14_7x2_384, cct_14_7x2_224 -------------------------------------------------------------------------------- /global_pipeline/model/cct/embedder.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | 4 | class Embedder(nn.Module): 5 | def __init__(self, 6 | word_embedding_dim=300, 7 | vocab_size=100000, 8 | padding_idx=1, 9 | pretrained_weight=None, 10 | embed_freeze=False, 11 | *args, **kwargs): 12 | super(Embedder, self).__init__() 13 | self.embeddings = nn.Embedding.from_pretrained(pretrained_weight, freeze=embed_freeze) \ 14 | if pretrained_weight is not None else \ 15 | nn.Embedding(vocab_size, word_embedding_dim, padding_idx=padding_idx) 16 | self.embeddings.weight.requires_grad = not embed_freeze 17 | 18 | def forward_mask(self, mask): 19 | bsz, seq_len = mask.shape 20 | new_mask = mask.view(bsz, seq_len, 1) 21 | new_mask = new_mask.sum(-1) 22 | new_mask = (new_mask > 0) 23 | return new_mask 24 | 25 | def forward(self, x, mask=None): 26 | embed = self.embeddings(x) 27 | embed = embed if mask is None else embed * self.forward_mask(mask).unsqueeze(-1).float() 28 | return embed, mask 29 | 30 | @staticmethod 31 | def init_weight(m): 32 | if isinstance(m, nn.Linear): 33 | nn.init.trunc_normal_(m.weight, std=.02) 34 | if isinstance(m, nn.Linear) and m.bias is not None: 35 | nn.init.constant_(m.bias, 0) 36 | else: 37 | nn.init.normal_(m.weight) 38 | -------------------------------------------------------------------------------- /global_pipeline/model/cct/helpers.py: -------------------------------------------------------------------------------- 1 | import math 2 | import torch 3 | import torch.nn.functional as F 4 | 5 | 6 | def resize_pos_embed(posemb, posemb_new, num_tokens=1): 7 | # Copied from `timm` by Ross Wightman: 8 | # github.com/rwightman/pytorch-image-models 9 | # Rescale the grid of position embeddings when loading from state_dict. 
Adapted from 10 | # https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224 11 | ntok_new = posemb_new.shape[1] 12 | if num_tokens: 13 | posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:] 14 | ntok_new -= num_tokens 15 | else: 16 | posemb_tok, posemb_grid = posemb[:, :0], posemb[0] 17 | gs_old = int(math.sqrt(len(posemb_grid))) 18 | gs_new = int(math.sqrt(ntok_new)) 19 | posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2) 20 | posemb_grid = F.interpolate(posemb_grid, size=(gs_new, gs_new), mode='bilinear') 21 | posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new * gs_new, -1) 22 | posemb = torch.cat([posemb_tok, posemb_grid], dim=1) 23 | return posemb 24 | 25 | 26 | def pe_check(model, state_dict, pe_key='classifier.positional_emb'): 27 | if pe_key is not None and pe_key in state_dict.keys() and pe_key in model.state_dict().keys(): 28 | if model.state_dict()[pe_key].shape != state_dict[pe_key].shape: 29 | state_dict[pe_key] = resize_pos_embed(state_dict[pe_key], 30 | model.state_dict()[pe_key], 31 | num_tokens=model.classifier.num_tokens) 32 | return state_dict 33 | -------------------------------------------------------------------------------- /global_pipeline/model/cct/stochastic_depth.py: -------------------------------------------------------------------------------- 1 | # Thanks to rwightman's timm package 2 | # github.com:rwightman/pytorch-image-models 3 | 4 | import torch 5 | import torch.nn as nn 6 | 7 | 8 | def drop_path(x, drop_prob: float = 0., training: bool = False): 9 | """ 10 | Obtained from: github.com:rwightman/pytorch-image-models 11 | Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 12 | This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, 13 | the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... 14 | See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for 15 | changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 16 | 'survival rate' as the argument. 17 | """ 18 | if drop_prob == 0. or not training: 19 | return x 20 | keep_prob = 1 - drop_prob 21 | shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets 22 | random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device) 23 | random_tensor.floor_() # binarize 24 | output = x.div(keep_prob) * random_tensor 25 | return output 26 | 27 | 28 | class DropPath(nn.Module): 29 | """ 30 | Obtained from: github.com:rwightman/pytorch-image-models 31 | Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
32 | """ 33 | 34 | def __init__(self, drop_prob=None): 35 | super(DropPath, self).__init__() 36 | self.drop_prob = drop_prob 37 | 38 | def forward(self, x): 39 | return drop_path(x, self.drop_prob, self.training) 40 | -------------------------------------------------------------------------------- /global_pipeline/model/non_local.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | import einops 4 | 5 | 6 | class NonLocalBlock(nn.Module): 7 | def __init__(self, channel_feat, channel_inner, gamma=1): 8 | super().__init__() 9 | self.q_conv = nn.Conv2d(in_channels=channel_feat, 10 | out_channels=channel_inner, 11 | kernel_size=1) 12 | self.k_conv = nn.Conv2d(in_channels=channel_feat, 13 | out_channels=channel_inner, 14 | kernel_size=1) 15 | self.v_conv = nn.Conv2d(in_channels=channel_feat, 16 | out_channels=channel_inner, 17 | kernel_size=1) 18 | self.merge_conv = nn.Conv2d(in_channels=channel_inner, 19 | out_channels=channel_feat, 20 | kernel_size=1) 21 | self.gamma = gamma 22 | 23 | def forward(self, x): 24 | b, c, h, w = x.shape[:] 25 | q_tensor = self.q_conv(x) 26 | k_tensor = self.k_conv(x) 27 | v_tensor = self.v_conv(x) 28 | 29 | q_tensor = einops.rearrange(q_tensor, 'b c h w -> b c (h w)') 30 | k_tensor = einops.rearrange(k_tensor, 'b c h w -> b c (h w)') 31 | v_tensor = einops.rearrange(v_tensor, 'b c h w -> b c (h w)') 32 | 33 | qk_tensor = torch.einsum('b c i, b c j -> b i j', q_tensor, k_tensor) # where i = j = (h * w) 34 | attention = torch.softmax(qk_tensor, -1) 35 | out = torch.einsum('b n i, b c i -> b c n', attention, v_tensor) 36 | out = einops.rearrange(out, 'b c (h w) -> b c h w', h=h, w=w) 37 | out = self.merge_conv(out) 38 | out = self.gamma * out + x 39 | return out 40 | -------------------------------------------------------------------------------- /global_pipeline/model/normalization.py: -------------------------------------------------------------------------------- 1 | 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | class L2Norm(nn.Module): 6 | def __init__(self, dim=1): 7 | super().__init__() 8 | self.dim = dim 9 | def forward(self, x): 10 | return F.normalize(x, p=2, dim=self.dim) 11 | 12 | -------------------------------------------------------------------------------- /global_pipeline/model/pix2pix_networks/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arplaboratory/UASTHN/1b27df8b5b4305f8f41936736a05e8117f7f20db/global_pipeline/model/pix2pix_networks/__init__.py -------------------------------------------------------------------------------- /global_pipeline/model/sync_batchnorm/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # File : __init__.py 3 | # Author : Jiayuan Mao 4 | # Email : maojiayuan@gmail.com 5 | # Date : 27/01/2018 6 | # 7 | # This file is part of Synchronized-BatchNorm-PyTorch. 8 | # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch 9 | # Distributed under MIT License.
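# Usage sketch (a hedged example following the upstream
# Synchronized-BatchNorm-PyTorch README, not code from this repository) for
# the names imported below:
#   sync_model = convert_model(model)  # swap nn.BatchNorm* -> SynchronizedBatchNorm*
#   sync_model = DataParallelWithCallback(sync_model, device_ids=[0, 1])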
10 | 11 | from .batchnorm import set_sbn_eps_mode 12 | from .batchnorm import SynchronizedBatchNorm1d, SynchronizedBatchNorm2d, SynchronizedBatchNorm3d 13 | from .batchnorm import patch_sync_batchnorm, convert_model 14 | from .replicate import DataParallelWithCallback, patch_replication_callback 15 | -------------------------------------------------------------------------------- /global_pipeline/model/sync_batchnorm/batchnorm_reimpl.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # File : batchnorm_reimpl.py 4 | # Author : acgtyrant 5 | # Date : 11/01/2018 6 | # 7 | # This file is part of Synchronized-BatchNorm-PyTorch. 8 | # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch 9 | # Distributed under MIT License. 10 | 11 | import torch 12 | import torch.nn as nn 13 | import torch.nn.init as init 14 | 15 | __all__ = ['BatchNorm2dReimpl'] 16 | 17 | 18 | class BatchNorm2dReimpl(nn.Module): 19 | """ 20 | A re-implementation of batch normalization, used for testing the numerical 21 | stability. 22 | 23 | Author: acgtyrant 24 | See also: 25 | https://github.com/vacancy/Synchronized-BatchNorm-PyTorch/issues/14 26 | """ 27 | def __init__(self, num_features, eps=1e-5, momentum=0.1): 28 | super().__init__() 29 | 30 | self.num_features = num_features 31 | self.eps = eps 32 | self.momentum = momentum 33 | self.weight = nn.Parameter(torch.empty(num_features)) 34 | self.bias = nn.Parameter(torch.empty(num_features)) 35 | self.register_buffer('running_mean', torch.zeros(num_features)) 36 | self.register_buffer('running_var', torch.ones(num_features)) 37 | self.reset_parameters() 38 | 39 | def reset_running_stats(self): 40 | self.running_mean.zero_() 41 | self.running_var.fill_(1) 42 | 43 | def reset_parameters(self): 44 | self.reset_running_stats() 45 | init.uniform_(self.weight) 46 | init.zeros_(self.bias) 47 | 48 | def forward(self, input_): 49 | batchsize, channels, height, width = input_.size() 50 | numel = batchsize * height * width 51 | input_ = input_.permute(1, 0, 2, 3).contiguous().view(channels, numel) 52 | sum_ = input_.sum(1) 53 | sum_of_square = input_.pow(2).sum(1) 54 | mean = sum_ / numel 55 | sumvar = sum_of_square - sum_ * mean 56 | 57 | self.running_mean = ( 58 | (1 - self.momentum) * self.running_mean 59 | + self.momentum * mean.detach() 60 | ) 61 | unbias_var = sumvar / (numel - 1) 62 | self.running_var = ( 63 | (1 - self.momentum) * self.running_var 64 | + self.momentum * unbias_var.detach() 65 | ) 66 | 67 | bias_var = sumvar / numel 68 | inv_std = 1 / (bias_var + self.eps).pow(0.5) 69 | output = ( 70 | (input_ - mean.unsqueeze(1)) * inv_std.unsqueeze(1) * 71 | self.weight.unsqueeze(1) + self.bias.unsqueeze(1)) 72 | 73 | return output.view(channels, batchsize, height, width).permute(1, 0, 2, 3).contiguous() 74 | 75 | -------------------------------------------------------------------------------- /global_pipeline/model/sync_batchnorm/unittest.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # File : unittest.py 3 | # Author : Jiayuan Mao 4 | # Email : maojiayuan@gmail.com 5 | # Date : 27/01/2018 6 | # 7 | # This file is part of Synchronized-BatchNorm-PyTorch. 8 | # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch 9 | # Distributed under MIT License. 
10 | 11 | import unittest 12 | import torch 13 | 14 | 15 | class TorchTestCase(unittest.TestCase): 16 | def assertTensorClose(self, x, y): 17 | adiff = float((x - y).abs().max()) 18 | if (y == 0).all(): 19 | rdiff = 'NaN' 20 | else: 21 | rdiff = float((adiff / y).abs().max()) 22 | 23 | message = ( 24 | 'Tensor close check failed\n' 25 | 'adiff={}\n' 26 | 'rdiff={}\n' 27 | ).format(adiff, rdiff) 28 | self.assertTrue(torch.allclose(x, y, atol=1e-5, rtol=1e-3), message) 29 | 30 | -------------------------------------------------------------------------------- /global_pipeline/plotting.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | import sys 3 | import os 4 | import statistics 5 | import matplotlib.pyplot as plt 6 | import numpy as np 7 | import random 8 | import cv2 9 | 10 | random.seed(1) 11 | np.random.seed(1) 12 | 13 | def process_results_simulation(error_m, save_folder): 14 | res_error_m = error_m 15 | #res_error_m = [e for gt, e in zip(res_ground_truth, res_error_m) if mask[gt[1], gt[0]]>100] 16 | #res_ground_truth = [gt for gt in res_ground_truth if mask[gt[1], gt[0]]>100] 17 | 18 | if not os.path.exists(save_folder): 19 | os.makedirs(save_folder, exist_ok=True) 20 | 21 | save_filename = os.path.join(save_folder, 'matching_results.txt') 22 | f = open(save_filename, "a") 23 | 24 | total_tested = len(res_error_m) 25 | error_0 = res_error_m.count(0) 26 | f.write("Perfect matches: %d of %d (%.2f%%) \n" % (error_0, total_tested, 100*error_0/total_tested)) 27 | 28 | error_25 = sum(x <= 25 for x in res_error_m) 29 | f.write("Mismatch less or equal to 25m: %d of %d (%.2f%%) \n" % (error_25, total_tested, 100*error_25/total_tested)) 30 | 31 | error_50 = sum(x <= 50 for x in res_error_m) 32 | f.write("Mismatch less or equal to 50m: %d of %d (%.2f%%) \n" % (error_50, total_tested, 100*error_50/total_tested)) 33 | 34 | error_100 = sum(x <= 100 for x in res_error_m) 35 | f.write("Mismatch less or equal to 100m: %d of %d (%.2f%%) \n" % (error_100, total_tested, 100*error_100/total_tested)) 36 | 37 | error_150 = sum(x <= 150 for x in res_error_m) 38 | f.write("Mismatch less or equal to 150m: %d of %d (%.2f%%) \n" % (error_150, total_tested, 100*error_150/total_tested)) 39 | 40 | f.write("Mean error: %.2fm \n" % (np.mean(res_error_m))) 41 | print(f"Mean error: {np.mean(res_error_m)}") 42 | 43 | text_result = np.histogram(res_error_m, bins=16, range=[0, 512]) 44 | f.write(f"Histogram: {text_result}") 45 | 46 | f.close() 47 | 48 | plt.hist(res_error_m, histtype='step', bins=16, range=[0, 512]) 49 | plt.title('Histogram of L_2 Distance Error') 50 | plt.xlabel("Error") 51 | plt.ylabel("Frequency") 52 | plt.savefig(os.path.join(save_folder, 'hist_error_localization.pdf')) -------------------------------------------------------------------------------- /keypoint_pipeline/myloftr/model/sync_batchnorm/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # File : __init__.py 3 | # Author : Jiayuan Mao 4 | # Email : maojiayuan@gmail.com 5 | # Date : 27/01/2018 6 | # 7 | # This file is part of Synchronized-BatchNorm-PyTorch. 8 | # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch 9 | # Distributed under MIT License.
10 | 11 | from .batchnorm import set_sbn_eps_mode 12 | from .batchnorm import SynchronizedBatchNorm1d, SynchronizedBatchNorm2d, SynchronizedBatchNorm3d 13 | from .batchnorm import patch_sync_batchnorm, convert_model 14 | from .replicate import DataParallelWithCallback, patch_replication_callback 15 | -------------------------------------------------------------------------------- /keypoint_pipeline/myloftr/model/sync_batchnorm/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arplaboratory/UASTHN/1b27df8b5b4305f8f41936736a05e8117f7f20db/keypoint_pipeline/myloftr/model/sync_batchnorm/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /keypoint_pipeline/myloftr/model/sync_batchnorm/__pycache__/batchnorm.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arplaboratory/UASTHN/1b27df8b5b4305f8f41936736a05e8117f7f20db/keypoint_pipeline/myloftr/model/sync_batchnorm/__pycache__/batchnorm.cpython-39.pyc -------------------------------------------------------------------------------- /keypoint_pipeline/myloftr/model/sync_batchnorm/__pycache__/comm.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arplaboratory/UASTHN/1b27df8b5b4305f8f41936736a05e8117f7f20db/keypoint_pipeline/myloftr/model/sync_batchnorm/__pycache__/comm.cpython-39.pyc -------------------------------------------------------------------------------- /keypoint_pipeline/myloftr/model/sync_batchnorm/__pycache__/replicate.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arplaboratory/UASTHN/1b27df8b5b4305f8f41936736a05e8117f7f20db/keypoint_pipeline/myloftr/model/sync_batchnorm/__pycache__/replicate.cpython-39.pyc -------------------------------------------------------------------------------- /keypoint_pipeline/myloftr/model/sync_batchnorm/batchnorm_reimpl.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # File : batchnorm_reimpl.py 4 | # Author : acgtyrant 5 | # Date : 11/01/2018 6 | # 7 | # This file is part of Synchronized-BatchNorm-PyTorch. 8 | # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch 9 | # Distributed under MIT License. 10 | 11 | import torch 12 | import torch.nn as nn 13 | import torch.nn.init as init 14 | 15 | __all__ = ['BatchNorm2dReimpl'] 16 | 17 | 18 | class BatchNorm2dReimpl(nn.Module): 19 | """ 20 | A re-implementation of batch normalization, used for testing the numerical 21 | stability. 
22 | 23 | Author: acgtyrant 24 | See also: 25 | https://github.com/vacancy/Synchronized-BatchNorm-PyTorch/issues/14 26 | """ 27 | def __init__(self, num_features, eps=1e-5, momentum=0.1): 28 | super().__init__() 29 | 30 | self.num_features = num_features 31 | self.eps = eps 32 | self.momentum = momentum 33 | self.weight = nn.Parameter(torch.empty(num_features)) 34 | self.bias = nn.Parameter(torch.empty(num_features)) 35 | self.register_buffer('running_mean', torch.zeros(num_features)) 36 | self.register_buffer('running_var', torch.ones(num_features)) 37 | self.reset_parameters() 38 | 39 | def reset_running_stats(self): 40 | self.running_mean.zero_() 41 | self.running_var.fill_(1) 42 | 43 | def reset_parameters(self): 44 | self.reset_running_stats() 45 | init.uniform_(self.weight) 46 | init.zeros_(self.bias) 47 | 48 | def forward(self, input_): 49 | batchsize, channels, height, width = input_.size() 50 | numel = batchsize * height * width 51 | input_ = input_.permute(1, 0, 2, 3).contiguous().view(channels, numel) 52 | sum_ = input_.sum(1) 53 | sum_of_square = input_.pow(2).sum(1) 54 | mean = sum_ / numel 55 | sumvar = sum_of_square - sum_ * mean 56 | 57 | self.running_mean = ( 58 | (1 - self.momentum) * self.running_mean 59 | + self.momentum * mean.detach() 60 | ) 61 | unbias_var = sumvar / (numel - 1) 62 | self.running_var = ( 63 | (1 - self.momentum) * self.running_var 64 | + self.momentum * unbias_var.detach() 65 | ) 66 | 67 | bias_var = sumvar / numel 68 | inv_std = 1 / (bias_var + self.eps).pow(0.5) 69 | output = ( 70 | (input_ - mean.unsqueeze(1)) * inv_std.unsqueeze(1) * 71 | self.weight.unsqueeze(1) + self.bias.unsqueeze(1)) 72 | 73 | return output.view(channels, batchsize, height, width).permute(1, 0, 2, 3).contiguous() 74 | 75 | -------------------------------------------------------------------------------- /keypoint_pipeline/myloftr/model/sync_batchnorm/unittest.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # File : unittest.py 3 | # Author : Jiayuan Mao 4 | # Email : maojiayuan@gmail.com 5 | # Date : 27/01/2018 6 | # 7 | # This file is part of Synchronized-BatchNorm-PyTorch. 8 | # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch 9 | # Distributed under MIT License. 
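#
# Editorial usage sketch (not in the upstream file): subclass TorchTestCase to
# get a tensor-aware assertion in ordinary unit tests, e.g.
#
#     class MyTest(TorchTestCase):
#         def test_identity(self):
#             x = torch.randn(4)
#             self.assertTensorClose(x, x.clone())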
10 | 11 | import unittest 12 | import torch 13 | 14 | 15 | class TorchTestCase(unittest.TestCase): 16 | def assertTensorClose(self, x, y): 17 | adiff = float((x - y).abs().max()) 18 | if (y == 0).all(): 19 | rdiff = 'NaN' 20 | else: 21 | rdiff = float((adiff / y).abs().max()) 22 | 23 | message = ( 24 | 'Tensor close check failed\n' 25 | 'adiff={}\n' 26 | 'rdiff={}\n' 27 | ).format(adiff, rdiff) 28 | self.assertTrue(torch.allclose(x, y, atol=1e-5, rtol=1e-3), message) 29 | 30 | -------------------------------------------------------------------------------- /keypoint_pipeline/myloftr/plot_hist.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | 4 | def plot_hist_helper(path): 5 | # An "interface" to matplotlib.axes.Axes.hist() method 6 | plt.figure() 7 | data = np.load(f'{path}/resnpy.npy', allow_pickle=True) 8 | n, bins, patches = plt.hist(x=data, bins=np.linspace(0, 100, 20)) 9 | plt.title("Test MACE") 10 | plt.ylim(0, 20000) 11 | plt.xlabel("MACE") 12 | plt.ylabel("Frequency") 13 | plt.savefig(f"{path}/hist.png") 14 | plt.close() 15 | 16 | # plt.figure() 17 | # flow_data = np.load(f'{path}/flownpy.npy', allow_pickle=True) 18 | # n, bins, patches = plt.hist(x=flow_data, bins=np.linspace(0, 100, 20)) 19 | # plt.title("Test Flow") 20 | # plt.ylim(0, 20000) 21 | # plt.xlabel("Flow") 22 | # plt.ylabel("Frequency") 23 | # plt.savefig(f"{path}/flowhist.png") 24 | # plt.close() 25 | 26 | if __name__ == '__main__': 27 | path = "IHN_results/satellite_thermal_dense" 28 | plot_hist_helper(path) -------------------------------------------------------------------------------- /keypoint_pipeline/myr2d2/evaluate.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | sys.path.append('core') 4 | 5 | from PIL import Image 6 | import argparse 7 | import os 8 | import numpy as np 9 | import torch 10 | import torchvision 11 | import torch.nn.functional as F 12 | from tqdm import tqdm 13 | import matplotlib.pyplot as plt 14 | 15 | import datasets_4cor_img as datasets 16 | from utils import save_overlap_img 17 | import logging 18 | 19 | def validate_process(model, args, total_steps): 20 | """ Perform evaluation on the validation split """ 21 | model.netG.eval() 22 | loss_list = [] 23 | val_loader = datasets.fetch_dataloader(args, split='val') 24 | for i_batch, data_blob in enumerate(tqdm(val_loader)): 25 | 26 | image1, image2, flow_gt, H, query_utm, database_utm, _, _ = [x for x in data_blob] 27 | 28 | if i_batch == 0: 29 | logging.info("Check the reproducibility by UTM:") 30 | logging.info(f"the first 5 query UTMs: {query_utm[:5]}") 31 | logging.info(f"the first 5 database UTMs: {database_utm[:5]}") 32 | 33 | flow_4cor = torch.zeros((flow_gt.shape[0], 2, 2, 2)) 34 | flow_4cor[:, :, 0, 0] = flow_gt[:, :, 0, 0] 35 | flow_4cor[:, :, 0, 1] = flow_gt[:, :, 0, -1] 36 | flow_4cor[:, :, 1, 0] = flow_gt[:, :, -1, 0] 37 | flow_4cor[:, :, 1, 1] = flow_gt[:, :, -1, -1] 38 | device = args.device 39 | image1 = image1.to(device) 40 | image2 = image2.to(device) 41 | model.set_input(image1, image2, flow_gt) 42 | with torch.no_grad(): 43 | model.forward() 44 | model.calculate_G() 45 | metrics = model.metrics 46 | loss_list.append(metrics['loss']) 47 | 48 | # if i_batch == 0: 49 | # # Visualize 50 | # save_overlap_img(torchvision.utils.make_grid(model.image_1, nrow=16, padding = 16, pad_value=0), 51 | # torchvision.utils.make_grid(model.fake_warped_image_2, nrow=16,
padding = 16, pad_value=0), 52 | # args.save_dir + '/val_overlap_pred.png') 53 | # save_overlap_img(torchvision.utils.make_grid(model.image_1, nrow=16, padding = 16, pad_value=0), 54 | # torchvision.utils.make_grid(model.real_warped_image_2, nrow=16, padding = 16, pad_value=0), 55 | # args.save_dir + '/val_overlap_gt.png') 56 | # if args.two_stages: 57 | # save_overlap_img(torchvision.utils.make_grid(model.image_1_crop, nrow=16, padding = 16, pad_value=0), 58 | # torchvision.utils.make_grid(model.image_2, nrow=16, padding = 16, pad_value=0), 59 | # args.save_dir + '/val_overlap_crop.png') 60 | model.netG.train() 61 | print(loss_list) 62 | loss = np.mean(np.stack(loss_list)) 63 | logging.info("Validation LOSS: %f" % loss) 64 | return {'val_loss': loss} -------------------------------------------------------------------------------- /keypoint_pipeline/myr2d2/model/ap_loss.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-present NAVER Corp. 2 | # CC BY-NC-SA 3.0 3 | # Available only for non-commercial use 4 | 5 | import pdb 6 | import numpy as np 7 | import torch 8 | import torch.nn as nn 9 | 10 | 11 | class APLoss (nn.Module): 12 | """ Differentiable AP loss, through quantization. 13 | 14 | Input: (N, M) values in [min, max] 15 | label: (N, M) values in {0, 1} 16 | 17 | Returns: list of query AP (for each n in {1..N}) 18 | Note: typically, you want to minimize 1 - mean(AP) 19 | """ 20 | def __init__(self, nq=25, min=0, max=1, euc=False): 21 | nn.Module.__init__(self) 22 | assert isinstance(nq, int) and 2 <= nq <= 100 23 | self.nq = nq 24 | self.min = min 25 | self.max = max 26 | self.euc = euc 27 | gap = max - min 28 | assert gap > 0 29 | 30 | # init quantizer = non-learnable (fixed) convolution 31 | self.quantizer = q = nn.Conv1d(1, 2*nq, kernel_size=1, bias=True) 32 | a = (nq-1) / gap 33 | # 1st half = lines passing through (min+x,1) and (min+x+1/a,0) with x = {nq-1..0}*gap/(nq-1) 34 | q.weight.data[:nq] = -a 35 | q.bias.data[:nq] = torch.from_numpy(a*min + np.arange(nq, 0, -1)) # b = 1 + a*(min+x) 36 | # 2nd half = lines passing through (min+x,1) and (min+x-1/a,0) with x = {nq-1..0}*gap/(nq-1) 37 | q.weight.data[nq:] = a 38 | q.bias.data[nq:] = torch.from_numpy(np.arange(2-nq, 2, 1) - a*min) # b = 1 - a*(min+x) 39 | # first and last one are special: just horizontal straight line 40 | q.weight.data[0] = q.weight.data[-1] = 0 41 | q.bias.data[0] = q.bias.data[-1] = 1 42 | 43 | def compute_AP(self, x, label): 44 | N, M = x.shape 45 | if self.euc: # Euclidean distance in the same range as similarities 46 | x = 1 - torch.sqrt(2.001 - 2*x) 47 | 48 | # quantize all predictions 49 | q = self.quantizer(x.unsqueeze(1)) 50 | q = torch.min(q[:,:self.nq], q[:,self.nq:]).clamp(min=0) # N x Q x M 51 | 52 | nbs = q.sum(dim=-1) # number of samples N x Q = c 53 | rec = (q * label.view(N,1,M).float()).sum(dim=-1) # number of correct samples = c+ N x Q 54 | prec = rec.cumsum(dim=-1) / (1e-16 + nbs.cumsum(dim=-1)) # precision 55 | rec /= rec.sum(dim=-1).unsqueeze(1) # norm in [0,1] 56 | 57 | ap = (prec * rec).sum(dim=-1) # per-image AP 58 | return ap 59 | 60 | def forward(self, x, label): 61 | assert x.shape == label.shape # N x M 62 | return self.compute_AP(x, label) 63 | 64 | 65 | 66 | 67 | 68 | -------------------------------------------------------------------------------- /keypoint_pipeline/myr2d2/model/losses.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-present NAVER Corp.
2 | # CC BY-NC-SA 3.0 3 | # Available only for non-commercial use 4 | 5 | import pdb 6 | 7 | import torch 8 | import torch.nn as nn 9 | import torch.nn.functional as F 10 | 11 | from model.sampler import * 12 | from model.repeatability_loss import * 13 | from model.reliability_loss import * 14 | 15 | 16 | class MultiLoss (nn.Module): 17 | """ Combines several loss functions for convenience. 18 | *args: [loss weight (float), loss creator, ... ] 19 | 20 | Example: 21 | loss = MultiLoss( 1, MyFirstLoss(), 0.5, MySecondLoss() ) 22 | """ 23 | def __init__(self, *args, dbg=()): 24 | nn.Module.__init__(self) 25 | assert len(args) % 2 == 0, 'args must be a list of (float, loss)' 26 | self.weights = [] 27 | self.losses = nn.ModuleList() 28 | for i in range(len(args)//2): 29 | weight = float(args[2*i+0]) 30 | loss = args[2*i+1] 31 | assert isinstance(loss, nn.Module), "%s is not a loss!" % loss 32 | self.weights.append(weight) 33 | self.losses.append(loss) 34 | 35 | def forward(self, select=None, **variables): 36 | assert not select or all(1<=n<=len(self.losses) for n in select) 37 | d = dict() 38 | cum_loss = 0 39 | for num, (weight, loss_func) in enumerate(zip(self.weights, self.losses),1): 40 | if select is not None and num not in select: continue 41 | l = loss_func(**{k:v for k,v in variables.items()}) 42 | if isinstance(l, tuple): 43 | assert len(l) == 2 and isinstance(l[1], dict) 44 | else: 45 | l = l, {loss_func.name:l} 46 | cum_loss = cum_loss + weight * l[0] 47 | for key,val in l[1].items(): 48 | d['loss_'+key] = float(val) 49 | d['loss'] = float(cum_loss) 50 | return cum_loss, d 51 | 52 | 53 | 54 | 55 | 56 | 57 | -------------------------------------------------------------------------------- /keypoint_pipeline/myr2d2/model/reliability_loss.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-present NAVER Corp. 2 | # CC BY-NC-SA 3.0 3 | # Available only for non-commercial use 4 | 5 | import pdb 6 | import torch.nn as nn 7 | import torch.nn.functional as F 8 | 9 | from model.ap_loss import APLoss 10 | 11 | 12 | class PixelAPLoss (nn.Module): 13 | """ Computes the pixel-wise AP loss: 14 | Given two images and ground-truth optical flow, computes the AP per pixel. 15 | 16 | feat1: (B, C, H, W) pixel-wise features extracted from img1 17 | feat2: (B, C, H, W) pixel-wise features extracted from img2 18 | aflow: (B, 2, H, W) absolute flow: aflow[...,y1,x1] = x2,y2 19 | """ 20 | def __init__(self, sampler, nq=20): 21 | nn.Module.__init__(self) 22 | self.aploss = APLoss(nq, min=0, max=1, euc=False) 23 | self.name = 'pixAP' 24 | self.sampler = sampler 25 | 26 | def loss_from_ap(self, ap, rel): 27 | return 1 - ap 28 | 29 | def forward(self, descriptors, aflow, **kw): 30 | # subsample things 31 | scores, gt, msk, qconf = self.sampler(descriptors, kw.get('reliability'), aflow) 32 | 33 | # compute pixel-wise AP 34 | n = qconf.numel() 35 | if n == 0: return 0 36 | scores, gt = scores.view(n,-1), gt.view(n,-1) 37 | ap = self.aploss(scores, gt).view(msk.shape) 38 | 39 | pixel_loss = self.loss_from_ap(ap, qconf) 40 | 41 | loss = pixel_loss[msk].mean() 42 | return loss 43 | 44 | 45 | class ReliabilityLoss (PixelAPLoss): 46 | """ Same as PixelAPLoss, but also trains a pixel-wise confidence 47 | that this pixel is going to have a good AP.
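    Concretely (see loss_from_ap below), the per-pixel loss is
    1 - ap*rel - (1 - rel)*base, so a pixel predicted as unreliable (rel -> 0)
    pays the constant penalty 1 - base instead of being judged on its AP.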
48 | """ 49 | def __init__(self, sampler, base=0.5, **kw): 50 | PixelAPLoss.__init__(self, sampler, **kw) 51 | assert 0 <= base < 1 52 | self.base = base 53 | self.name = 'reliability' 54 | 55 | def loss_from_ap(self, ap, rel): 56 | return 1 - ap*rel - (1-rel)*self.base 57 | 58 | 59 | 60 | -------------------------------------------------------------------------------- /keypoint_pipeline/myr2d2/model/repeatability_loss.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-present NAVER Corp. 2 | # CC BY-NC-SA 3.0 3 | # Available only for non-commercial use 4 | 5 | import pdb 6 | 7 | import torch 8 | import torch.nn as nn 9 | import torch.nn.functional as F 10 | 11 | from model.sampler import FullSampler 12 | 13 | class CosimLoss (nn.Module): 14 | """ Try to make the repeatability repeatable from one image to the other. 15 | """ 16 | def __init__(self, N=16): 17 | nn.Module.__init__(self) 18 | self.name = f'cosim{N}' 19 | self.patches = nn.Unfold(N, padding=0, stride=N//2) 20 | 21 | def extract_patches(self, sal): 22 | patches = self.patches(sal).transpose(1,2) # flatten 23 | patches = F.normalize(patches, p=2, dim=2) # norm 24 | return patches 25 | 26 | def forward(self, repeatability, aflow, **kw): 27 | B,two,H,W = aflow.shape 28 | assert two == 2 29 | 30 | # normalize 31 | sali1, sali2 = repeatability 32 | grid = FullSampler._aflow_to_grid(aflow) 33 | sali2 = F.grid_sample(sali2, grid, mode='bilinear', padding_mode='border') 34 | 35 | patches1 = self.extract_patches(sali1) 36 | patches2 = self.extract_patches(sali2) 37 | cosim = (patches1 * patches2).sum(dim=2) 38 | return 1 - cosim.mean() 39 | 40 | 41 | class PeakyLoss (nn.Module): 42 | """ Try to make the repeatability locally peaky. 43 | 44 | Mechanism: we maximize, for each pixel, the difference between the local mean 45 | and the local max. 46 | """ 47 | def __init__(self, N=16): 48 | nn.Module.__init__(self) 49 | self.name = f'peaky{N}' 50 | assert N % 2 == 0, 'N must be pair' 51 | self.preproc = nn.AvgPool2d(3, stride=1, padding=1) 52 | self.maxpool = nn.MaxPool2d(N+1, stride=1, padding=N//2) 53 | self.avgpool = nn.AvgPool2d(N+1, stride=1, padding=N//2) 54 | 55 | def forward_one(self, sali): 56 | sali = self.preproc(sali) # remove super high frequency 57 | return 1 - (self.maxpool(sali) - self.avgpool(sali)).mean() 58 | 59 | def forward(self, repeatability, **kw): 60 | sali1, sali2 = repeatability 61 | return (self.forward_one(sali1) + self.forward_one(sali2)) /2 62 | 63 | 64 | 65 | 66 | 67 | -------------------------------------------------------------------------------- /keypoint_pipeline/myr2d2/model/sync_batchnorm/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # File : __init__.py 3 | # Author : Jiayuan Mao 4 | # Email : maojiayuan@gmail.com 5 | # Date : 27/01/2018 6 | # 7 | # This file is part of Synchronized-BatchNorm-PyTorch. 8 | # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch 9 | # Distributed under MIT License. 
10 | 11 | from .batchnorm import set_sbn_eps_mode 12 | from .batchnorm import SynchronizedBatchNorm1d, SynchronizedBatchNorm2d, SynchronizedBatchNorm3d 13 | from .batchnorm import patch_sync_batchnorm, convert_model 14 | from .replicate import DataParallelWithCallback, patch_replication_callback 15 | -------------------------------------------------------------------------------- /keypoint_pipeline/myr2d2/model/sync_batchnorm/batchnorm_reimpl.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # File : batchnorm_reimpl.py 4 | # Author : acgtyrant 5 | # Date : 11/01/2018 6 | # 7 | # This file is part of Synchronized-BatchNorm-PyTorch. 8 | # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch 9 | # Distributed under MIT License. 10 | 11 | import torch 12 | import torch.nn as nn 13 | import torch.nn.init as init 14 | 15 | __all__ = ['BatchNorm2dReimpl'] 16 | 17 | 18 | class BatchNorm2dReimpl(nn.Module): 19 | """ 20 | A re-implementation of batch normalization, used for testing the numerical 21 | stability. 22 | 23 | Author: acgtyrant 24 | See also: 25 | https://github.com/vacancy/Synchronized-BatchNorm-PyTorch/issues/14 26 | """ 27 | def __init__(self, num_features, eps=1e-5, momentum=0.1): 28 | super().__init__() 29 | 30 | self.num_features = num_features 31 | self.eps = eps 32 | self.momentum = momentum 33 | self.weight = nn.Parameter(torch.empty(num_features)) 34 | self.bias = nn.Parameter(torch.empty(num_features)) 35 | self.register_buffer('running_mean', torch.zeros(num_features)) 36 | self.register_buffer('running_var', torch.ones(num_features)) 37 | self.reset_parameters() 38 | 39 | def reset_running_stats(self): 40 | self.running_mean.zero_() 41 | self.running_var.fill_(1) 42 | 43 | def reset_parameters(self): 44 | self.reset_running_stats() 45 | init.uniform_(self.weight) 46 | init.zeros_(self.bias) 47 | 48 | def forward(self, input_): 49 | batchsize, channels, height, width = input_.size() 50 | numel = batchsize * height * width 51 | input_ = input_.permute(1, 0, 2, 3).contiguous().view(channels, numel) 52 | sum_ = input_.sum(1) 53 | sum_of_square = input_.pow(2).sum(1) 54 | mean = sum_ / numel 55 | sumvar = sum_of_square - sum_ * mean 56 | 57 | self.running_mean = ( 58 | (1 - self.momentum) * self.running_mean 59 | + self.momentum * mean.detach() 60 | ) 61 | unbias_var = sumvar / (numel - 1) 62 | self.running_var = ( 63 | (1 - self.momentum) * self.running_var 64 | + self.momentum * unbias_var.detach() 65 | ) 66 | 67 | bias_var = sumvar / numel 68 | inv_std = 1 / (bias_var + self.eps).pow(0.5) 69 | output = ( 70 | (input_ - mean.unsqueeze(1)) * inv_std.unsqueeze(1) * 71 | self.weight.unsqueeze(1) + self.bias.unsqueeze(1)) 72 | 73 | return output.view(channels, batchsize, height, width).permute(1, 0, 2, 3).contiguous() 74 | 75 | -------------------------------------------------------------------------------- /keypoint_pipeline/myr2d2/model/sync_batchnorm/unittest.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # File : unittest.py 3 | # Author : Jiayuan Mao 4 | # Email : maojiayuan@gmail.com 5 | # Date : 27/01/2018 6 | # 7 | # This file is part of Synchronized-BatchNorm-PyTorch. 8 | # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch 9 | # Distributed under MIT License. 
10 | 11 | import unittest 12 | import torch 13 | 14 | 15 | class TorchTestCase(unittest.TestCase): 16 | def assertTensorClose(self, x, y): 17 | adiff = float((x - y).abs().max()) 18 | if (y == 0).all(): 19 | rdiff = 'NaN' 20 | else: 21 | rdiff = float((adiff / y).abs().max()) 22 | 23 | message = ( 24 | 'Tensor close check failed\n' 25 | 'adiff={}\n' 26 | 'rdiff={}\n' 27 | ).format(adiff, rdiff) 28 | self.assertTrue(torch.allclose(x, y, atol=1e-5, rtol=1e-3), message) 29 | 30 | -------------------------------------------------------------------------------- /keypoint_pipeline/myr2d2/plot_hist.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | 4 | def plot_hist_helper(path): 5 | # An "interface" to matplotlib.axes.Axes.hist() method 6 | plt.figure() 7 | data = np.load(f'{path}/resnpy.npy', allow_pickle=True) 8 | n, bins, patches = plt.hist(x=data, bins=np.linspace(0, 100, 20)) 9 | plt.title("Test MACE") 10 | plt.ylim(0, 20000) 11 | plt.xlabel("MACE") 12 | plt.ylabel("Frequency") 13 | plt.savefig(f"{path}/hist.png") 14 | plt.close() 15 | 16 | # plt.figure() 17 | # flow_data = np.load(f'{path}/flownpy.npy', allow_pickle=True) 18 | # n, bins, patches = plt.hist(x=flow_data, bins=np.linspace(0, 100, 20)) 19 | # plt.title("Test Flow") 20 | # plt.ylim(0, 20000) 21 | # plt.xlabel("Flow") 22 | # plt.ylabel("Frequency") 23 | # plt.savefig(f"{path}/flowhist.png") 24 | # plt.close() 25 | 26 | if __name__ == '__main__': 27 | path = "IHN_results/satellite_thermal_dense" 28 | plot_hist_helper(path) -------------------------------------------------------------------------------- /keypoint_pipeline/myr2d2/tools/common.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-present NAVER Corp. 2 | # CC BY-NC-SA 3.0 3 | # Available only for non-commercial use 4 | 5 | import os, pdb#, shutil 6 | import numpy as np 7 | import torch 8 | 9 | 10 | def mkdir_for(file_path): 11 | os.makedirs(os.path.split(file_path)[0], exist_ok=True) 12 | 13 | 14 | def model_size(model): 15 | ''' Computes the number of parameters of the model 16 | ''' 17 | size = 0 18 | for weights in model.state_dict().values(): 19 | size += np.prod(weights.shape) 20 | return size 21 | 22 | 23 | def torch_set_gpu(gpus): 24 | if type(gpus) is int: 25 | gpus = [gpus] 26 | 27 | cuda = all(gpu>=0 for gpu in gpus) 28 | 29 | if cuda: 30 | os.environ['CUDA_VISIBLE_DEVICES'] = ','.join([str(gpu) for gpu in gpus]) 31 | assert cuda and torch.cuda.is_available(), "%s has GPUs %s unavailable" % ( 32 | os.environ['HOSTNAME'],os.environ['CUDA_VISIBLE_DEVICES']) 33 | torch.backends.cudnn.benchmark = True # speed-up cudnn 34 | torch.backends.cudnn.fastest = True # even more speed-up? 35 | print( 'Launching on GPUs ' + os.environ['CUDA_VISIBLE_DEVICES'] ) 36 | 37 | else: 38 | print( 'Launching on CPU' ) 39 | 40 | return cuda 41 | 42 | -------------------------------------------------------------------------------- /keypoint_pipeline/myr2d2/tools/trainer.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-present NAVER Corp. 2 | # CC BY-NC-SA 3.0 3 | # Available only for non-commercial use 4 | 5 | import pdb 6 | from tqdm import tqdm 7 | from collections import defaultdict 8 | 9 | import torch 10 | import torch.nn as nn 11 | 12 | 13 | class Trainer (nn.Module): 14 | """ Helper class to train a deep network. 
15 | Override this class's `forward_backward` method for your actual needs. 16 | 17 | Usage: 18 | train = Trainer(net, loader, loss, optimizer) 19 | for epoch in range(n_epochs): 20 | train() 21 | """ 22 | def __init__(self, net, loader, loss, optimizer): 23 | nn.Module.__init__(self) 24 | self.net = net 25 | self.loader = loader 26 | self.loss_func = loss 27 | self.optimizer = optimizer 28 | 29 | def iscuda(self): 30 | return next(self.net.parameters()).device != torch.device('cpu') 31 | 32 | def todevice(self, x): 33 | if isinstance(x, dict): 34 | return {k:self.todevice(v) for k,v in x.items()} 35 | if isinstance(x, (tuple,list)): 36 | return [self.todevice(v) for v in x] 37 | 38 | if self.iscuda(): 39 | return x.contiguous().cuda(non_blocking=True) 40 | else: 41 | return x.cpu() 42 | 43 | def __call__(self): 44 | self.net.train() 45 | 46 | stats = defaultdict(list) 47 | 48 | for iter,inputs in enumerate(tqdm(self.loader)): 49 | inputs = self.todevice(inputs) 50 | 51 | # compute gradient and do model update 52 | self.optimizer.zero_grad() 53 | 54 | loss, details = self.forward_backward(inputs) 55 | if torch.isnan(loss): 56 | raise RuntimeError('Loss is NaN') 57 | 58 | self.optimizer.step() 59 | 60 | for key, val in details.items(): 61 | stats[key].append( val ) 62 | 63 | print(" Summary of losses during this epoch:") 64 | mean = lambda lis: sum(lis) / len(lis) 65 | for loss_name, vals in stats.items(): 66 | N = 1 + len(vals)//10 67 | print(f" - {loss_name:20}:", end='') 68 | print(f" {mean(vals[:N]):.3f} --> {mean(vals[-N:]):.3f} (avg: {mean(vals):.3f})") 69 | return mean(stats['loss']) # return average loss 70 | 71 | def forward_backward(self, inputs): 72 | raise NotImplementedError() 73 | 74 | 75 | 76 | 77 | -------------------------------------------------------------------------------- /local_pipeline/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arplaboratory/UASTHN/1b27df8b5b4305f8f41936736a05e8117f7f20db/local_pipeline/__init__.py -------------------------------------------------------------------------------- /local_pipeline/commons.py: -------------------------------------------------------------------------------- 1 | """ 2 | This file contains some functions and classes which can be useful in very diverse projects. 3 | """ 4 | 5 | import os 6 | import sys 7 | import torch 8 | import random 9 | import logging 10 | import traceback 11 | import numpy as np 12 | from os.path import join 13 | 14 | 15 | def make_deterministic(seed=0): 16 | """Make results deterministic. If seed == -1, do not make deterministic. 17 | Running the script in a deterministic way might slow it down. 18 | """ 19 | if seed == -1: 20 | return 21 | random.seed(seed) 22 | np.random.seed(seed) 23 | torch.manual_seed(seed) 24 | torch.cuda.manual_seed(seed) 25 | torch.backends.cudnn.deterministic = True 26 | torch.backends.cudnn.benchmark = False 27 | 28 | 29 | def setup_logging( 30 | save_dir, console="debug", info_filename="info.log", debug_filename="debug.log" 31 | ): 32 | """Set up logging files and console output. 33 | Creates one file for INFO logs and one for DEBUG logs. 34 | Args: 35 | save_dir (str): the folder in which to save the log files (it will be created). 36 | console (str): 37 | if == "debug" prints on console debug messages and higher 38 | if == "info" prints on console info messages and higher 39 | if == None does not use console (useful when a logger has already been set) 40 | info_filename (str): the name of the info file.
if None, don't create info file 41 | debug_filename (str): the name of the debug file. if None, don't create debug file 42 | """ 43 | if os.path.exists(save_dir): 44 | raise FileExistsError(f"{save_dir} already exists!") 45 | os.makedirs(save_dir, exist_ok=True) 46 | # logging.Logger.manager.loggerDict.keys() to check which loggers are in use 47 | base_formatter = logging.Formatter( 48 | "%(asctime)s %(message)s", "%Y-%m-%d %H:%M:%S") 49 | logger = logging.getLogger("") 50 | logger.setLevel(logging.DEBUG) 51 | 52 | if info_filename is not None: 53 | info_file_handler = logging.FileHandler(join(save_dir, info_filename)) 54 | info_file_handler.setLevel(logging.INFO) 55 | info_file_handler.setFormatter(base_formatter) 56 | logger.addHandler(info_file_handler) 57 | 58 | if debug_filename is not None: 59 | debug_file_handler = logging.FileHandler( 60 | join(save_dir, debug_filename)) 61 | debug_file_handler.setLevel(logging.DEBUG) 62 | debug_file_handler.setFormatter(base_formatter) 63 | logger.addHandler(debug_file_handler) 64 | 65 | if console is not None: 66 | console_handler = logging.StreamHandler() 67 | if console == "debug": 68 | console_handler.setLevel(logging.DEBUG) 69 | if console == "info": 70 | console_handler.setLevel(logging.INFO) 71 | console_handler.setFormatter(base_formatter) 72 | logger.addHandler(console_handler) 73 | 74 | def exception_handler(type_, value, tb): 75 | logger.info("\n" + "".join(traceback.format_exception(type_, value, tb))) 76 | 77 | sys.excepthook = exception_handler 78 | -------------------------------------------------------------------------------- /local_pipeline/ensembles/ensemble_128_DHN.txt: -------------------------------------------------------------------------------- 1 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-28_18-02-55-fd0e2a0f-93fe-43b2-b375-5a63b549d55e/UAGL.pth 2 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-28_19-54-45-d671fa02-a5fb-4de6-ae64-d21f6e90b8ee/UAGL.pth 3 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-28_20-32-51-a34b6df3-7c41-400c-b0df-f47a3a3eca10/UAGL.pth 4 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-28_21-29-08-af2d4063-d758-475f-82b2-d1a1c09ef9fa/UAGL.pth 5 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-29_02-16-04-51fe7fc9-92b1-4255-9387-03e20e6a5bc7/UAGL.pth -------------------------------------------------------------------------------- /local_pipeline/ensembles/ensemble_128_IHN.txt: -------------------------------------------------------------------------------- 1 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_larger_ori_train-2024-02-18_20-50-42-ed74e93e-0c1a-4926-9d12-21aa8c257e12/RHWF.pth 2 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-28_19-41-46-a575c114-611d-4784-9f98-7baa364e292b/UAGL.pth 3 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-28_19-53-14-35d30204-168b-40ae-869b-701556ee7e68/UAGL.pth 4 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-28_20-43-11-a4095c4c-e4f9-46e4-bc7f-56da39572ff5/UAGL.pth 5 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-28_22-02-31-458c2af9-33fd-43b7-b587-006857e4ac7c/UAGL.pth
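Editorial note on the ensemble lists: each ensembles/*.txt file in this folder is a plain list of five checkpoint paths, one per line. A minimal sketch of how such a list might be consumed (an illustration only; the repository's actual loading logic lives in local_pipeline/evaluate.py, and build_model is a hypothetical constructor):

import torch

def load_ensemble(list_path, device="cuda"):
    # One checkpoint path per line; skip blank lines.
    with open(list_path) as f:
        paths = [line.strip() for line in f if line.strip()]
    models = []
    for p in paths:
        model = build_model()  # hypothetical: build the network architecture
        model.load_state_dict(torch.load(p, map_location=device))
        models.append(model.to(device).eval())
    return models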
-------------------------------------------------------------------------------- /local_pipeline/ensembles/ensemble_128_STHN.txt: -------------------------------------------------------------------------------- 1 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-30_15-38-56-174bfc03-f499-45c1-86cf-b8ddeaaa6ec5/UAGL.pth 2 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-30_15-39-02-748d8163-8523-48e6-a84c-b2b10105b923/UAGL.pth 3 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-30_16-04-08-9be1fb1d-90cb-4ca5-8a84-47721bb47c14/UAGL.pth 4 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-30_16-04-08-210c5ac3-7ce9-49d6-bc35-1356d4857ce7/UAGL.pth 5 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-30_16-04-08-3d347446-a4d0-4f44-9cd3-57767b4a3683/UAGL.pth -------------------------------------------------------------------------------- /local_pipeline/ensembles/ensemble_256_DHN.txt: -------------------------------------------------------------------------------- 1 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-19_14-39-21-5d862420-4465-44c5-95c3-9ddd8e796d51/UAGL.pth 2 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-28_15-37-57-efd28c71-aca8-4ae8-b5cd-b753f270c1a9/UAGL.pth 3 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-28_15-57-16-39687189-f669-46f8-a87a-aae3e969feab/UAGL.pth 4 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-28_16-02-10-e546a334-6059-4e45-8a78-b97984f7b939/UAGL.pth 5 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-28_17-54-35-bd2195bf-2b03-4578-b063-6c4b0b4c09d2/UAGL.pth -------------------------------------------------------------------------------- /local_pipeline/ensembles/ensemble_256_IHN.txt: -------------------------------------------------------------------------------- 1 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_larger_ori_train-2024-02-18_18-44-07-cfb3bb2c-e987-4c17-bdc0-731bc776dcdd/RHWF.pth 2 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-28_15-33-29-719caef5-7b5c-4821-8370-0230ed43cd27/UAGL.pth 3 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-28_15-45-21-97fd97a2-850b-4aec-be4f-b91830298b84/UAGL.pth 4 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-28_16-01-09-ff7525f5-5185-4043-8304-e1574cd07aee/UAGL.pth 5 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-28_17-04-06-68270bf3-5c85-4323-80f7-ffdd9b13954d/UAGL.pth -------------------------------------------------------------------------------- /local_pipeline/ensembles/ensemble_256_STHN.txt: -------------------------------------------------------------------------------- 1 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-30_14-15-37-b45bf3b1-0e14-490e-b525-d2e9837838f8/UAGL.pth 2 | 
logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-30_15-01-52-037e3cda-5700-4970-a275-3665300d24be/UAGL.pth 3 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-30_15-02-21-dc37f1fa-2be5-4e6a-b090-f0fb959d5fce/UAGL.pth 4 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-30_15-25-39-43a3ad48-cc87-40b8-8009-33d12e0e12d1/UAGL.pth 5 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-30_15-32-08-1e6b132b-db0c-4512-9e94-eae3ddee817e/UAGL.pth -------------------------------------------------------------------------------- /local_pipeline/ensembles/ensemble_512_DHN.txt: -------------------------------------------------------------------------------- 1 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-19_12-34-56-acfb33d8-497c-4396-98d7-7485fdc30a13/UAGL.pth 2 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-28_03-03-04-93d476f0-868e-4cb5-bbf6-d63235544f8e/UAGL.pth 3 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-27_21-13-58-0b4bda34-0b1e-4315-8149-5e6e4f84b3c3/UAGL.pth 4 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-28_03-05-03-c12014de-beef-4a9c-8668-d8d38949b2f9/UAGL.pth 5 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-28_03-07-05-781558cb-e245-4779-9cfc-9f774d11cfda/UAGL.pth -------------------------------------------------------------------------------- /local_pipeline/ensembles/ensemble_512_IHN.txt: -------------------------------------------------------------------------------- 1 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_larger_ori_train-2024-02-18_18-44-07-97a33213-80a2-4f50-9d85-9ad04d7df728/RHWF.pth 2 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-28_02-40-01-682823f2-c8a5-4e4a-9a99-4ad7d603923c/UAGL.pth 3 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-28_03-02-06-e2d3d674-8df6-4122-a861-0a739bfae3e6/UAGL.pth 4 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-28_03-02-17-f69b593b-7c70-4868-b457-05e48fd083f0/UAGL.pth 5 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-28_03-02-02-c7896f27-0dd3-4815-9f8d-328ff4ae5f76/UAGL.pth -------------------------------------------------------------------------------- /local_pipeline/ensembles/ensemble_512_STHN.txt: -------------------------------------------------------------------------------- 1 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-04-27_00-45-46-07ebf9cb-0452-4163-a883-ef4a57b7a5a8/UAGL.pth 2 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-06_19-58-59-65f57d99-a917-4590-99b1-3f9301c8d8e8/UAGL.pth 3 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-06_20-02-59-a3acba4b-a8c5-42cc-afd2-be12e06e429c/UAGL.pth 4 | logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-06_20-03-08-5481be09-c8d7-479b-80fb-660b2b11f43e/UAGL.pth 5 | 
logs/local_he/satellite_0_thermalmapping_135_nocontrast_dense_exclusion_largest_ori_train-2024-05-06_23-23-34-414eb52c-fc3e-4ab3-8f77-933e22cdd297/UAGL.pth -------------------------------------------------------------------------------- /local_pipeline/model/ATT/attention_package.egg-info/PKG-INFO: -------------------------------------------------------------------------------- 1 | Metadata-Version: 2.1 2 | Name: attention-package 3 | Version: 0.2 4 | Summary: attention layer 5 | Author: Saurus 6 | Author-email: jia1saurus@gmail.com 7 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/attention_package.egg-info/SOURCES.txt: -------------------------------------------------------------------------------- 1 | setup.py 2 | attention_package.egg-info/PKG-INFO 3 | attention_package.egg-info/SOURCES.txt 4 | attention_package.egg-info/dependency_links.txt 5 | attention_package.egg-info/top_level.txt 6 | src/attention_cuda.cpp 7 | src/attention_kernel.cu -------------------------------------------------------------------------------- /local_pipeline/model/ATT/attention_package.egg-info/dependency_links.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/attention_package.egg-info/top_level.txt: -------------------------------------------------------------------------------- 1 | at_cuda 2 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/dist/attention_package-0.2-py3.9-linux-x86_64.egg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arplaboratory/UASTHN/1b27df8b5b4305f8f41936736a05e8117f7f20db/local_pipeline/model/ATT/dist/attention_package-0.2-py3.9-linux-x86_64.egg -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/.appveyor.yml: -------------------------------------------------------------------------------- 1 | version: 1.0.{build} 2 | image: 3 | - Visual Studio 2017 4 | - Visual Studio 2015 5 | test: off 6 | skip_branch_with_pr: true 7 | build: 8 | parallel: true 9 | platform: 10 | - x64 11 | - x86 12 | environment: 13 | matrix: 14 | - PYTHON: 36 15 | CPP: 14 16 | CONFIG: Debug 17 | - PYTHON: 27 18 | CPP: 14 19 | CONFIG: Debug 20 | - CONDA: 36 21 | CPP: latest 22 | CONFIG: Release 23 | matrix: 24 | exclude: 25 | - image: Visual Studio 2015 26 | platform: x86 27 | - image: Visual Studio 2015 28 | CPP: latest 29 | - image: Visual Studio 2017 30 | CPP: latest 31 | platform: x86 32 | install: 33 | - ps: | 34 | if ($env:PLATFORM -eq "x64") { $env:CMAKE_ARCH = "x64" } 35 | if ($env:APPVEYOR_JOB_NAME -like "*Visual Studio 2017*") { 36 | $env:CMAKE_GENERATOR = "Visual Studio 15 2017" 37 | $env:CMAKE_INCLUDE_PATH = "C:\Libraries\boost_1_64_0" 38 | $env:CXXFLAGS = "-permissive-" 39 | } else { 40 | $env:CMAKE_GENERATOR = "Visual Studio 14 2015" 41 | } 42 | if ($env:PYTHON) { 43 | if ($env:PLATFORM -eq "x64") { $env:PYTHON = "$env:PYTHON-x64" } 44 | $env:PATH = "C:\Python$env:PYTHON\;C:\Python$env:PYTHON\Scripts\;$env:PATH" 45 | python -W ignore -m pip install --upgrade pip wheel 46 | python -W ignore -m pip install pytest numpy --no-warn-script-location 47 | } elseif ($env:CONDA) { 48 | if ($env:CONDA -eq "27") { $env:CONDA = "" } 49 | if ($env:PLATFORM -eq "x64") { $env:CONDA = "$env:CONDA-x64" } 50 | 
$env:PATH = "C:\Miniconda$env:CONDA\;C:\Miniconda$env:CONDA\Scripts\;$env:PATH" 51 | $env:PYTHONHOME = "C:\Miniconda$env:CONDA" 52 | conda --version 53 | conda install -y -q pytest numpy scipy 54 | } 55 | - ps: | 56 | Start-FileDownload 'http://bitbucket.org/eigen/eigen/get/3.3.3.zip' 57 | 7z x 3.3.3.zip -y > $null 58 | $env:CMAKE_INCLUDE_PATH = "eigen-eigen-67e894c6cd8f;$env:CMAKE_INCLUDE_PATH" 59 | build_script: 60 | - cmake -G "%CMAKE_GENERATOR%" -A "%CMAKE_ARCH%" 61 | -DPYBIND11_CPP_STANDARD=/std:c++%CPP% 62 | -DPYBIND11_WERROR=ON 63 | -DDOWNLOAD_CATCH=ON 64 | -DCMAKE_SUPPRESS_REGENERATION=1 65 | . 66 | - set MSBuildLogger="C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" 67 | - cmake --build . --config %CONFIG% --target pytest -- /m /v:m /logger:%MSBuildLogger% 68 | - cmake --build . --config %CONFIG% --target cpptest -- /m /v:m /logger:%MSBuildLogger% 69 | - if "%CPP%"=="latest" (cmake --build . --config %CONFIG% --target test_cmake_build -- /m /v:m /logger:%MSBuildLogger%) 70 | on_failure: if exist "tests\test_cmake_build" type tests\test_cmake_build\*.log* 71 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/.gitignore: -------------------------------------------------------------------------------- 1 | CMakeCache.txt 2 | CMakeFiles 3 | Makefile 4 | cmake_install.cmake 5 | .DS_Store 6 | *.so 7 | *.pyd 8 | *.dll 9 | *.sln 10 | *.sdf 11 | *.opensdf 12 | *.vcxproj 13 | *.vcxproj.user 14 | *.filters 15 | example.dir 16 | Win32 17 | x64 18 | Release 19 | Debug 20 | .vs 21 | CTestTestfile.cmake 22 | Testing 23 | autogen 24 | MANIFEST 25 | /.ninja_* 26 | /*.ninja 27 | /docs/.build 28 | *.py[co] 29 | *.egg-info 30 | *~ 31 | .*.swp 32 | .DS_Store 33 | /dist 34 | /build 35 | /cmake/ 36 | .cache/ 37 | sosize-*.txt 38 | pybind11Config*.cmake 39 | pybind11Targets.cmake 40 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "tools/clang"] 2 | path = tools/clang 3 | url = ../../wjakob/clang-cindex-python3 4 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/.readthedocs.yml: -------------------------------------------------------------------------------- 1 | python: 2 | version: 3 3 | requirements_file: docs/requirements.txt 4 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | Thank you for your interest in this project! Please refer to the following 2 | sections on how to contribute code and bug reports. 3 | 4 | ### Reporting bugs 5 | 6 | At the moment, this project is run in the spare time of a single person 7 | ([Wenzel Jakob](http://rgl.epfl.ch/people/wjakob)) with very limited resources 8 | for issue tracker tickets. Thus, before submitting a question or bug report, 9 | please take a moment of your time and ensure that your issue isn't already 10 | discussed in the project documentation provided at 11 | [http://pybind11.readthedocs.org/en/latest](http://pybind11.readthedocs.org/en/latest). 
12 | 13 | Assuming that you have identified a previously unknown problem or an important 14 | question, it's essential that you submit a self-contained and minimal piece of 15 | code that reproduces the problem. In other words: no external dependencies, 16 | isolate the function(s) that cause breakage, submit matched and complete C++ 17 | and Python snippets that can be easily compiled and run on my end. 18 | 19 | ## Pull requests 20 | Contributions are submitted, reviewed, and accepted using Github pull requests. 21 | Please refer to [this 22 | article](https://help.github.com/articles/using-pull-requests) for details and 23 | adhere to the following rules to make the process as smooth as possible: 24 | 25 | * Make a new branch for every feature you're working on. 26 | * Make small and clean pull requests that are easy to review but make sure they 27 | do add value by themselves. 28 | * Add tests for any new functionality and run the test suite (``make pytest``) 29 | to ensure that no existing features break. 30 | * Please run ``flake8`` and ``tools/check-style.sh`` to check your code matches 31 | the project style. (Note that ``check-style.sh`` requires ``gawk``.) 32 | * This project has a strong focus on providing general solutions using a 33 | minimal amount of code, thus small pull requests are greatly preferred. 34 | 35 | ### Licensing of contributions 36 | 37 | pybind11 is provided under a BSD-style license that can be found in the 38 | ``LICENSE`` file. By using, distributing, or contributing to this project, you 39 | agree to the terms and conditions of this license. 40 | 41 | You are under no obligation whatsoever to provide any bug fixes, patches, or 42 | upgrades to the features, functionality or performance of the source code 43 | ("Enhancements") to anyone; however, if you choose to make your Enhancements 44 | available either publicly, or directly to the author of this software, without 45 | imposing a separate written license agreement for such Enhancements, then you 46 | hereby grant the following license: a non-exclusive, royalty-free perpetual 47 | license to install, use, modify, prepare derivative works, incorporate into 48 | other computer software, distribute, and sublicense such enhancements or 49 | derivative works thereof, in binary and source code form. 50 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | Make sure you've completed the following steps before submitting your issue -- thank you! 2 | 3 | 1. Check if your question has already been answered in the [FAQ](http://pybind11.readthedocs.io/en/latest/faq.html) section. 4 | 2. Make sure you've read the [documentation](http://pybind11.readthedocs.io/en/latest/). Your issue may be addressed there. 5 | 3. If those resources didn't help and you only have a short question (not a bug report), consider asking in the [Gitter chat room](https://gitter.im/pybind/Lobby). 6 | 4. If you have a genuine bug report or a more complex question which is not answered in the previous items (or not suitable for chat), please fill in the details below. 7 | 5. Include a self-contained and minimal piece of code that reproduces the problem. If that's not possible, try to make the description as clear as possible. 
8 | 9 | *After reading, remove this checklist and the template text in parentheses below.* 10 | 11 | ## Issue description 12 | 13 | (Provide a short description, state the expected behavior and what actually happens.) 14 | 15 | ## Reproducible example code 16 | 17 | (The code should be minimal, have no external dependencies, isolate the function(s) that cause breakage. Submit matched and complete C++ and Python snippets that can be easily compiled and run to diagnose the issue.) 18 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2016 Wenzel Jakob , All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without 4 | modification, are permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this 7 | list of conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, 10 | this list of conditions and the following disclaimer in the documentation 11 | and/or other materials provided with the distribution. 12 | 13 | 3. Neither the name of the copyright holder nor the names of its contributors 14 | may be used to endorse or promote products derived from this software 15 | without specific prior written permission. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 18 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 19 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 20 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 21 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 23 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 24 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 25 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | 28 | Please also refer to the file CONTRIBUTING.md, which clarifies licensing of 29 | external contributions to this project including patches, pull requests, etc. 
30 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include include/pybind11 *.h 2 | include LICENSE README.md CONTRIBUTING.md 3 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/docs/Doxyfile: -------------------------------------------------------------------------------- 1 | PROJECT_NAME = pybind11 2 | INPUT = ../include/pybind11/ 3 | RECURSIVE = YES 4 | 5 | GENERATE_HTML = NO 6 | GENERATE_LATEX = NO 7 | GENERATE_XML = YES 8 | XML_OUTPUT = .build/doxygenxml 9 | XML_PROGRAMLISTING = YES 10 | 11 | MACRO_EXPANSION = YES 12 | EXPAND_ONLY_PREDEF = YES 13 | EXPAND_AS_DEFINED = PYBIND11_RUNTIME_EXCEPTION 14 | 15 | ALIASES = "rst=\verbatim embed:rst" 16 | ALIASES += "endrst=\endverbatim" 17 | 18 | QUIET = YES 19 | WARNINGS = YES 20 | WARN_IF_UNDOCUMENTED = NO 21 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/docs/_static/theme_overrides.css: -------------------------------------------------------------------------------- 1 | .wy-table-responsive table td, 2 | .wy-table-responsive table th { 3 | white-space: initial !important; 4 | } 5 | .rst-content table.docutils td { 6 | vertical-align: top !important; 7 | } 8 | div[class^='highlight'] pre { 9 | white-space: pre; 10 | white-space: pre-wrap; 11 | } 12 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/docs/advanced/cast/index.rst: -------------------------------------------------------------------------------- 1 | Type conversions 2 | ################ 3 | 4 | Apart from enabling cross-language function calls, a fundamental problem 5 | that a binding tool like pybind11 must address is to provide access to 6 | native Python types in C++ and vice versa. There are three fundamentally 7 | different ways to do this—which approach is preferable for a particular type 8 | depends on the situation at hand. 9 | 10 | 1. Use a native C++ type everywhere. In this case, the type must be wrapped 11 | using pybind11-generated bindings so that Python can interact with it. 12 | 13 | 2. Use a native Python type everywhere. It will need to be wrapped so that 14 | C++ functions can interact with it. 15 | 16 | 3. Use a native C++ type on the C++ side and a native Python type on the 17 | Python side. pybind11 refers to this as a *type conversion*. 18 | 19 | Type conversions are the most "natural" option in the sense that native 20 | (non-wrapped) types are used everywhere. The main downside is that a copy 21 | of the data must be made on every Python ↔ C++ transition: this is 22 | needed since the C++ and Python versions of the same type generally won't 23 | have the same memory layout. 24 | 25 | pybind11 can perform many kinds of conversions automatically. An overview 26 | is provided in the table ":ref:`conversion_table`". 27 | 28 | The following subsections discuss the differences between these options in more 29 | detail. The main focus in this section is on type conversions, which represent 30 | the last case of the above list. 31 | 32 | .. 
toctree:: 33 | :maxdepth: 1 34 | 35 | overview 36 | strings 37 | stl 38 | functional 39 | chrono 40 | eigen 41 | custom 42 | 43 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/docs/advanced/pycpp/index.rst: -------------------------------------------------------------------------------- 1 | Python C++ interface 2 | #################### 3 | 4 | pybind11 exposes Python types and functions using thin C++ wrappers, which 5 | makes it possible to conveniently call Python code from C++ without resorting 6 | to Python's C API. 7 | 8 | .. toctree:: 9 | :maxdepth: 2 10 | 11 | object 12 | numpy 13 | utilities 14 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/docs/index.rst: -------------------------------------------------------------------------------- 1 | .. only: not latex 2 | 3 | .. image:: pybind11-logo.png 4 | 5 | pybind11 --- Seamless operability between C++11 and Python 6 | ========================================================== 7 | 8 | .. only: not latex 9 | 10 | Contents: 11 | 12 | .. toctree:: 13 | :maxdepth: 1 14 | 15 | intro 16 | changelog 17 | upgrade 18 | 19 | .. toctree:: 20 | :caption: The Basics 21 | :maxdepth: 2 22 | 23 | basics 24 | classes 25 | compiling 26 | 27 | .. toctree:: 28 | :caption: Advanced Topics 29 | :maxdepth: 2 30 | 31 | advanced/functions 32 | advanced/classes 33 | advanced/exceptions 34 | advanced/smart_ptrs 35 | advanced/cast/index 36 | advanced/pycpp/index 37 | advanced/embedding 38 | advanced/misc 39 | 40 | .. toctree:: 41 | :caption: Extra Information 42 | :maxdepth: 1 43 | 44 | faq 45 | benchmark 46 | limitations 47 | reference 48 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/docs/limitations.rst: -------------------------------------------------------------------------------- 1 | Limitations 2 | ########### 3 | 4 | pybind11 strives to be a general solution to binding generation, but it also has 5 | certain limitations: 6 | 7 | - pybind11 casts away ``const``-ness in function arguments and return values. 8 | This is in line with the Python language, which has no concept of ``const`` 9 | values. This means that some additional care is needed to avoid bugs that 10 | would be caught by the type checker in a traditional C++ program. 11 | 12 | - The NumPy interface ``pybind11::array`` greatly simplifies accessing 13 | numerical data from C++ (and vice versa), but it's not a full-blown array 14 | class like ``Eigen::Array`` or ``boost.multi_array``. 15 | 16 | These features could be implemented but would lead to a significant increase in 17 | complexity. I've decided to draw the line here to keep this project simple and 18 | compact. Users who absolutely require these features are encouraged to fork 19 | pybind11. 
20 | 21 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/docs/pybind11-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arplaboratory/UASTHN/1b27df8b5b4305f8f41936736a05e8117f7f20db/local_pipeline/model/ATT/pybind11-master/docs/pybind11-logo.png -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/docs/pybind11_vs_boost_python1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arplaboratory/UASTHN/1b27df8b5b4305f8f41936736a05e8117f7f20db/local_pipeline/model/ATT/pybind11-master/docs/pybind11_vs_boost_python1.png -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/docs/pybind11_vs_boost_python2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arplaboratory/UASTHN/1b27df8b5b4305f8f41936736a05e8117f7f20db/local_pipeline/model/ATT/pybind11-master/docs/pybind11_vs_boost_python2.png -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/docs/reference.rst: -------------------------------------------------------------------------------- 1 | .. _reference: 2 | 3 | .. warning:: 4 | 5 | Please be advised that the reference documentation discussing pybind11 6 | internals is currently incomplete. Please refer to the previous sections 7 | and the pybind11 header files for the nitty gritty details. 8 | 9 | Reference 10 | ######### 11 | 12 | .. _macros: 13 | 14 | Macros 15 | ====== 16 | 17 | .. doxygendefine:: PYBIND11_MODULE 18 | 19 | .. _core_types: 20 | 21 | Convenience classes for arbitrary Python types 22 | ============================================== 23 | 24 | Common member functions 25 | ----------------------- 26 | 27 | .. doxygenclass:: object_api 28 | :members: 29 | 30 | Without reference counting 31 | -------------------------- 32 | 33 | .. doxygenclass:: handle 34 | :members: 35 | 36 | With reference counting 37 | ----------------------- 38 | 39 | .. doxygenclass:: object 40 | :members: 41 | 42 | .. doxygenfunction:: reinterpret_borrow 43 | 44 | .. doxygenfunction:: reinterpret_steal 45 | 46 | Convenience classes for specific Python types 47 | ============================================= 48 | 49 | .. doxygenclass:: module 50 | :members: 51 | 52 | .. doxygengroup:: pytypes 53 | :members: 54 | 55 | .. _extras: 56 | 57 | Passing extra arguments to ``def`` or ``class_`` 58 | ================================================ 59 | 60 | .. doxygengroup:: annotations 61 | :members: 62 | 63 | Embedding the interpreter 64 | ========================= 65 | 66 | .. doxygendefine:: PYBIND11_EMBEDDED_MODULE 67 | 68 | .. doxygenfunction:: initialize_interpreter 69 | 70 | .. doxygenfunction:: finalize_interpreter 71 | 72 | .. doxygenclass:: scoped_interpreter 73 | 74 | Redirecting C++ streams 75 | ======================= 76 | 77 | .. doxygenclass:: scoped_ostream_redirect 78 | 79 | .. doxygenclass:: scoped_estream_redirect 80 | 81 | .. doxygenfunction:: add_ostream_redirect 82 | 83 | Python built-in functions 84 | ========================= 85 | 86 | .. doxygengroup:: python_builtins 87 | :members: 88 | 89 | Inheritance 90 | =========== 91 | 92 | See :doc:`/classes` and :doc:`/advanced/classes` for more detail. 
93 | 94 | .. doxygendefine:: PYBIND11_OVERLOAD 95 | 96 | .. doxygendefine:: PYBIND11_OVERLOAD_PURE 97 | 98 | .. doxygendefine:: PYBIND11_OVERLOAD_NAME 99 | 100 | .. doxygendefine:: PYBIND11_OVERLOAD_PURE_NAME 101 | 102 | .. doxygenfunction:: get_overload 103 | 104 | Exceptions 105 | ========== 106 | 107 | .. doxygenclass:: error_already_set 108 | :members: 109 | 110 | .. doxygenclass:: builtin_exception 111 | :members: 112 | 113 | 114 | Literals 115 | ======== 116 | 117 | .. doxygennamespace:: literals 118 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/docs/release.rst: -------------------------------------------------------------------------------- 1 | To release a new version of pybind11: 2 | 3 | - Update the version number and push to pypi 4 | - Update ``pybind11/_version.py`` (set release version, remove 'dev'). 5 | - Update ``PYBIND11_VERSION_MAJOR`` etc. in ``include/pybind11/detail/common.h``. 6 | - Ensure that all the information in ``setup.py`` is up-to-date. 7 | - Update version in ``docs/conf.py``. 8 | - Tag release date in ``docs/changelog.rst``. 9 | - ``git add`` and ``git commit``. 10 | - if new minor version: ``git checkout -b vX.Y``, ``git push -u origin vX.Y`` 11 | - ``git tag -a vX.Y.Z -m 'vX.Y.Z release'``. 12 | - ``git push`` 13 | - ``git push --tags``. 14 | - ``python setup.py sdist upload``. 15 | - ``python setup.py bdist_wheel upload``. 16 | - Get back to work 17 | - Update ``_version.py`` (add 'dev' and increment minor). 18 | - Update version in ``docs/conf.py`` 19 | - Update version macros in ``include/pybind11/common.h`` 20 | - ``git add`` and ``git commit``. 21 | - ``git push`` 22 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/docs/requirements.txt: -------------------------------------------------------------------------------- 1 | breathe == 4.5.0 2 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/include/pybind11/common.h: -------------------------------------------------------------------------------- 1 | #include "detail/common.h" 2 | #warning "Including 'common.h' is deprecated. It will be removed in v3.0. Use 'pybind11.h'." 3 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/include/pybind11/complex.h: -------------------------------------------------------------------------------- 1 | /* 2 | pybind11/complex.h: Complex number support 3 | 4 | Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch> 5 | 6 | All rights reserved. Use of this source code is governed by a 7 | BSD-style license that can be found in the LICENSE file.
8 | */ 9 | 10 | #pragma once 11 | 12 | #include "pybind11.h" 13 | #include <complex> 14 | 15 | /// glibc defines I as a macro which breaks things, e.g., boost template names 16 | #ifdef I 17 | # undef I 18 | #endif 19 | 20 | NAMESPACE_BEGIN(PYBIND11_NAMESPACE) 21 | 22 | template <typename T> struct format_descriptor<std::complex<T>, detail::enable_if_t<std::is_floating_point<T>::value>> { 23 | static constexpr const char c = format_descriptor<T>::c; 24 | static constexpr const char value[3] = { 'Z', c, '\0' }; 25 | static std::string format() { return std::string(value); } 26 | }; 27 | 28 | #ifndef PYBIND11_CPP17 29 | 30 | template <typename T> constexpr const char format_descriptor< 31 | std::complex<T>, detail::enable_if_t<std::is_floating_point<T>::value>>::value[3]; 32 | 33 | #endif 34 | 35 | NAMESPACE_BEGIN(detail) 36 | 37 | template <typename T> struct is_fmt_numeric<std::complex<T>, detail::enable_if_t<std::is_floating_point<T>::value>> { 38 | static constexpr bool value = true; 39 | static constexpr int index = is_fmt_numeric<T>::index + 3; 40 | }; 41 | 42 | template <typename T> class type_caster<std::complex<T>> { 43 | public: 44 | bool load(handle src, bool convert) { 45 | if (!src) 46 | return false; 47 | if (!convert && !PyComplex_Check(src.ptr())) 48 | return false; 49 | Py_complex result = PyComplex_AsCComplex(src.ptr()); 50 | if (result.real == -1.0 && PyErr_Occurred()) { 51 | PyErr_Clear(); 52 | return false; 53 | } 54 | value = std::complex<T>((T) result.real, (T) result.imag); 55 | return true; 56 | } 57 | 58 | static handle cast(const std::complex<T> &src, return_value_policy /* policy */, handle /* parent */) { 59 | return PyComplex_FromDoubles((double) src.real(), (double) src.imag()); 60 | } 61 | 62 | PYBIND11_TYPE_CASTER(std::complex<T>, _("complex")); 63 | }; 64 | NAMESPACE_END(detail) 65 | NAMESPACE_END(PYBIND11_NAMESPACE) 66 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/include/pybind11/detail/typeid.h: -------------------------------------------------------------------------------- 1 | /* 2 | pybind11/detail/typeid.h: Compiler-independent access to type identifiers 3 | 4 | Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch> 5 | 6 | All rights reserved. Use of this source code is governed by a 7 | BSD-style license that can be found in the LICENSE file.
8 | */ 9 | 10 | #pragma once 11 | 12 | #include <cstdio> 13 | #include <cstdlib> 14 | 15 | #if defined(__GNUG__) 16 | #include <cxxabi.h> 17 | #endif 18 | 19 | #include "common.h" 20 | 21 | NAMESPACE_BEGIN(PYBIND11_NAMESPACE) 22 | NAMESPACE_BEGIN(detail) 23 | /// Erase all occurrences of a substring 24 | inline void erase_all(std::string &string, const std::string &search) { 25 | for (size_t pos = 0;;) { 26 | pos = string.find(search, pos); 27 | if (pos == std::string::npos) break; 28 | string.erase(pos, search.length()); 29 | } 30 | } 31 | 32 | PYBIND11_NOINLINE inline void clean_type_id(std::string &name) { 33 | #if defined(__GNUG__) 34 | int status = 0; 35 | std::unique_ptr<char, void (*)(void *)> res { 36 | abi::__cxa_demangle(name.c_str(), nullptr, nullptr, &status), std::free }; 37 | if (status == 0) 38 | name = res.get(); 39 | #else 40 | detail::erase_all(name, "class "); 41 | detail::erase_all(name, "struct "); 42 | detail::erase_all(name, "enum "); 43 | #endif 44 | detail::erase_all(name, "pybind11::"); 45 | } 46 | NAMESPACE_END(detail) 47 | 48 | /// Return a string representation of a C++ type 49 | template <typename T> static std::string type_id() { 50 | std::string name(typeid(T).name()); 51 | detail::clean_type_id(name); 52 | return name; 53 | } 54 | 55 | NAMESPACE_END(PYBIND11_NAMESPACE) 56 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/include/pybind11/options.h: -------------------------------------------------------------------------------- 1 | /* 2 | pybind11/options.h: global settings that are configurable at runtime. 3 | 4 | Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch> 5 | 6 | All rights reserved. Use of this source code is governed by a 7 | BSD-style license that can be found in the LICENSE file. 8 | */ 9 | 10 | #pragma once 11 | 12 | #include "detail/common.h" 13 | 14 | NAMESPACE_BEGIN(PYBIND11_NAMESPACE) 15 | 16 | class options { 17 | public: 18 | 19 | // Default RAII constructor, which leaves settings as they currently are. 20 | options() : previous_state(global_state()) {} 21 | 22 | // Class is non-copyable. 23 | options(const options&) = delete; 24 | options& operator=(const options&) = delete; 25 | 26 | // Destructor, which restores settings that were in effect before. 27 | ~options() { 28 | global_state() = previous_state; 29 | } 30 | 31 | // Setter methods (affect the global state): 32 | 33 | options& disable_user_defined_docstrings() & { global_state().show_user_defined_docstrings = false; return *this; } 34 | 35 | options& enable_user_defined_docstrings() & { global_state().show_user_defined_docstrings = true; return *this; } 36 | 37 | options& disable_function_signatures() & { global_state().show_function_signatures = false; return *this; } 38 | 39 | options& enable_function_signatures() & { global_state().show_function_signatures = true; return *this; } 40 | 41 | // Getter methods (return the global state): 42 | 43 | static bool show_user_defined_docstrings() { return global_state().show_user_defined_docstrings; } 44 | 45 | static bool show_function_signatures() { return global_state().show_function_signatures; } 46 | 47 | // This type is not meant to be allocated on the heap. 48 | void* operator new(size_t) = delete; 49 | 50 | private: 51 | 52 | struct state { 53 | bool show_user_defined_docstrings = true; //< Include user-supplied texts in docstrings. 54 | bool show_function_signatures = true; //< Include auto-generated function signatures in docstrings.
55 | }; 56 | 57 | static state &global_state() { 58 | static state instance; 59 | return instance; 60 | } 61 | 62 | state previous_state; 63 | }; 64 | 65 | NAMESPACE_END(PYBIND11_NAMESPACE) 66 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/pybind11/__init__.py: -------------------------------------------------------------------------------- 1 | from ._version import version_info, __version__ # noqa: F401 imported but unused 2 | 3 | 4 | def get_include(user=False): 5 | import os 6 | d = os.path.dirname(__file__) 7 | if os.path.exists(os.path.join(d, "include")): 8 | # Package is installed 9 | return os.path.join(d, "include") 10 | else: 11 | # Package is from a source directory 12 | return os.path.join(os.path.dirname(d), "include") 13 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/pybind11/__main__.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import argparse 4 | import sys 5 | import sysconfig 6 | 7 | from . import get_include 8 | 9 | 10 | def print_includes(): 11 | dirs = [sysconfig.get_path('include'), 12 | sysconfig.get_path('platinclude'), 13 | get_include()] 14 | 15 | # Make unique but preserve order 16 | unique_dirs = [] 17 | for d in dirs: 18 | if d not in unique_dirs: 19 | unique_dirs.append(d) 20 | 21 | print(' '.join('-I' + d for d in unique_dirs)) 22 | 23 | 24 | def main(): 25 | parser = argparse.ArgumentParser(prog='python -m pybind11') 26 | parser.add_argument('--includes', action='store_true', 27 | help='Include flags for both pybind11 and Python headers.') 28 | args = parser.parse_args() 29 | if not sys.argv[1:]: 30 | parser.print_help() 31 | if args.includes: 32 | print_includes() 33 | 34 | 35 | if __name__ == '__main__': 36 | main() 37 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/pybind11/_version.py: -------------------------------------------------------------------------------- 1 | version_info = (2, 5, 'dev1') 2 | __version__ = '.'.join(map(str, version_info)) 3 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/setup.cfg: -------------------------------------------------------------------------------- 1 | [bdist_wheel] 2 | universal=1 3 | 4 | [flake8] 5 | max-line-length = 99 6 | show_source = True 7 | exclude = .git, __pycache__, build, dist, docs, tools, venv 8 | ignore = 9 | # required for pretty matrix formatting: multiple spaces after `,` and `[` 10 | E201, E241, W504, 11 | # camelcase 'cPickle' imported as lowercase 'pickle' 12 | N813 13 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/cross_module_gil_utils.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | tests/cross_module_gil_utils.cpp -- tools for acquiring GIL from a different module 3 | 4 | Copyright (c) 2019 Google LLC 5 | 6 | All rights reserved. Use of this source code is governed by a 7 | BSD-style license that can be found in the LICENSE file. 8 | */ 9 | #include 10 | #include 11 | 12 | // This file mimics a DSO that makes pybind11 calls but does not define a 13 | // PYBIND11_MODULE. 
The purpose is to test that such a DSO can create a 14 | // py::gil_scoped_acquire when the running thread is in a GIL-released state. 15 | // 16 | // Note that we define a Python module here for convenience, but in general 17 | // this need not be the case. The typical scenario would be a DSO that implements 18 | // shared logic used internally by multiple pybind11 modules. 19 | 20 | namespace { 21 | 22 | namespace py = pybind11; 23 | void gil_acquire() { py::gil_scoped_acquire gil; } 24 | 25 | constexpr char kModuleName[] = "cross_module_gil_utils"; 26 | 27 | #if PY_MAJOR_VERSION >= 3 28 | struct PyModuleDef moduledef = { 29 | PyModuleDef_HEAD_INIT, 30 | kModuleName, 31 | NULL, 32 | 0, 33 | NULL, 34 | NULL, 35 | NULL, 36 | NULL, 37 | NULL 38 | }; 39 | #else 40 | PyMethodDef module_methods[] = { 41 | {NULL, NULL, 0, NULL} 42 | }; 43 | #endif 44 | 45 | } // namespace 46 | 47 | extern "C" PYBIND11_EXPORT 48 | #if PY_MAJOR_VERSION >= 3 49 | PyObject* PyInit_cross_module_gil_utils() 50 | #else 51 | void initcross_module_gil_utils() 52 | #endif 53 | { 54 | 55 | PyObject* m = 56 | #if PY_MAJOR_VERSION >= 3 57 | PyModule_Create(&moduledef); 58 | #else 59 | Py_InitModule(kModuleName, module_methods); 60 | #endif 61 | 62 | if (m != NULL) { 63 | static_assert( 64 | sizeof(&gil_acquire) == sizeof(void*), 65 | "Function pointer must have the same size as void*"); 66 | PyModule_AddObject(m, "gil_acquire_funcaddr", 67 | PyLong_FromVoidPtr(reinterpret_cast(&gil_acquire))); 68 | } 69 | 70 | #if PY_MAJOR_VERSION >= 3 71 | return m; 72 | #endif 73 | } 74 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/local_bindings.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "pybind11_tests.h" 3 | 4 | /// Simple class used to test py::local: 5 | template class LocalBase { 6 | public: 7 | LocalBase(int i) : i(i) { } 8 | int i = -1; 9 | }; 10 | 11 | /// Registered with py::module_local in both main and secondary modules: 12 | using LocalType = LocalBase<0>; 13 | /// Registered without py::module_local in both modules: 14 | using NonLocalType = LocalBase<1>; 15 | /// A second non-local type (for stl_bind tests): 16 | using NonLocal2 = LocalBase<2>; 17 | /// Tests within-module, different-compilation-unit local definition conflict: 18 | using LocalExternal = LocalBase<3>; 19 | /// Mixed: registered local first, then global 20 | using MixedLocalGlobal = LocalBase<4>; 21 | /// Mixed: global first, then local 22 | using MixedGlobalLocal = LocalBase<5>; 23 | 24 | /// Registered with py::module_local only in the secondary module: 25 | using ExternalType1 = LocalBase<6>; 26 | using ExternalType2 = LocalBase<7>; 27 | 28 | using LocalVec = std::vector; 29 | using LocalVec2 = std::vector; 30 | using LocalMap = std::unordered_map; 31 | using NonLocalVec = std::vector; 32 | using NonLocalVec2 = std::vector; 33 | using NonLocalMap = std::unordered_map; 34 | using NonLocalMap2 = std::unordered_map; 35 | 36 | PYBIND11_MAKE_OPAQUE(LocalVec); 37 | PYBIND11_MAKE_OPAQUE(LocalVec2); 38 | PYBIND11_MAKE_OPAQUE(LocalMap); 39 | PYBIND11_MAKE_OPAQUE(NonLocalVec); 40 | //PYBIND11_MAKE_OPAQUE(NonLocalVec2); // same type as LocalVec2 41 | PYBIND11_MAKE_OPAQUE(NonLocalMap); 42 | PYBIND11_MAKE_OPAQUE(NonLocalMap2); 43 | 44 | 45 | // Simple bindings (used with the above): 46 | template 47 | py::class_ bind_local(Args && ...args) { 48 | return py::class_(std::forward(args)...) 
49 | .def(py::init()) 50 | .def("get", [](T &i) { return i.i + Adjust; }); 51 | }; 52 | 53 | // Simulate a foreign library base class (to match the example in the docs): 54 | namespace pets { 55 | class Pet { 56 | public: 57 | Pet(std::string name) : name_(name) {} 58 | std::string name_; 59 | const std::string &name() { return name_; } 60 | }; 61 | } 62 | 63 | struct MixGL { int i; MixGL(int i) : i{i} {} }; 64 | struct MixGL2 { int i; MixGL2(int i) : i{i} {} }; 65 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/pybind11_tests.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | 4 | #if defined(_MSC_VER) && _MSC_VER < 1910 5 | // We get some really long type names here which causes MSVC 2015 to emit warnings 6 | # pragma warning(disable: 4503) // warning C4503: decorated name length exceeded, name was truncated 7 | #endif 8 | 9 | namespace py = pybind11; 10 | using namespace pybind11::literals; 11 | 12 | class test_initializer { 13 | using Initializer = void (*)(py::module &); 14 | 15 | public: 16 | test_initializer(Initializer init); 17 | test_initializer(const char *submodule_name, Initializer init); 18 | }; 19 | 20 | #define TEST_SUBMODULE(name, variable) \ 21 | void test_submodule_##name(py::module &); \ 22 | test_initializer name(#name, test_submodule_##name); \ 23 | void test_submodule_##name(py::module &variable) 24 | 25 | 26 | /// Dummy type which is not exported anywhere -- something to trigger a conversion error 27 | struct UnregisteredType { }; 28 | 29 | /// A user-defined type which is exported and can be used by any test 30 | class UserType { 31 | public: 32 | UserType() = default; 33 | UserType(int i) : i(i) { } 34 | 35 | int value() const { return i; } 36 | void set(int set) { i = set; } 37 | 38 | private: 39 | int i = -1; 40 | }; 41 | 42 | /// Like UserType, but increments `value` on copy for quick reference vs. copy tests 43 | class IncType : public UserType { 44 | public: 45 | using UserType::UserType; 46 | IncType() = default; 47 | IncType(const IncType &other) : IncType(other.value() + 1) { } 48 | IncType(IncType &&) = delete; 49 | IncType &operator=(const IncType &) = delete; 50 | IncType &operator=(IncType &&) = delete; 51 | }; 52 | 53 | /// Custom cast-only type that casts to a string "rvalue" or "lvalue" depending on the cast context. 54 | /// Used to test recursive casters (e.g. std::tuple, stl containers). 
55 | struct RValueCaster {}; 56 | NAMESPACE_BEGIN(pybind11) 57 | NAMESPACE_BEGIN(detail) 58 | template<> class type_caster<RValueCaster> { 59 | public: 60 | PYBIND11_TYPE_CASTER(RValueCaster, _("RValueCaster")); 61 | static handle cast(RValueCaster &&, return_value_policy, handle) { return py::str("rvalue").release(); } 62 | static handle cast(const RValueCaster &, return_value_policy, handle) { return py::str("lvalue").release(); } 63 | }; 64 | NAMESPACE_END(detail) 65 | NAMESPACE_END(pybind11) 66 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | minversion = 3.0 3 | norecursedirs = test_cmake_build test_embed 4 | addopts = 5 | # show summary of skipped tests 6 | -rs 7 | # capture only Python print and C++ py::print, but not C output (low-level Python errors) 8 | --capture=sys 9 | filterwarnings = 10 | # make warnings into errors but ignore certain third-party extension issues 11 | error 12 | # importing scipy submodules on some version of Python 13 | ignore::ImportWarning 14 | # bogus numpy ABI warning (see numpy/#432) 15 | ignore:.*numpy.dtype size changed.*:RuntimeWarning 16 | ignore:.*numpy.ufunc size changed.*:RuntimeWarning 17 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_async.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | tests/test_async.cpp -- __await__ support 3 | 4 | Copyright (c) 2019 Google Inc. 5 | 6 | All rights reserved. Use of this source code is governed by a 7 | BSD-style license that can be found in the LICENSE file.
8 | */ 9 | 10 | #include "pybind11_tests.h" 11 | 12 | TEST_SUBMODULE(async_module, m) { 13 | struct DoesNotSupportAsync {}; 14 | py::class_(m, "DoesNotSupportAsync") 15 | .def(py::init<>()); 16 | struct SupportsAsync {}; 17 | py::class_(m, "SupportsAsync") 18 | .def(py::init<>()) 19 | .def("__await__", [](const SupportsAsync& self) -> py::object { 20 | static_cast(self); 21 | py::object loop = py::module::import("asyncio.events").attr("get_event_loop")(); 22 | py::object f = loop.attr("create_future")(); 23 | f.attr("set_result")(5); 24 | return f.attr("__await__")(); 25 | }); 26 | } 27 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_async.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import pytest 3 | from pybind11_tests import async_module as m 4 | 5 | 6 | @pytest.fixture 7 | def event_loop(): 8 | loop = asyncio.new_event_loop() 9 | yield loop 10 | loop.close() 11 | 12 | 13 | async def get_await_result(x): 14 | return await x 15 | 16 | 17 | def test_await(event_loop): 18 | assert 5 == event_loop.run_until_complete(get_await_result(m.SupportsAsync())) 19 | 20 | 21 | def test_await_missing(event_loop): 22 | with pytest.raises(TypeError): 23 | event_loop.run_until_complete(get_await_result(m.DoesNotSupportAsync())) 24 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_chrono.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | tests/test_chrono.cpp -- test conversions to/from std::chrono types 3 | 4 | Copyright (c) 2016 Trent Houliston and 5 | Wenzel Jakob 6 | 7 | All rights reserved. Use of this source code is governed by a 8 | BSD-style license that can be found in the LICENSE file. 
9 | */ 10 | 11 | #include "pybind11_tests.h" 12 | #include 13 | 14 | TEST_SUBMODULE(chrono, m) { 15 | using system_time = std::chrono::system_clock::time_point; 16 | using steady_time = std::chrono::steady_clock::time_point; 17 | 18 | using timespan = std::chrono::duration; 19 | using timestamp = std::chrono::time_point; 20 | 21 | // test_chrono_system_clock 22 | // Return the current time off the wall clock 23 | m.def("test_chrono1", []() { return std::chrono::system_clock::now(); }); 24 | 25 | // test_chrono_system_clock_roundtrip 26 | // Round trip the passed in system clock time 27 | m.def("test_chrono2", [](system_time t) { return t; }); 28 | 29 | // test_chrono_duration_roundtrip 30 | // Round trip the passed in duration 31 | m.def("test_chrono3", [](std::chrono::system_clock::duration d) { return d; }); 32 | 33 | // test_chrono_duration_subtraction_equivalence 34 | // Difference between two passed in time_points 35 | m.def("test_chrono4", [](system_time a, system_time b) { return a - b; }); 36 | 37 | // test_chrono_steady_clock 38 | // Return the current time off the steady_clock 39 | m.def("test_chrono5", []() { return std::chrono::steady_clock::now(); }); 40 | 41 | // test_chrono_steady_clock_roundtrip 42 | // Round trip a steady clock timepoint 43 | m.def("test_chrono6", [](steady_time t) { return t; }); 44 | 45 | // test_floating_point_duration 46 | // Roundtrip a duration in microseconds from a float argument 47 | m.def("test_chrono7", [](std::chrono::microseconds t) { return t; }); 48 | // Float durations (issue #719) 49 | m.def("test_chrono_float_diff", [](std::chrono::duration a, std::chrono::duration b) { 50 | return a - b; }); 51 | 52 | m.def("test_nano_timepoint", [](timestamp start, timespan delta) -> timestamp { 53 | return start + delta; 54 | }); 55 | } 56 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_cmake_build/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_custom_target(test_cmake_build) 2 | 3 | if(CMAKE_VERSION VERSION_LESS 3.1) 4 | # 3.0 needed for interface library for subdirectory_target/installed_target 5 | # 3.1 needed for cmake -E env for testing 6 | return() 7 | endif() 8 | 9 | include(CMakeParseArguments) 10 | function(pybind11_add_build_test name) 11 | cmake_parse_arguments(ARG "INSTALL" "" "" ${ARGN}) 12 | 13 | set(build_options "-DCMAKE_PREFIX_PATH=${PROJECT_BINARY_DIR}/mock_install" 14 | "-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}" 15 | "-DPYTHON_EXECUTABLE:FILEPATH=${PYTHON_EXECUTABLE}" 16 | "-DPYBIND11_CPP_STANDARD=${PYBIND11_CPP_STANDARD}") 17 | if(NOT ARG_INSTALL) 18 | list(APPEND build_options "-DPYBIND11_PROJECT_DIR=${PROJECT_SOURCE_DIR}") 19 | endif() 20 | 21 | add_custom_target(test_${name} ${CMAKE_CTEST_COMMAND} 22 | --quiet --output-log ${name}.log 23 | --build-and-test "${CMAKE_CURRENT_SOURCE_DIR}/${name}" 24 | "${CMAKE_CURRENT_BINARY_DIR}/${name}" 25 | --build-config Release 26 | --build-noclean 27 | --build-generator ${CMAKE_GENERATOR} 28 | $<$:--build-generator-platform> ${CMAKE_GENERATOR_PLATFORM} 29 | --build-makeprogram ${CMAKE_MAKE_PROGRAM} 30 | --build-target check 31 | --build-options ${build_options} 32 | ) 33 | if(ARG_INSTALL) 34 | add_dependencies(test_${name} mock_install) 35 | endif() 36 | add_dependencies(test_cmake_build test_${name}) 37 | endfunction() 38 | 39 | pybind11_add_build_test(subdirectory_function) 40 | pybind11_add_build_test(subdirectory_target) 41 | if(NOT 
${PYTHON_MODULE_EXTENSION} MATCHES "pypy") 42 | pybind11_add_build_test(subdirectory_embed) 43 | endif() 44 | 45 | if(PYBIND11_INSTALL) 46 | add_custom_target(mock_install ${CMAKE_COMMAND} 47 | "-DCMAKE_INSTALL_PREFIX=${PROJECT_BINARY_DIR}/mock_install" 48 | -P "${PROJECT_BINARY_DIR}/cmake_install.cmake" 49 | ) 50 | 51 | pybind11_add_build_test(installed_function INSTALL) 52 | pybind11_add_build_test(installed_target INSTALL) 53 | if(NOT ${PYTHON_MODULE_EXTENSION} MATCHES "pypy") 54 | pybind11_add_build_test(installed_embed INSTALL) 55 | endif() 56 | endif() 57 | 58 | add_dependencies(check test_cmake_build) 59 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_cmake_build/embed.cpp: -------------------------------------------------------------------------------- 1 | #include <pybind11/embed.h> 2 | namespace py = pybind11; 3 | 4 | PYBIND11_EMBEDDED_MODULE(test_cmake_build, m) { 5 | m.def("add", [](int i, int j) { return i + j; }); 6 | } 7 | 8 | int main(int argc, char *argv[]) { 9 | if (argc != 2) 10 | throw std::runtime_error("Expected test.py file as the first argument"); 11 | auto test_py_file = argv[1]; 12 | 13 | py::scoped_interpreter guard{}; 14 | 15 | auto m = py::module::import("test_cmake_build"); 16 | if (m.attr("add")(1, 2).cast<int>() != 3) 17 | throw std::runtime_error("embed.cpp failed"); 18 | 19 | py::module::import("sys").attr("argv") = py::make_tuple("test.py", "embed.cpp"); 20 | py::eval_file(test_py_file, py::globals()); 21 | } 22 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_cmake_build/installed_embed/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.0) 2 | project(test_installed_embed CXX) 3 | 4 | set(CMAKE_MODULE_PATH "") 5 | find_package(pybind11 CONFIG REQUIRED) 6 | message(STATUS "Found pybind11 v${pybind11_VERSION}: ${pybind11_INCLUDE_DIRS}") 7 | 8 | add_executable(test_cmake_build ../embed.cpp) 9 | target_link_libraries(test_cmake_build PRIVATE pybind11::embed) 10 | 11 | # Do not treat includes from IMPORTED target as SYSTEM (Python headers in pybind11::embed). 12 | # This may be needed to resolve header conflicts, e.g. between Python release and debug headers.
13 | set_target_properties(test_cmake_build PROPERTIES NO_SYSTEM_FROM_IMPORTED ON) 14 | 15 | add_custom_target(check $<TARGET_FILE:test_cmake_build> ${PROJECT_SOURCE_DIR}/../test.py) 16 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_cmake_build/installed_function/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8.12) 2 | project(test_installed_module CXX) 3 | 4 | set(CMAKE_MODULE_PATH "") 5 | 6 | find_package(pybind11 CONFIG REQUIRED) 7 | message(STATUS "Found pybind11 v${pybind11_VERSION}: ${pybind11_INCLUDE_DIRS}") 8 | 9 | pybind11_add_module(test_cmake_build SHARED NO_EXTRAS ../main.cpp) 10 | 11 | add_custom_target(check ${CMAKE_COMMAND} -E env PYTHONPATH=$<TARGET_FILE_DIR:test_cmake_build> 12 | ${PYTHON_EXECUTABLE} ${PROJECT_SOURCE_DIR}/../test.py ${PROJECT_NAME}) 13 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_cmake_build/installed_target/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.0) 2 | project(test_installed_target CXX) 3 | 4 | set(CMAKE_MODULE_PATH "") 5 | 6 | find_package(pybind11 CONFIG REQUIRED) 7 | message(STATUS "Found pybind11 v${pybind11_VERSION}: ${pybind11_INCLUDE_DIRS}") 8 | 9 | add_library(test_cmake_build MODULE ../main.cpp) 10 | 11 | target_link_libraries(test_cmake_build PRIVATE pybind11::module) 12 | 13 | # make sure result is, for example, test_installed_target.so, not libtest_installed_target.dylib 14 | set_target_properties(test_cmake_build PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}" 15 | SUFFIX "${PYTHON_MODULE_EXTENSION}") 16 | 17 | # Do not treat includes from IMPORTED target as SYSTEM (Python headers in pybind11::module). 18 | # This may be needed to resolve header conflicts, e.g. between Python release and debug headers.
19 | set_target_properties(test_cmake_build PROPERTIES NO_SYSTEM_FROM_IMPORTED ON) 20 | 21 | add_custom_target(check ${CMAKE_COMMAND} -E env PYTHONPATH=$<TARGET_FILE_DIR:test_cmake_build> 22 | ${PYTHON_EXECUTABLE} ${PROJECT_SOURCE_DIR}/../test.py ${PROJECT_NAME}) 23 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_cmake_build/main.cpp: -------------------------------------------------------------------------------- 1 | #include <pybind11/pybind11.h> 2 | namespace py = pybind11; 3 | 4 | PYBIND11_MODULE(test_cmake_build, m) { 5 | m.def("add", [](int i, int j) { return i + j; }); 6 | } 7 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_cmake_build/subdirectory_embed/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.0) 2 | project(test_subdirectory_embed CXX) 3 | 4 | set(PYBIND11_INSTALL ON CACHE BOOL "") 5 | set(PYBIND11_EXPORT_NAME test_export) 6 | 7 | add_subdirectory(${PYBIND11_PROJECT_DIR} pybind11) 8 | 9 | # Test basic target functionality 10 | add_executable(test_cmake_build ../embed.cpp) 11 | target_link_libraries(test_cmake_build PRIVATE pybind11::embed) 12 | 13 | add_custom_target(check $<TARGET_FILE:test_cmake_build> ${PROJECT_SOURCE_DIR}/../test.py) 14 | 15 | # Test custom export group -- PYBIND11_EXPORT_NAME 16 | add_library(test_embed_lib ../embed.cpp) 17 | target_link_libraries(test_embed_lib PRIVATE pybind11::embed) 18 | 19 | install(TARGETS test_embed_lib 20 | EXPORT test_export 21 | ARCHIVE DESTINATION bin 22 | LIBRARY DESTINATION lib 23 | RUNTIME DESTINATION lib) 24 | install(EXPORT test_export 25 | DESTINATION lib/cmake/test_export/test_export-Targets.cmake) 26 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_cmake_build/subdirectory_function/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8.12) 2 | project(test_subdirectory_module CXX) 3 | 4 | add_subdirectory(${PYBIND11_PROJECT_DIR} pybind11) 5 | pybind11_add_module(test_cmake_build THIN_LTO ../main.cpp) 6 | 7 | add_custom_target(check ${CMAKE_COMMAND} -E env PYTHONPATH=$<TARGET_FILE_DIR:test_cmake_build> 8 | ${PYTHON_EXECUTABLE} ${PROJECT_SOURCE_DIR}/../test.py ${PROJECT_NAME}) 9 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_cmake_build/subdirectory_target/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.0) 2 | project(test_subdirectory_target CXX) 3 | 4 | add_subdirectory(${PYBIND11_PROJECT_DIR} pybind11) 5 | 6 | add_library(test_cmake_build MODULE ../main.cpp) 7 | 8 | target_link_libraries(test_cmake_build PRIVATE pybind11::module) 9 | 10 | # make sure result is, for example, test_installed_target.so, not libtest_installed_target.dylib 11 | set_target_properties(test_cmake_build PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}" 12 | SUFFIX "${PYTHON_MODULE_EXTENSION}") 13 | 14 | add_custom_target(check ${CMAKE_COMMAND} -E env PYTHONPATH=$<TARGET_FILE_DIR:test_cmake_build> 15 | ${PYTHON_EXECUTABLE} ${PROJECT_SOURCE_DIR}/../test.py ${PROJECT_NAME}) 16 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_cmake_build/test.py:
-------------------------------------------------------------------------------- 1 | import sys 2 | import test_cmake_build 3 | 4 | assert test_cmake_build.add(1, 2) == 3 5 | print("{} imports, runs, and adds: 1 + 2 = 3".format(sys.argv[1])) 6 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_constants_and_functions.py: -------------------------------------------------------------------------------- 1 | from pybind11_tests import constants_and_functions as m 2 | 3 | 4 | def test_constants(): 5 | assert m.some_constant == 14 6 | 7 | 8 | def test_function_overloading(): 9 | assert m.test_function() == "test_function()" 10 | assert m.test_function(7) == "test_function(7)" 11 | assert m.test_function(m.MyEnum.EFirstEntry) == "test_function(enum=1)" 12 | assert m.test_function(m.MyEnum.ESecondEntry) == "test_function(enum=2)" 13 | 14 | assert m.test_function() == "test_function()" 15 | assert m.test_function("abcd") == "test_function(char *)" 16 | assert m.test_function(1, 1.0) == "test_function(int, float)" 17 | assert m.test_function(1, 1.0) == "test_function(int, float)" 18 | assert m.test_function(2.0, 2) == "test_function(float, int)" 19 | 20 | 21 | def test_bytes(): 22 | assert m.print_bytes(m.return_bytes()) == "bytes[1 0 2 0]" 23 | 24 | 25 | def test_exception_specifiers(): 26 | c = m.C() 27 | assert c.m1(2) == 1 28 | assert c.m2(3) == 1 29 | assert c.m3(5) == 2 30 | assert c.m4(7) == 3 31 | assert c.m5(10) == 5 32 | assert c.m6(14) == 8 33 | assert c.m7(20) == 13 34 | assert c.m8(29) == 21 35 | 36 | assert m.f1(33) == 34 37 | assert m.f2(53) == 55 38 | assert m.f3(86) == 89 39 | assert m.f4(140) == 144 40 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_docstring_options.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | tests/test_docstring_options.cpp -- generation of docstrings and signatures 3 | 4 | Copyright (c) 2016 Wenzel Jakob 5 | 6 | All rights reserved. Use of this source code is governed by a 7 | BSD-style license that can be found in the LICENSE file. 
8 | */ 9 | 10 | #include "pybind11_tests.h" 11 | 12 | TEST_SUBMODULE(docstring_options, m) { 13 | // test_docstring_options 14 | { 15 | py::options options; 16 | options.disable_function_signatures(); 17 | 18 | m.def("test_function1", [](int, int) {}, py::arg("a"), py::arg("b")); 19 | m.def("test_function2", [](int, int) {}, py::arg("a"), py::arg("b"), "A custom docstring"); 20 | 21 | m.def("test_overloaded1", [](int) {}, py::arg("i"), "Overload docstring"); 22 | m.def("test_overloaded1", [](double) {}, py::arg("d")); 23 | 24 | m.def("test_overloaded2", [](int) {}, py::arg("i"), "overload docstring 1"); 25 | m.def("test_overloaded2", [](double) {}, py::arg("d"), "overload docstring 2"); 26 | 27 | m.def("test_overloaded3", [](int) {}, py::arg("i")); 28 | m.def("test_overloaded3", [](double) {}, py::arg("d"), "Overload docstr"); 29 | 30 | options.enable_function_signatures(); 31 | 32 | m.def("test_function3", [](int, int) {}, py::arg("a"), py::arg("b")); 33 | m.def("test_function4", [](int, int) {}, py::arg("a"), py::arg("b"), "A custom docstring"); 34 | 35 | options.disable_function_signatures().disable_user_defined_docstrings(); 36 | 37 | m.def("test_function5", [](int, int) {}, py::arg("a"), py::arg("b"), "A custom docstring"); 38 | 39 | { 40 | py::options nested_options; 41 | nested_options.enable_user_defined_docstrings(); 42 | m.def("test_function6", [](int, int) {}, py::arg("a"), py::arg("b"), "A custom docstring"); 43 | } 44 | } 45 | 46 | m.def("test_function7", [](int, int) {}, py::arg("a"), py::arg("b"), "A custom docstring"); 47 | 48 | { 49 | py::options options; 50 | options.disable_user_defined_docstrings(); 51 | 52 | struct DocstringTestFoo { 53 | int value; 54 | void setValue(int v) { value = v; } 55 | int getValue() const { return value; } 56 | }; 57 | py::class_(m, "DocstringTestFoo", "This is a class docstring") 58 | .def_property("value_prop", &DocstringTestFoo::getValue, &DocstringTestFoo::setValue, "This is a property docstring") 59 | ; 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_docstring_options.py: -------------------------------------------------------------------------------- 1 | from pybind11_tests import docstring_options as m 2 | 3 | 4 | def test_docstring_options(): 5 | # options.disable_function_signatures() 6 | assert not m.test_function1.__doc__ 7 | 8 | assert m.test_function2.__doc__ == "A custom docstring" 9 | 10 | # docstring specified on just the first overload definition: 11 | assert m.test_overloaded1.__doc__ == "Overload docstring" 12 | 13 | # docstring on both overloads: 14 | assert m.test_overloaded2.__doc__ == "overload docstring 1\noverload docstring 2" 15 | 16 | # docstring on only second overload: 17 | assert m.test_overloaded3.__doc__ == "Overload docstr" 18 | 19 | # options.enable_function_signatures() 20 | assert m.test_function3.__doc__ .startswith("test_function3(a: int, b: int) -> None") 21 | 22 | assert m.test_function4.__doc__ .startswith("test_function4(a: int, b: int) -> None") 23 | assert m.test_function4.__doc__ .endswith("A custom docstring\n") 24 | 25 | # options.disable_function_signatures() 26 | # options.disable_user_defined_docstrings() 27 | assert not m.test_function5.__doc__ 28 | 29 | # nested options.enable_user_defined_docstrings() 30 | assert m.test_function6.__doc__ == "A custom docstring" 31 | 32 | # RAII destructor 33 | assert m.test_function7.__doc__ .startswith("test_function7(a: int, b: int) -> None") 34 | 
assert m.test_function7.__doc__ .endswith("A custom docstring\n") 35 | 36 | # Suppression of user-defined docstrings for non-function objects 37 | assert not m.DocstringTestFoo.__doc__ 38 | assert not m.DocstringTestFoo.value_prop.__doc__ 39 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_embed/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | if(${PYTHON_MODULE_EXTENSION} MATCHES "pypy") 2 | add_custom_target(cpptest) # Dummy target on PyPy. Embedding is not supported. 3 | set(_suppress_unused_variable_warning "${DOWNLOAD_CATCH}") 4 | return() 5 | endif() 6 | 7 | find_package(Catch 1.9.3) 8 | if(CATCH_FOUND) 9 | message(STATUS "Building interpreter tests using Catch v${CATCH_VERSION}") 10 | else() 11 | message(STATUS "Catch not detected. Interpreter tests will be skipped. Install Catch headers" 12 | " manually or use `cmake -DDOWNLOAD_CATCH=1` to fetch them automatically.") 13 | return() 14 | endif() 15 | 16 | add_executable(test_embed 17 | catch.cpp 18 | test_interpreter.cpp 19 | ) 20 | target_include_directories(test_embed PRIVATE ${CATCH_INCLUDE_DIR}) 21 | pybind11_enable_warnings(test_embed) 22 | 23 | if(NOT CMAKE_VERSION VERSION_LESS 3.0) 24 | target_link_libraries(test_embed PRIVATE pybind11::embed) 25 | else() 26 | target_include_directories(test_embed PRIVATE ${PYBIND11_INCLUDE_DIR} ${PYTHON_INCLUDE_DIRS}) 27 | target_compile_options(test_embed PRIVATE ${PYBIND11_CPP_STANDARD}) 28 | target_link_libraries(test_embed PRIVATE ${PYTHON_LIBRARIES}) 29 | endif() 30 | 31 | find_package(Threads REQUIRED) 32 | target_link_libraries(test_embed PUBLIC ${CMAKE_THREAD_LIBS_INIT}) 33 | 34 | add_custom_target(cpptest COMMAND $<TARGET_FILE:test_embed> 35 | WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) 36 | 37 | pybind11_add_module(external_module THIN_LTO external_module.cpp) 38 | set_target_properties(external_module PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) 39 | add_dependencies(cpptest external_module) 40 | 41 | add_dependencies(check cpptest) 42 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_embed/catch.cpp: -------------------------------------------------------------------------------- 1 | // The Catch implementation is compiled here. This is a standalone 2 | // translation unit to avoid recompiling it for every test change. 3 | 4 | #include <pybind11/embed.h> 5 | 6 | #ifdef _MSC_VER 7 | // Silence MSVC C++17 deprecation warning from Catch regarding std::uncaught_exceptions (up to catch 8 | // 2.0.1; this should be fixed in the next catch release after 2.0.1). 9 | # pragma warning(disable: 4996) 10 | #endif 11 | 12 | #define CATCH_CONFIG_RUNNER 13 | #include <catch.hpp> 14 | 15 | namespace py = pybind11; 16 | 17 | int main(int argc, char *argv[]) { 18 | py::scoped_interpreter guard{}; 19 | auto result = Catch::Session().run(argc, argv); 20 | 21 | return result < 0xff ? result : 0xff; 22 | } 23 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_embed/external_module.cpp: -------------------------------------------------------------------------------- 1 | #include <pybind11/pybind11.h> 2 | 3 | namespace py = pybind11; 4 | 5 | /* Simple test module/test class to check that the referenced internals data of external pybind11 6 | * modules aren't preserved over a finalize/initialize.
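 * (Illustrative clarification, not in the upstream source: `internals_at()` below
 * exposes the address of this module's `py::detail::internals` singleton as an
 * integer, so the embedding tests can compare the value before and after a
 * finalize/initialize cycle of the interpreter.)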
7 | */ 8 | 9 | PYBIND11_MODULE(external_module, m) { 10 | class A { 11 | public: 12 | A(int value) : v{value} {}; 13 | int v; 14 | }; 15 | 16 | py::class_<A>(m, "A") 17 | .def(py::init<int>()) 18 | .def_readwrite("value", &A::v); 19 | 20 | m.def("internals_at", []() { 21 | return reinterpret_cast<uintptr_t>(&py::detail::get_internals()); 22 | }); 23 | } 24 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_embed/test_interpreter.py: -------------------------------------------------------------------------------- 1 | from widget_module import Widget 2 | 3 | 4 | class DerivedWidget(Widget): 5 | def __init__(self, message): 6 | super(DerivedWidget, self).__init__(message) 7 | 8 | def the_answer(self): 9 | return 42 10 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_enum.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | tests/test_enums.cpp -- enumerations 3 | 4 | Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch> 5 | 6 | All rights reserved. Use of this source code is governed by a 7 | BSD-style license that can be found in the LICENSE file. 8 | */ 9 | 10 | #include "pybind11_tests.h" 11 | 12 | TEST_SUBMODULE(enums, m) { 13 | // test_unscoped_enum 14 | enum UnscopedEnum { 15 | EOne = 1, 16 | ETwo, 17 | EThree 18 | }; 19 | py::enum_<UnscopedEnum>(m, "UnscopedEnum", py::arithmetic(), "An unscoped enumeration") 20 | .value("EOne", EOne, "Docstring for EOne") 21 | .value("ETwo", ETwo, "Docstring for ETwo") 22 | .value("EThree", EThree, "Docstring for EThree") 23 | .export_values(); 24 | 25 | // test_scoped_enum 26 | enum class ScopedEnum { 27 | Two = 2, 28 | Three 29 | }; 30 | py::enum_<ScopedEnum>(m, "ScopedEnum", py::arithmetic()) 31 | .value("Two", ScopedEnum::Two) 32 | .value("Three", ScopedEnum::Three); 33 | 34 | m.def("test_scoped_enum", [](ScopedEnum z) { 35 | return "ScopedEnum::" + std::string(z == ScopedEnum::Two ?
"Two" : "Three"); 36 | }); 37 | 38 | // test_binary_operators 39 | enum Flags { 40 | Read = 4, 41 | Write = 2, 42 | Execute = 1 43 | }; 44 | py::enum_(m, "Flags", py::arithmetic()) 45 | .value("Read", Flags::Read) 46 | .value("Write", Flags::Write) 47 | .value("Execute", Flags::Execute) 48 | .export_values(); 49 | 50 | // test_implicit_conversion 51 | class ClassWithUnscopedEnum { 52 | public: 53 | enum EMode { 54 | EFirstMode = 1, 55 | ESecondMode 56 | }; 57 | 58 | static EMode test_function(EMode mode) { 59 | return mode; 60 | } 61 | }; 62 | py::class_ exenum_class(m, "ClassWithUnscopedEnum"); 63 | exenum_class.def_static("test_function", &ClassWithUnscopedEnum::test_function); 64 | py::enum_(exenum_class, "EMode") 65 | .value("EFirstMode", ClassWithUnscopedEnum::EFirstMode) 66 | .value("ESecondMode", ClassWithUnscopedEnum::ESecondMode) 67 | .export_values(); 68 | 69 | // test_enum_to_int 70 | m.def("test_enum_to_int", [](int) { }); 71 | m.def("test_enum_to_uint", [](uint32_t) { }); 72 | m.def("test_enum_to_long_long", [](long long) { }); 73 | 74 | // test_duplicate_enum_name 75 | enum SimpleEnum 76 | { 77 | ONE, TWO, THREE 78 | }; 79 | 80 | m.def("register_bad_enum", [m]() { 81 | py::enum_(m, "SimpleEnum") 82 | .value("ONE", SimpleEnum::ONE) //NOTE: all value function calls are called with the same first parameter value 83 | .value("ONE", SimpleEnum::TWO) 84 | .value("ONE", SimpleEnum::THREE) 85 | .export_values(); 86 | }); 87 | } 88 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_eval.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | tests/test_eval.cpp -- Usage of eval() and eval_file() 3 | 4 | Copyright (c) 2016 Klemens D. Morgenstern 5 | 6 | All rights reserved. Use of this source code is governed by a 7 | BSD-style license that can be found in the LICENSE file. 
8 | */ 9 | 10 | 11 | #include 12 | #include "pybind11_tests.h" 13 | 14 | TEST_SUBMODULE(eval_, m) { 15 | // test_evals 16 | 17 | auto global = py::dict(py::module::import("__main__").attr("__dict__")); 18 | 19 | m.def("test_eval_statements", [global]() { 20 | auto local = py::dict(); 21 | local["call_test"] = py::cpp_function([&]() -> int { 22 | return 42; 23 | }); 24 | 25 | // Regular string literal 26 | py::exec( 27 | "message = 'Hello World!'\n" 28 | "x = call_test()", 29 | global, local 30 | ); 31 | 32 | // Multi-line raw string literal 33 | py::exec(R"( 34 | if x == 42: 35 | print(message) 36 | else: 37 | raise RuntimeError 38 | )", global, local 39 | ); 40 | auto x = local["x"].cast(); 41 | 42 | return x == 42; 43 | }); 44 | 45 | m.def("test_eval", [global]() { 46 | auto local = py::dict(); 47 | local["x"] = py::int_(42); 48 | auto x = py::eval("x", global, local); 49 | return x.cast() == 42; 50 | }); 51 | 52 | m.def("test_eval_single_statement", []() { 53 | auto local = py::dict(); 54 | local["call_test"] = py::cpp_function([&]() -> int { 55 | return 42; 56 | }); 57 | 58 | auto result = py::eval("x = call_test()", py::dict(), local); 59 | auto x = local["x"].cast(); 60 | return result.is_none() && x == 42; 61 | }); 62 | 63 | m.def("test_eval_file", [global](py::str filename) { 64 | auto local = py::dict(); 65 | local["y"] = py::int_(43); 66 | 67 | int val_out; 68 | local["call_test2"] = py::cpp_function([&](int value) { val_out = value; }); 69 | 70 | auto result = py::eval_file(filename, global, local); 71 | return val_out == 43 && result.is_none(); 72 | }); 73 | 74 | m.def("test_eval_failure", []() { 75 | try { 76 | py::eval("nonsense code ..."); 77 | } catch (py::error_already_set &) { 78 | return true; 79 | } 80 | return false; 81 | }); 82 | 83 | m.def("test_eval_file_failure", []() { 84 | try { 85 | py::eval_file("non-existing file"); 86 | } catch (std::exception &) { 87 | return true; 88 | } 89 | return false; 90 | }); 91 | } 92 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_eval.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pybind11_tests import eval_ as m 3 | 4 | 5 | def test_evals(capture): 6 | with capture: 7 | assert m.test_eval_statements() 8 | assert capture == "Hello World!" 9 | 10 | assert m.test_eval() 11 | assert m.test_eval_single_statement() 12 | 13 | filename = os.path.join(os.path.dirname(__file__), "test_eval_call.py") 14 | assert m.test_eval_file(filename) 15 | 16 | assert m.test_eval_failure() 17 | assert m.test_eval_file_failure() 18 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_eval_call.py: -------------------------------------------------------------------------------- 1 | # This file is called from 'test_eval.py' 2 | 3 | if 'call_test2' in locals(): 4 | call_test2(y) # noqa: F821 undefined name 5 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_gil_scoped.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | tests/test_gil_scoped.cpp -- acquire and release gil 3 | 4 | Copyright (c) 2017 Borja Zarco (Google LLC) 5 | 6 | All rights reserved. Use of this source code is governed by a 7 | BSD-style license that can be found in the LICENSE file. 
8 | */ 9 | 10 | #include "pybind11_tests.h" 11 | #include 12 | 13 | 14 | class VirtClass { 15 | public: 16 | virtual ~VirtClass() {} 17 | virtual void virtual_func() {} 18 | virtual void pure_virtual_func() = 0; 19 | }; 20 | 21 | class PyVirtClass : public VirtClass { 22 | void virtual_func() override { 23 | PYBIND11_OVERLOAD(void, VirtClass, virtual_func,); 24 | } 25 | void pure_virtual_func() override { 26 | PYBIND11_OVERLOAD_PURE(void, VirtClass, pure_virtual_func,); 27 | } 28 | }; 29 | 30 | TEST_SUBMODULE(gil_scoped, m) { 31 | py::class_(m, "VirtClass") 32 | .def(py::init<>()) 33 | .def("virtual_func", &VirtClass::virtual_func) 34 | .def("pure_virtual_func", &VirtClass::pure_virtual_func); 35 | 36 | m.def("test_callback_py_obj", 37 | [](py::object func) { func(); }); 38 | m.def("test_callback_std_func", 39 | [](const std::function &func) { func(); }); 40 | m.def("test_callback_virtual_func", 41 | [](VirtClass &virt) { virt.virtual_func(); }); 42 | m.def("test_callback_pure_virtual_func", 43 | [](VirtClass &virt) { virt.pure_virtual_func(); }); 44 | m.def("test_cross_module_gil", 45 | []() { 46 | auto cm = py::module::import("cross_module_gil_utils"); 47 | auto gil_acquire = reinterpret_cast( 48 | PyLong_AsVoidPtr(cm.attr("gil_acquire_funcaddr").ptr())); 49 | py::gil_scoped_release gil_release; 50 | gil_acquire(); 51 | }); 52 | } 53 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_iostream.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | tests/test_iostream.cpp -- Usage of scoped_output_redirect 3 | 4 | Copyright (c) 2017 Henry F. Schreiner 5 | 6 | All rights reserved. Use of this source code is governed by a 7 | BSD-style license that can be found in the LICENSE file. 
8 | */ 9 | 10 | 11 | #include 12 | #include "pybind11_tests.h" 13 | #include 14 | 15 | 16 | void noisy_function(std::string msg, bool flush) { 17 | 18 | std::cout << msg; 19 | if (flush) 20 | std::cout << std::flush; 21 | } 22 | 23 | void noisy_funct_dual(std::string msg, std::string emsg) { 24 | std::cout << msg; 25 | std::cerr << emsg; 26 | } 27 | 28 | TEST_SUBMODULE(iostream, m) { 29 | 30 | add_ostream_redirect(m); 31 | 32 | // test_evals 33 | 34 | m.def("captured_output_default", [](std::string msg) { 35 | py::scoped_ostream_redirect redir; 36 | std::cout << msg << std::flush; 37 | }); 38 | 39 | m.def("captured_output", [](std::string msg) { 40 | py::scoped_ostream_redirect redir(std::cout, py::module::import("sys").attr("stdout")); 41 | std::cout << msg << std::flush; 42 | }); 43 | 44 | m.def("guard_output", &noisy_function, 45 | py::call_guard(), 46 | py::arg("msg"), py::arg("flush")=true); 47 | 48 | m.def("captured_err", [](std::string msg) { 49 | py::scoped_ostream_redirect redir(std::cerr, py::module::import("sys").attr("stderr")); 50 | std::cerr << msg << std::flush; 51 | }); 52 | 53 | m.def("noisy_function", &noisy_function, py::arg("msg"), py::arg("flush") = true); 54 | 55 | m.def("dual_guard", &noisy_funct_dual, 56 | py::call_guard(), 57 | py::arg("msg"), py::arg("emsg")); 58 | 59 | m.def("raw_output", [](std::string msg) { 60 | std::cout << msg << std::flush; 61 | }); 62 | 63 | m.def("raw_err", [](std::string msg) { 64 | std::cerr << msg << std::flush; 65 | }); 66 | 67 | m.def("captured_dual", [](std::string msg, std::string emsg) { 68 | py::scoped_ostream_redirect redirout(std::cout, py::module::import("sys").attr("stdout")); 69 | py::scoped_ostream_redirect redirerr(std::cerr, py::module::import("sys").attr("stderr")); 70 | std::cout << msg << std::flush; 71 | std::cerr << emsg << std::flush; 72 | }); 73 | } 74 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_modules.py: -------------------------------------------------------------------------------- 1 | from pybind11_tests import modules as m 2 | from pybind11_tests.modules import subsubmodule as ms 3 | from pybind11_tests import ConstructorStats 4 | 5 | 6 | def test_nested_modules(): 7 | import pybind11_tests 8 | assert pybind11_tests.__name__ == "pybind11_tests" 9 | assert pybind11_tests.modules.__name__ == "pybind11_tests.modules" 10 | assert pybind11_tests.modules.subsubmodule.__name__ == "pybind11_tests.modules.subsubmodule" 11 | assert m.__name__ == "pybind11_tests.modules" 12 | assert ms.__name__ == "pybind11_tests.modules.subsubmodule" 13 | 14 | assert ms.submodule_func() == "submodule_func()" 15 | 16 | 17 | def test_reference_internal(): 18 | b = ms.B() 19 | assert str(b.get_a1()) == "A[1]" 20 | assert str(b.a1) == "A[1]" 21 | assert str(b.get_a2()) == "A[2]" 22 | assert str(b.a2) == "A[2]" 23 | 24 | b.a1 = ms.A(42) 25 | b.a2 = ms.A(43) 26 | assert str(b.get_a1()) == "A[42]" 27 | assert str(b.a1) == "A[42]" 28 | assert str(b.get_a2()) == "A[43]" 29 | assert str(b.a2) == "A[43]" 30 | 31 | astats, bstats = ConstructorStats.get(ms.A), ConstructorStats.get(ms.B) 32 | assert astats.alive() == 2 33 | assert bstats.alive() == 1 34 | del b 35 | assert astats.alive() == 0 36 | assert bstats.alive() == 0 37 | assert astats.values() == ['1', '2', '42', '43'] 38 | assert bstats.values() == [] 39 | assert astats.default_constructions == 0 40 | assert bstats.default_constructions == 1 41 | assert astats.copy_constructions == 0 42 | assert 
bstats.copy_constructions == 0 43 | # assert astats.move_constructions >= 0 # Don't invoke any 44 | # assert bstats.move_constructions >= 0 # Don't invoke any 45 | assert astats.copy_assignments == 2 46 | assert bstats.copy_assignments == 0 47 | assert astats.move_assignments == 0 48 | assert bstats.move_assignments == 0 49 | 50 | 51 | def test_importing(): 52 | from pybind11_tests.modules import OD 53 | from collections import OrderedDict 54 | 55 | assert OD is OrderedDict 56 | assert str(OD([(1, 'a'), (2, 'b')])) == "OrderedDict([(1, 'a'), (2, 'b')])" 57 | 58 | 59 | def test_pydoc(): 60 | """Pydoc needs to be able to provide help() for everything inside a pybind11 module""" 61 | import pybind11_tests 62 | import pydoc 63 | 64 | assert pybind11_tests.__name__ == "pybind11_tests" 65 | assert pybind11_tests.__doc__ == "pybind11 test module" 66 | assert pydoc.text.docmodule(pybind11_tests) 67 | 68 | 69 | def test_duplicate_registration(): 70 | """Registering two things with the same name""" 71 | 72 | assert m.duplicate_registration() == [] 73 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_opaque_types.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | tests/test_opaque_types.cpp -- opaque types, passing void pointers 3 | 4 | Copyright (c) 2016 Wenzel Jakob 5 | 6 | All rights reserved. Use of this source code is governed by a 7 | BSD-style license that can be found in the LICENSE file. 8 | */ 9 | 10 | #include "pybind11_tests.h" 11 | #include 12 | #include 13 | 14 | // IMPORTANT: Disable internal pybind11 translation mechanisms for STL data structures 15 | // 16 | // This also deliberately doesn't use the below StringList type alias to test 17 | // that MAKE_OPAQUE can handle a type containing a `,`. (The `std::allocator` 18 | // bit is just the default `std::vector` allocator). 19 | PYBIND11_MAKE_OPAQUE(std::vector>); 20 | 21 | using StringList = std::vector>; 22 | 23 | TEST_SUBMODULE(opaque_types, m) { 24 | // test_string_list 25 | py::class_(m, "StringList") 26 | .def(py::init<>()) 27 | .def("pop_back", &StringList::pop_back) 28 | /* There are multiple versions of push_back(), etc. Select the right ones. 
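   (Illustrative note, not in the upstream source: the C-style casts on the
   following lines spell out the member-function-pointer type in order to pick
   one specific overload; with C++14 available,
   py::overload_cast<const std::string &>(&StringList::push_back) would be an
   equivalent, arguably clearer spelling.)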
*/ 29 | .def("push_back", (void (StringList::*)(const std::string &)) &StringList::push_back) 30 | .def("back", (std::string &(StringList::*)()) &StringList::back) 31 | .def("__len__", [](const StringList &v) { return v.size(); }) 32 | .def("__iter__", [](StringList &v) { 33 | return py::make_iterator(v.begin(), v.end()); 34 | }, py::keep_alive<0, 1>()); 35 | 36 | class ClassWithSTLVecProperty { 37 | public: 38 | StringList stringList; 39 | }; 40 | py::class_<ClassWithSTLVecProperty>(m, "ClassWithSTLVecProperty") 41 | .def(py::init<>()) 42 | .def_readwrite("stringList", &ClassWithSTLVecProperty::stringList); 43 | 44 | m.def("print_opaque_list", [](const StringList &l) { 45 | std::string ret = "Opaque list: ["; 46 | bool first = true; 47 | for (auto entry : l) { 48 | if (!first) 49 | ret += ", "; 50 | ret += entry; 51 | first = false; 52 | } 53 | return ret + "]"; 54 | }); 55 | 56 | // test_pointers 57 | m.def("return_void_ptr", []() { return (void *) 0x1234; }); 58 | m.def("get_void_ptr_value", [](void *ptr) { return reinterpret_cast<std::intptr_t>(ptr); }); 59 | m.def("return_null_str", []() { return (char *) nullptr; }); 60 | m.def("get_null_str_value", [](char *ptr) { return reinterpret_cast<std::intptr_t>(ptr); }); 61 | 62 | m.def("return_unique_ptr", []() -> std::unique_ptr<StringList> { 63 | StringList *result = new StringList(); 64 | result->push_back("some value"); 65 | return std::unique_ptr<StringList>(result); 66 | }); 67 | } 68 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_opaque_types.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from pybind11_tests import opaque_types as m 3 | from pybind11_tests import ConstructorStats, UserType 4 | 5 | 6 | def test_string_list(): 7 | lst = m.StringList() 8 | lst.push_back("Element 1") 9 | lst.push_back("Element 2") 10 | assert m.print_opaque_list(lst) == "Opaque list: [Element 1, Element 2]" 11 | assert lst.back() == "Element 2" 12 | 13 | for i, k in enumerate(lst, start=1): 14 | assert k == "Element {}".format(i) 15 | lst.pop_back() 16 | assert m.print_opaque_list(lst) == "Opaque list: [Element 1]" 17 | 18 | cvp = m.ClassWithSTLVecProperty() 19 | assert m.print_opaque_list(cvp.stringList) == "Opaque list: []" 20 | 21 | cvp.stringList = lst 22 | cvp.stringList.push_back("Element 3") 23 | assert m.print_opaque_list(cvp.stringList) == "Opaque list: [Element 1, Element 3]" 24 | 25 | 26 | def test_pointers(msg): 27 | living_before = ConstructorStats.get(UserType).alive() 28 | assert m.get_void_ptr_value(m.return_void_ptr()) == 0x1234 29 | assert m.get_void_ptr_value(UserType()) # Should also work for other C++ types 30 | assert ConstructorStats.get(UserType).alive() == living_before 31 | 32 | with pytest.raises(TypeError) as excinfo: 33 | m.get_void_ptr_value([1, 2, 3]) # This should not work 34 | assert msg(excinfo.value) == """ 35 | get_void_ptr_value(): incompatible function arguments. The following argument types are supported: 36 | 1.
(arg0: capsule) -> int 37 | 38 | Invoked with: [1, 2, 3] 39 | """ # noqa: E501 line too long 40 | 41 | assert m.return_null_str() is None 42 | assert m.get_null_str_value(m.return_null_str()) is not None 43 | 44 | ptr = m.return_unique_ptr() 45 | assert "StringList" in repr(ptr) 46 | assert m.print_opaque_list(ptr) == "Opaque list: [some value]" 47 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_pickling.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from pybind11_tests import pickling as m 3 | 4 | try: 5 | import cPickle as pickle # Use cPickle on Python 2.7 6 | except ImportError: 7 | import pickle 8 | 9 | 10 | @pytest.mark.parametrize("cls_name", ["Pickleable", "PickleableNew"]) 11 | def test_roundtrip(cls_name): 12 | cls = getattr(m, cls_name) 13 | p = cls("test_value") 14 | p.setExtra1(15) 15 | p.setExtra2(48) 16 | 17 | data = pickle.dumps(p, 2) # Must use pickle protocol >= 2 18 | p2 = pickle.loads(data) 19 | assert p2.value() == p.value() 20 | assert p2.extra1() == p.extra1() 21 | assert p2.extra2() == p.extra2() 22 | 23 | 24 | @pytest.unsupported_on_pypy 25 | @pytest.mark.parametrize("cls_name", ["PickleableWithDict", "PickleableWithDictNew"]) 26 | def test_roundtrip_with_dict(cls_name): 27 | cls = getattr(m, cls_name) 28 | p = cls("test_value") 29 | p.extra = 15 30 | p.dynamic = "Attribute" 31 | 32 | data = pickle.dumps(p, pickle.HIGHEST_PROTOCOL) 33 | p2 = pickle.loads(data) 34 | assert p2.value == p.value 35 | assert p2.extra == p.extra 36 | assert p2.dynamic == p.dynamic 37 | 38 | 39 | def test_enum_pickle(): 40 | from pybind11_tests import enums as e 41 | data = pickle.dumps(e.EOne, 2) 42 | assert e.EOne == pickle.loads(data) 43 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_tagbased_polymorphic.py: -------------------------------------------------------------------------------- 1 | from pybind11_tests import tagbased_polymorphic as m 2 | 3 | 4 | def test_downcast(): 5 | zoo = m.create_zoo() 6 | assert [type(animal) for animal in zoo] == [ 7 | m.Labrador, m.Dog, m.Chihuahua, m.Cat, m.Panther 8 | ] 9 | assert [animal.name for animal in zoo] == [ 10 | "Fido", "Ginger", "Hertzl", "Tiger", "Leo" 11 | ] 12 | zoo[1].sound = "woooooo" 13 | assert [dog.bark() for dog in zoo[:3]] == [ 14 | "Labrador Fido goes WOOF!", 15 | "Dog Ginger goes woooooo", 16 | "Chihuahua Hertzl goes iyiyiyiyiyi and runs in circles" 17 | ] 18 | assert [cat.purr() for cat in zoo[3:]] == ["mrowr", "mrrrRRRRRR"] 19 | zoo[0].excitement -= 1000 20 | assert zoo[0].excitement == 14000 21 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_union.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | tests/test_union.cpp -- test bindings of a simple union 3 | 4 | Copyright (c) 2019 Roland Dreier 5 | 6 | All rights reserved. Use of this source code is governed by a 7 | BSD-style license that can be found in the LICENSE file.
8 | */ 9 | 10 | #include "pybind11_tests.h" 11 | 12 | TEST_SUBMODULE(union_, m) { 13 | union TestUnion { 14 | int value_int; 15 | unsigned value_uint; 16 | }; 17 | 18 | py::class_<TestUnion>(m, "TestUnion") 19 | .def(py::init<>()) 20 | .def_readonly("as_int", &TestUnion::value_int) 21 | .def_readwrite("as_uint", &TestUnion::value_uint); 22 | } 23 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tests/test_union.py: -------------------------------------------------------------------------------- 1 | from pybind11_tests import union_ as m 2 | 3 | 4 | def test_union(): 5 | instance = m.TestUnion() 6 | 7 | instance.as_uint = 10 8 | assert instance.as_int == 10 9 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tools/FindCatch.cmake: -------------------------------------------------------------------------------- 1 | # - Find the Catch test framework or download it (single header) 2 | # 3 | # This is a quick module for internal use. It assumes that Catch is 4 | # REQUIRED and that a minimum version is provided (not EXACT). If 5 | # a suitable version isn't found locally, the single header file 6 | # will be downloaded and placed in the build dir: PROJECT_BINARY_DIR. 7 | # 8 | # This code sets the following variables: 9 | # CATCH_INCLUDE_DIR - path to catch.hpp 10 | # CATCH_VERSION - version number 11 | 12 | if(NOT Catch_FIND_VERSION) 13 | message(FATAL_ERROR "A version number must be specified.") 14 | elseif(Catch_FIND_REQUIRED) 15 | message(FATAL_ERROR "This module assumes Catch is not required.") 16 | elseif(Catch_FIND_VERSION_EXACT) 17 | message(FATAL_ERROR "Exact version numbers are not supported, only minimum.") 18 | endif() 19 | 20 | # Extract the version number from catch.hpp 21 | function(_get_catch_version) 22 | file(STRINGS "${CATCH_INCLUDE_DIR}/catch.hpp" version_line REGEX "Catch v.*" LIMIT_COUNT 1) 23 | if(version_line MATCHES "Catch v([0-9]+)\\.([0-9]+)\\.([0-9]+)") 24 | set(CATCH_VERSION "${CMAKE_MATCH_1}.${CMAKE_MATCH_2}.${CMAKE_MATCH_3}" PARENT_SCOPE) 25 | endif() 26 | endfunction() 27 | 28 | # Download the single-header version of Catch 29 | function(_download_catch version destination_dir) 30 | message(STATUS "Downloading catch v${version}...") 31 | set(url https://github.com/philsquared/Catch/releases/download/v${version}/catch.hpp) 32 | file(DOWNLOAD ${url} "${destination_dir}/catch.hpp" STATUS status) 33 | list(GET status 0 error) 34 | if(error) 35 | message(FATAL_ERROR "Could not download ${url}") 36 | endif() 37 | set(CATCH_INCLUDE_DIR "${destination_dir}" CACHE INTERNAL "") 38 | endfunction() 39 | 40 | # Look for catch locally 41 | find_path(CATCH_INCLUDE_DIR NAMES catch.hpp PATH_SUFFIXES catch) 42 | if(CATCH_INCLUDE_DIR) 43 | _get_catch_version() 44 | endif() 45 | 46 | # Download the header if it wasn't found or if it's outdated 47 | if(NOT CATCH_VERSION OR CATCH_VERSION VERSION_LESS ${Catch_FIND_VERSION}) 48 | if(DOWNLOAD_CATCH) 49 | _download_catch(${Catch_FIND_VERSION} "${PROJECT_BINARY_DIR}/catch/") 50 | _get_catch_version() 51 | else() 52 | set(CATCH_FOUND FALSE) 53 | return() 54 | endif() 55 | endif() 56 | 57 | set(CATCH_FOUND TRUE) 58 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tools/check-style.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Script to check include/test code for
common pybind11 code style errors. 4 | # 5 | # This script currently checks for 6 | # 7 | # 1. use of tabs instead of spaces 8 | # 2. MSDOS-style CRLF endings 9 | # 3. trailing spaces 10 | # 4. missing space between keyword and parenthesis, e.g.: for(, if(, while( 11 | # 5. Missing space between right parenthesis and brace, e.g. 'for (...){' 12 | # 6. opening brace on its own line. It should always be on the same line as the 13 | # if/while/for/do statement. 14 | # 15 | # Invoke as: tools/check-style.sh 16 | # 17 | 18 | check_style_errors=0 19 | IFS=$'\n' 20 | 21 | found="$( GREP_COLORS='mt=41' GREP_COLOR='41' grep $'\t' include tests/*.{cpp,py,h} docs/*.rst -rn --color=always )" 22 | if [ -n "$found" ]; then 23 | # The mt=41 sets a red background for matched tabs: 24 | echo -e '\033[31;01mError: found tab characters in the following files:\033[0m' 25 | check_style_errors=1 26 | echo "$found" | sed -e 's/^/ /' 27 | fi 28 | 29 | 30 | found="$( grep -IUlr $'\r' include tests/*.{cpp,py,h} docs/*.rst --color=always )" 31 | if [ -n "$found" ]; then 32 | echo -e '\033[31;01mError: found CRLF characters in the following files:\033[0m' 33 | check_style_errors=1 34 | echo "$found" | sed -e 's/^/ /' 35 | fi 36 | 37 | found="$(GREP_COLORS='mt=41' GREP_COLOR='41' grep '[[:blank:]]\+$' include tests/*.{cpp,py,h} docs/*.rst -rn --color=always )" 38 | if [ -n "$found" ]; then 39 | # The mt=41 sets a red background for matched trailing spaces 40 | echo -e '\033[31;01mError: found trailing spaces in the following files:\033[0m' 41 | check_style_errors=1 42 | echo "$found" | sed -e 's/^/ /' 43 | fi 44 | 45 | found="$(grep '\<\(if\|for\|while\|catch\)(\|){' include tests/*.{cpp,h} -rn --color=always)" 46 | if [ -n "$found" ]; then 47 | echo -e '\033[31;01mError: found the following coding style problems:\033[0m' 48 | check_style_errors=1 49 | echo "$found" | sed -e 's/^/ /' 50 | fi 51 | 52 | found="$(awk ' 53 | function prefix(filename, lineno) { 54 | return " \033[35m" filename "\033[36m:\033[32m" lineno "\033[36m:\033[0m" 55 | } 56 | function mark(pattern, string) { sub(pattern, "\033[01;31m&\033[0m", string); return string } 57 | last && /^\s*{/ { 58 | print prefix(FILENAME, FNR-1) mark("\\)\\s*$", last) 59 | print prefix(FILENAME, FNR) mark("^\\s*{", $0) 60 | last="" 61 | } 62 | { last = /(if|for|while|catch|switch)\s*\(.*\)\s*$/ ? $0 : "" } 63 | ' $(find include -type f) tests/*.{cpp,h} docs/*.rst)" 64 | if [ -n "$found" ]; then 65 | check_style_errors=1 66 | echo -e '\033[31;01mError: braces should occur on the same line as the if/while/.. statement. Found issues in the following files:\033[0m' 67 | echo "$found" 68 | fi 69 | 70 | exit $check_style_errors 71 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/pybind11-master/tools/libsize.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | import os 3 | import sys 4 | 5 | # Internal build script for generating debugging test .so size. 6 | # Usage: 7 | # python libsize.py file.so save.txt -- displays the size of file.so and, if save.txt exists, compares it to the 8 | # size in it, then overwrites save.txt with the new size for future runs. 
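# For example (hypothetical paths, not part of the repo's build system):
#   python libsize.py pybind11_tests.cpython-39-x86_64-linux-gnu.so /tmp/libsize_baseline.txt
# The first run prints the size and seeds save.txt; later runs also report the change in bytes and percent.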
9 | 10 | if len(sys.argv) != 3: 11 | sys.exit("Invalid arguments: usage: python libsize.py file.so save.txt") 12 | 13 | lib = sys.argv[1] 14 | save = sys.argv[2] 15 | 16 | if not os.path.exists(lib): 17 | sys.exit("Error: requested file ({}) does not exist".format(lib)) 18 | 19 | libsize = os.path.getsize(lib) 20 | 21 | print("------", os.path.basename(lib), "file size:", libsize, end='') 22 | 23 | if os.path.exists(save): 24 | with open(save) as sf: 25 | oldsize = int(sf.readline()) 26 | 27 | if oldsize > 0: 28 | change = libsize - oldsize 29 | if change == 0: 30 | print(" (no change)") 31 | else: 32 | print(" (change of {:+} bytes = {:+.2%})".format(change, change / oldsize)) 33 | else: 34 | print() 35 | 36 | with open(save, 'w') as sf: 37 | sf.write(str(libsize)) 38 | 39 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | from setuptools import setup 4 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension 5 | 6 | include_dirs = torch.utils.cpp_extension.include_paths() 7 | print(include_dirs) 8 | include_dirs.append('/media/yanshi/windows/attention_kernel/src') 9 | include_dirs.append('/media/yanshi/windows/attention_kernel/pybind11-master/include') 10 | print(include_dirs) 11 | 12 | setup( 13 | name="attention_package", 14 | version="0.2", 15 | description="attention layer", 16 | # url="https://github.com/jbarker-nvidia/pytorch-correlation", 17 | author="Saurus", 18 | author_email="jia1saurus@gmail.com", 19 | ext_modules = [ 20 | CUDAExtension(name='at_cuda', 21 | include_dirs = include_dirs, 22 | sources=['src/attention_kernel.cu', 'src/attention_cuda.cpp']) 23 | ], 24 | cmdclass={ 25 | 'build_ext' : BuildExtension 26 | } 27 | ) 28 | -------------------------------------------------------------------------------- /local_pipeline/model/ATT/src/attention_kernel.h: -------------------------------------------------------------------------------- 1 | #ifndef _ATTENTION_CUDA_KERNEL 2 | #define _ATTENTION_CUDA_KERNEL 3 | 4 | void attention_forward_ongpu(const float* input1, const float* input2, float* output, int kernel_size, int pad_size, int stride, 5 | int input_cols, int input_rows, int output_cols, int output_rows, int input_ch, int output_ch, int batch_size); 6 | 7 | void attention_backward_ongpu(const float* input1, const float* input2, const float* grad_input, const float* grad_input_padding, 8 | float* grad_output0, float* grad_output1, int kernel_size, int pad_size, int stride, 9 | int input_cols, int input_rows, int output_cols, int output_rows, int input_ch, int output_ch, int batch_size); 10 | 11 | void channel_attention_forward_ongpu(const float* input1, const float* input2, float* output, int kernel_size, int pad_size, int stride, 12 | int input1_cols, int input1_rows, int input2_cols, int input2_rows, int input1_chs, int input2_chs, int batch_size); 13 | 14 | void channel_attention_backward_ongpu(const float* input1, const float* input2, const float* grad_input, float* grad_output1, float* grad_output2, 15 | int kernel_size, int pad_size, int stride, int input1_cols, int input1_rows, int input2_cols, int input2_rows, int input1_chs, int input2_chs, int batch_size); 16 | 17 | #endif -------------------------------------------------------------------------------- /local_pipeline/model/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/arplaboratory/UASTHN/1b27df8b5b4305f8f41936736a05e8117f7f20db/local_pipeline/model/__init__.py -------------------------------------------------------------------------------- /local_pipeline/model/sync_batchnorm/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # File : __init__.py 3 | # Author : Jiayuan Mao 4 | # Email : maojiayuan@gmail.com 5 | # Date : 27/01/2018 6 | # 7 | # This file is part of Synchronized-BatchNorm-PyTorch. 8 | # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch 9 | # Distributed under MIT License. 10 | 11 | from .batchnorm import set_sbn_eps_mode 12 | from .batchnorm import SynchronizedBatchNorm1d, SynchronizedBatchNorm2d, SynchronizedBatchNorm3d 13 | from .batchnorm import patch_sync_batchnorm, convert_model 14 | from .replicate import DataParallelWithCallback, patch_replication_callback 15 | -------------------------------------------------------------------------------- /local_pipeline/model/sync_batchnorm/batchnorm_reimpl.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # File : batchnorm_reimpl.py 4 | # Author : acgtyrant 5 | # Date : 11/01/2018 6 | # 7 | # This file is part of Synchronized-BatchNorm-PyTorch. 8 | # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch 9 | # Distributed under MIT License. 10 | 11 | import torch 12 | import torch.nn as nn 13 | import torch.nn.init as init 14 | 15 | __all__ = ['BatchNorm2dReimpl'] 16 | 17 | 18 | class BatchNorm2dReimpl(nn.Module): 19 | """ 20 | A re-implementation of batch normalization, used for testing the numerical 21 | stability. 22 | 23 | Author: acgtyrant 24 | See also: 25 | https://github.com/vacancy/Synchronized-BatchNorm-PyTorch/issues/14 26 | """ 27 | def __init__(self, num_features, eps=1e-5, momentum=0.1): 28 | super().__init__() 29 | 30 | self.num_features = num_features 31 | self.eps = eps 32 | self.momentum = momentum 33 | self.weight = nn.Parameter(torch.empty(num_features)) 34 | self.bias = nn.Parameter(torch.empty(num_features)) 35 | self.register_buffer('running_mean', torch.zeros(num_features)) 36 | self.register_buffer('running_var', torch.ones(num_features)) 37 | self.reset_parameters() 38 | 39 | def reset_running_stats(self): 40 | self.running_mean.zero_() 41 | self.running_var.fill_(1) 42 | 43 | def reset_parameters(self): 44 | self.reset_running_stats() 45 | init.uniform_(self.weight) 46 | init.zeros_(self.bias) 47 | 48 | def forward(self, input_): 49 | batchsize, channels, height, width = input_.size() 50 | numel = batchsize * height * width 51 | input_ = input_.permute(1, 0, 2, 3).contiguous().view(channels, numel) 52 | sum_ = input_.sum(1) 53 | sum_of_square = input_.pow(2).sum(1) 54 | mean = sum_ / numel 55 | sumvar = sum_of_square - sum_ * mean 56 | 57 | self.running_mean = ( 58 | (1 - self.momentum) * self.running_mean 59 | + self.momentum * mean.detach() 60 | ) 61 | unbias_var = sumvar / (numel - 1) 62 | self.running_var = ( 63 | (1 - self.momentum) * self.running_var 64 | + self.momentum * unbias_var.detach() 65 | ) 66 | 67 | bias_var = sumvar / numel 68 | inv_std = 1 / (bias_var + self.eps).pow(0.5) 69 | output = ( 70 | (input_ - mean.unsqueeze(1)) * inv_std.unsqueeze(1) * 71 | self.weight.unsqueeze(1) + self.bias.unsqueeze(1)) 72 | 73 | return output.view(channels, batchsize, height, width).permute(1, 0, 2, 3).contiguous() 74 | 75 | 
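The re-implementation above is meant to be numerically interchangeable with torch.nn.BatchNorm2d in training mode: the output is normalized with the biased batch variance, while the running statistics are updated with the unbiased variance. A minimal sketch of such a check, reusing the tolerances of the repo's TorchTestCase; the import line is an assumption and should be adjusted to however this package sits on your PYTHONPATH:

import torch
import torch.nn as nn
# hypothetical import path; adjust to your sys.path layout
from local_pipeline.model.sync_batchnorm.batchnorm_reimpl import BatchNorm2dReimpl

torch.manual_seed(0)
bn_ref = nn.BatchNorm2d(8)    # reference implementation
bn_re = BatchNorm2dReimpl(8)  # re-implementation under test
with torch.no_grad():
    # align the affine parameters (the reimpl initializes its weight randomly)
    bn_re.weight.copy_(bn_ref.weight)
    bn_re.bias.copy_(bn_ref.bias)

bn_ref.train()
bn_re.train()
x = torch.randn(4, 8, 16, 16)
assert torch.allclose(bn_ref(x), bn_re(x), atol=1e-5, rtol=1e-3)
assert torch.allclose(bn_ref.running_mean, bn_re.running_mean, atol=1e-5, rtol=1e-3)
assert torch.allclose(bn_ref.running_var, bn_re.running_var, atol=1e-5, rtol=1e-3)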
-------------------------------------------------------------------------------- /local_pipeline/model/sync_batchnorm/unittest.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # File : unittest.py 3 | # Author : Jiayuan Mao 4 | # Email : maojiayuan@gmail.com 5 | # Date : 27/01/2018 6 | # 7 | # This file is part of Synchronized-BatchNorm-PyTorch. 8 | # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch 9 | # Distributed under MIT License. 10 | 11 | import unittest 12 | import torch 13 | 14 | 15 | class TorchTestCase(unittest.TestCase): 16 | def assertTensorClose(self, x, y): 17 | adiff = float((x - y).abs().max()) 18 | if (y == 0).all(): 19 | rdiff = 'NaN' 20 | else: 21 | rdiff = float((adiff / y).abs().max()) 22 | 23 | message = ( 24 | 'Tensor close check failed\n' 25 | 'adiff={}\n' 26 | 'rdiff={}\n' 27 | ).format(adiff, rdiff) 28 | self.assertTrue(torch.allclose(x, y, atol=1e-5, rtol=1e-3), message) 29 | 30 | -------------------------------------------------------------------------------- /local_pipeline/plot_hist.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | 4 | def plot_hist_helper(path): 5 | # An "interface" to matplotlib.axes.Axes.hist() method 6 | plt.figure() 7 | data = np.load(f'{path}/resnpy.npy', allow_pickle=True) 8 | n, bins, patches = plt.hist(x=data, bins=np.linspace(0, 100, 20)) 9 | plt.title("Test MACE") 10 | plt.ylim(0, 20000) 11 | plt.xlabel("MACE") 12 | plt.ylabel("Frequency") 13 | plt.savefig(f"{path}/hist.png") 14 | plt.close() 15 | 16 | # plt.figure() 17 | # flow_data = np.load(f'{path}/flownpy.npy', allow_pickle=True) 18 | # n, bins, patches = plt.hist(x=flow_data, bins=np.linspace(0, 100, 20)) 19 | # plt.title("Test Flow") 20 | # plt.ylim(0, 20000) 21 | # plt.xlabel("Flow") 22 | # plt.ylabel("Frequency") 23 | # plt.savefig(f"{path}/flowhist.png") 24 | # plt.close() 25 | 26 | if __name__ == '__main__': 27 | path = "IHN_results/satellite_thermal_dense" 28 | plot_hist_helper(path) -------------------------------------------------------------------------------- /scripts/global/eval.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=1 # uses 1 compute core per task 5 | #SBATCH --time=1:00:00 6 | #SBATCH --gres=gpu:1 7 | #SBATCH --mem=32GB 8 | #SBATCH --job-name=eval_thermal 9 | 10 | eval "$(conda shell.bash hook)" 11 | conda activate UASTHN 12 | 13 | python3 global_pipeline/eval.py --resume='logs/global_retrieval/'$1'/best_model.pth' --dataset_name=satellite_0_thermalmapping_135_dense --datasets_folder ./datasets --aggregation gem --infer_batch_size 16 --backbone $2 --fc_output_dim $3 --G_contrast $4 14 | 15 | python3 global_pipeline/eval.py --resume='logs/global_retrieval/'$1'/best_model.pth' --dataset_name=satellite_0_thermalmapping_135 --datasets_folder ./datasets --aggregation gem --infer_batch_size 16 --prior_location_threshold=512 --backbone $2 --fc_output_dim $3 --G_contrast $4 -------------------------------------------------------------------------------- /scripts/global/eval_anyloc.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 
4 | #SBATCH --cpus-per-task=1 # uses 1 compute core per task 5 | #SBATCH --time=9:00:00 6 | #SBATCH --gres=gpu:a100:1 7 | #SBATCH --mem=200GB 8 | #SBATCH --job-name=eval_thermal 9 | 10 | eval "$(conda shell.bash hook)" 11 | conda activate UASTHN 12 | 13 | python3 global_pipeline/eval_anyloc.py --dataset_name=satellite_0_thermalmapping_135 --datasets_folder ./datasets --infer_batch_size 1 --prior_location_threshold=512 --resize 504 504 -------------------------------------------------------------------------------- /scripts/global/eval_satellite_translation_exclude_dense.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=16 # uses 1 compute core per task 5 | #SBATCH --time=12:00:00 6 | #SBATCH --gres=gpu:1 7 | #SBATCH --mem=200GB 8 | #SBATCH --job-name=generate_extended 9 | #SBATCH --output=generate_extended.out 10 | 11 | eval "$(conda shell.bash hook)" 12 | conda activate UASTHN 13 | 14 | python3 global_pipeline/eval_pix2pix_generate_h5_exclude.py --resume='logs/default/'$1'/best_model.pth' --dataset_name=satellite_0_satellite_0_dense_largest_ori --datasets_folder ./datasets --G_net unet --GAN_upsample bilinear --GAN_resize 512 512 --resize 768 768 -------------------------------------------------------------------------------- /scripts/global/train_bing_thermal_partial_resnet50_gem_extended.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=16 # uses 1 compute core per task 5 | #SBATCH --time=24:00:00 6 | #SBATCH --gres=gpu:a100:1 7 | #SBATCH --mem=200GB 8 | #SBATCH --job-name=train_sgm 9 | #SBATCH --output=train_sgm.out 10 | 11 | eval "$(conda shell.bash hook)" 12 | conda activate UASTHN 13 | 14 | python3 global_pipeline/train.py --dataset_name satellite_0_thermalmapping_135_contrast_dense_exclusion --backbone resnet50conv4 --aggregation gem --mining partial --datasets_folder ./datasets --save_dir global_retrieval --lr 0.00001 --fc_output_dim $FC --train_batch_size 16 --infer_batch_size 256 --num_workers 16 --epochs_num 100 --patience 50 --negs_num_per_query 2 --queries_per_epoch 50000 --cache_refresh_rate 10000 --unfreeze --use_extended_data --G_contrast manual -------------------------------------------------------------------------------- /scripts/global/train_bing_thermal_partial_resnet50_gem_extended_DANN.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=16 # uses 1 compute core per task 5 | #SBATCH --time=24:00:00 6 | #SBATCH --gres=gpu:a100:1 7 | #SBATCH --mem=200GB 8 | #SBATCH --job-name=train_sgm 9 | #SBATCH --output=train_sgm.out 10 | 11 | eval "$(conda shell.bash hook)" 12 | conda activate UASTHN 13 | 14 | python3 global_pipeline/train.py --dataset_name satellite_0_thermalmapping_135_contrast_dense_exclusion --backbone resnet50conv4 --aggregation gem --mining partial --datasets_folder ./datasets --save_dir global_retrieval --lr 0.00001 --fc_output_dim $FC --train_batch_size 16 --infer_batch_size 256 --num_workers 16 --epochs_num 100 --patience 50 --negs_num_per_query 2 --queries_per_epoch 50000 
--cache_refresh_rate 10000 --unfreeze --use_extended_data --G_contrast manual --DA --lambda_DA 0.1 -------------------------------------------------------------------------------- /scripts/global/train_bing_thermal_partial_resnet50_netvlad_extended.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=8 # uses 1 compute core per task 5 | #SBATCH --time=48:00:00 6 | #SBATCH --gres=gpu:a100:1 7 | #SBATCH --mem=200GB 8 | #SBATCH --job-name=train_sgm 9 | #SBATCH --output=train_sgm.out 10 | 11 | eval "$(conda shell.bash hook)" 12 | conda activate UASTHN 13 | 14 | python3 global_pipeline/train.py --dataset_name satellite_0_thermalmapping_135_contrast_dense_exclusion --backbone resnet50conv4 --aggregation netvlad --mining partial --datasets_folder ./datasets --save_dir global_retrieval --lr 0.00001 --fc_output_dim $FC --train_batch_size 16 --infer_batch_size 256 --num_workers 8 --epochs_num 100 --patience 50 --negs_num_per_query 2 --queries_per_epoch 50000 --cache_refresh_rate 10000 --unfreeze --use_extended_data --G_contrast manual -------------------------------------------------------------------------------- /scripts/global/train_bing_thermal_partial_resnet50_netvlad_extended_DANN.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=8 # uses 1 compute core per task 5 | #SBATCH --time=48:00:00 6 | #SBATCH --gres=gpu:a100:1 7 | #SBATCH --mem=200GB 8 | #SBATCH --job-name=train_sgm 9 | #SBATCH --output=train_sgm.out 10 | 11 | eval "$(conda shell.bash hook)" 12 | conda activate UASTHN 13 | 14 | python3 global_pipeline/train.py --dataset_name satellite_0_thermalmapping_135_contrast_dense_exclusion --backbone resnet50conv4 --aggregation netvlad --mining partial --datasets_folder ./datasets --save_dir global_retrieval --lr 0.00001 --fc_output_dim $FC --train_batch_size 16 --infer_batch_size 256 --num_workers 8 --epochs_num 100 --patience 50 --negs_num_per_query 2 --queries_per_epoch 50000 --cache_refresh_rate 10000 --unfreeze --use_extended_data --G_contrast manual --DA --lambda_DA 0.1 -------------------------------------------------------------------------------- /scripts/global/train_bing_thermal_translation_100.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=16 # uses 1 compute core per task 5 | #SBATCH --time=24:00:00 6 | #SBATCH --gres=gpu:1 7 | #SBATCH --mem=200GB 8 | #SBATCH --job-name=train_tgm 9 | #SBATCH --output=train_tgm.out 10 | 11 | eval "$(conda shell.bash hook)" 12 | conda activate UASTHN 13 | 14 | python3 global_pipeline/train_pix2pix.py --dataset_name=satellite_0_thermalmapping_135 --datasets_folder=datasets --train_batch_size 32 --lr 0.0002 --patience 40 --epochs_num 60 --G_net unet --G_loss_lambda 100.0 --D_net patchGAN --GAN_save_freq 5 --GAN_resize 768 768 --GAN_epochs_decay 20 --G_contrast manual --num_workers 16 15 | -------------------------------------------------------------------------------- /scripts/global/train_bing_thermal_translation_100_nocontrast.sbatch: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=16 # uses 1 compute core per task 5 | #SBATCH --time=24:00:00 6 | #SBATCH --gres=gpu:a100:1 7 | #SBATCH --mem=200GB 8 | #SBATCH --job-name=train_tgm 9 | #SBATCH --output=train_tgm.out 10 | 11 | eval "$(conda shell.bash hook)" 12 | conda activate UASTHN 13 | 14 | python3 global_pipeline/train_pix2pix.py --dataset_name=satellite_0_thermalmapping_135_new --datasets_folder=datasets --train_batch_size 32 --lr 0.0001 --patience 40 --epochs_num 160 --G_net unet --G_loss_lambda 100.0 --D_net patchGAN --GAN_save_freq 5 --GAN_resize 768 768 --GAN_epochs_decay 40 --num_workers 16 15 | -------------------------------------------------------------------------------- /scripts/keypoint/eval_loftr.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=8 # uses 1 compute core per task 5 | #SBATCH --time=02:00:00 6 | #SBATCH --gres=gpu:a100:1 7 | #SBATCH --mem=250GB 8 | #SBATCH --job-name=eval_keypoint 9 | #SBATCH --output=eval_keypoint.out 10 | 11 | eval "$(conda shell.bash hook)" 12 | conda activate UASTHN 13 | 14 | python3 ./keypoint_pipeline/myloftr/myevaluate.py --dataset_name satellite_0_thermalmapping_135 --eval_model logs/local_key/test --val_positive_dist_threshold 50 --database_size 512 --batch_size 1 --test -------------------------------------------------------------------------------- /scripts/keypoint/eval_r2d2.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=8 # uses 1 compute core per task 5 | #SBATCH --time=02:00:00 6 | #SBATCH --gres=gpu:a100:1 7 | #SBATCH --mem=250GB 8 | #SBATCH --job-name=eval_keypoint 9 | #SBATCH --output=eval_keypoint.out 10 | 11 | eval "$(conda shell.bash hook)" 12 | conda activate UASTHN 13 | 14 | python3 ./keypoint_pipeline/myr2d2/myevaluate.py --dataset_name satellite_0_thermalmapping_135 --eval_model logs/local_key/$MODEL --val_positive_dist_threshold 50 --database_size 512 --batch_size 1 --test -------------------------------------------------------------------------------- /scripts/keypoint/train_50_r2d2.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=8 # uses 1 compute core per task 5 | #SBATCH --time=24:00:00 6 | #SBATCH --gres=gpu:a100:1 7 | #SBATCH --mem=250GB 8 | #SBATCH --job-name=train_keypoint 9 | #SBATCH --output=train_keypoint.out 10 | 11 | eval "$(conda shell.bash hook)" 12 | conda activate UASTHN 13 | 14 | python3 ./keypoint_pipeline/myr2d2/train_key.py --dataset_name satellite_0_thermalmapping_135 --val_positive_dist_threshold 50 --database_size 512 --num_steps 200000 --batch_size 8 -------------------------------------------------------------------------------- /scripts/keypoint/train_50_r2d2_loss.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 
compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=8 # uses 1 compute core per task 5 | #SBATCH --time=24:00:00 6 | #SBATCH --gres=gpu:a100:1 7 | #SBATCH --mem=250GB 8 | #SBATCH --job-name=train_keypoint 9 | #SBATCH --output=train_keypoint.out 10 | 11 | eval "$(conda shell.bash hook)" 12 | conda activate UASTHN 13 | 14 | python3 ./keypoint_pipeline/myr2d2/train_key.py --dataset_name satellite_0_thermalmapping_135 --val_positive_dist_threshold 50 --database_size 512 --num_steps 200000 --disable_reliability --batch_size 8 -------------------------------------------------------------------------------- /scripts/local_largest_1536/eval_local_sparse_extended_2_ihn.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=8 # uses 1 compute core per task 5 | #SBATCH --time=00:40:00 6 | #SBATCH --gres=gpu:a100:1 7 | #SBATCH --mem=250GB 8 | #SBATCH --job-name=eval_local 9 | #SBATCH --output=eval_local.out 10 | 11 | eval "$(conda shell.bash hook)" 12 | conda activate UASTHN 13 | 14 | python3 ./local_pipeline/myevaluate.py --dataset_name satellite_0_thermalmapping_135_train --eval_model logs/local_he/$MODEL --val_positive_dist_threshold $DC --lev0 --database_size 1536 --corr_level 4 --test -------------------------------------------------------------------------------- /scripts/local_largest_1536/eval_local_sparse_extended_2_sthn.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=8 # uses 1 compute core per task 5 | #SBATCH --time=00:40:00 6 | #SBATCH --gres=gpu:a100:1 7 | #SBATCH --mem=250GB 8 | #SBATCH --job-name=eval_local 9 | #SBATCH --output=eval_local.out 10 | 11 | eval "$(conda shell.bash hook)" 12 | conda activate UASTHN 13 | 14 | python3 ./local_pipeline/myevaluate.py --dataset_name satellite_0_thermalmapping_135_train --eval_model logs/local_he/$MODEL --val_positive_dist_threshold $DC --lev0 --database_size 1536 --corr_level 4 --two_stages --fine_padding 32 --test -------------------------------------------------------------------------------- /scripts/local_largest_1536/eval_local_sparse_extended_2_ue1_c_ihn.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=8 # uses 1 compute core per task 5 | #SBATCH --time=00:40:00 6 | #SBATCH --gres=gpu:a100:1 7 | #SBATCH --array=5 8 | #SBATCH --mem=250GB 9 | #SBATCH --job-name=eval_local 10 | #SBATCH --output=eval_local.out 11 | 12 | eval "$(conda shell.bash hook)" 13 | conda activate UASTHN 14 | 15 | CROP=$SLURM_ARRAY_TASK_ID 16 | 17 | if [ "$CROP" -le 5 ]; then 18 | BAN=8 19 | else 20 | BAN=4 21 | fi 22 | 23 | python3 ./local_pipeline/myevaluate.py --dataset_name satellite_0_thermalmapping_135_train --eval_model logs/local_he/$MODEL --val_positive_dist_threshold $DC --lev0 --database_size 1536 --corr_level 4 --first_stage_ue --test --batch_size $BAN --ue_aug_method shift --ue_num_crops $CROP --ue_shift_crops_types $ST --ue_shift $SHIFT --ue_seed $USEED --ue_std_method $STDM --ue_agg $AGG --check_step $CS --arch $ARCH 
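Each of these evaluation scripts is parameterized entirely through exported environment variables ($MODEL, $DC, $SHIFT, $STDM, $AGG, $CS, $ARCH, ...), and the SLURM array index becomes the number of crops (array indices of 5 or less run with batch size 8, larger ones with 4). A hypothetical submission following the --export pattern that train_global.sh uses; the variable values here are illustrative only:

sbatch --export=ALL,MODEL=my_local_he_run,DC=512,ST=random,SHIFT=32,USEED=0,STDM=std,AGG=mean,CS=1,ARCH=IHN scripts/local_largest_1536/eval_local_sparse_extended_2_ue1_c_ihn.sbatch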
-------------------------------------------------------------------------------- /scripts/local_largest_1536/eval_local_sparse_extended_2_ue1_c_sthn.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=8 # uses 1 compute core per task 5 | #SBATCH --time=00:40:00 6 | #SBATCH --gres=gpu:a100:1 7 | #SBATCH --array=5 8 | #SBATCH --mem=250GB 9 | #SBATCH --job-name=eval_local 10 | #SBATCH --output=eval_local.out 11 | 12 | eval "$(conda shell.bash hook)" 13 | conda activate UASTHN 14 | 15 | CROP=$SLURM_ARRAY_TASK_ID 16 | 17 | if [ "$CROP" -le 5 ]; then 18 | BAN=8 19 | else 20 | BAN=4 21 | fi 22 | 23 | python3 ./local_pipeline/myevaluate.py --dataset_name satellite_0_thermalmapping_135_train --eval_model logs/local_he/$MODEL --val_positive_dist_threshold $DC --lev0 --database_size 1536 --corr_level 4 --two_stages --fine_padding 32 --first_stage_ue --test --batch_size $BAN --ue_aug_method shift --ue_num_crops $CROP --ue_shift_crops_types $ST --ue_shift $SHIFT --ue_seed $USEED --ue_std_method $STDM --ue_agg $AGG --check_step $CS --arch $ARCH -------------------------------------------------------------------------------- /scripts/local_largest_1536/eval_local_sparse_extended_2_ue1_c_sthn_val.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=8 # uses 1 compute core per task 5 | #SBATCH --time=00:40:00 6 | #SBATCH --gres=gpu:a100:1 7 | #SBATCH --array=5 8 | #SBATCH --mem=250GB 9 | #SBATCH --job-name=eval_local 10 | #SBATCH --output=eval_local.out 11 | 12 | eval "$(conda shell.bash hook)" 13 | conda activate UASTHN 14 | 15 | CROP=$SLURM_ARRAY_TASK_ID 16 | 17 | if [ "$CROP" -le 5 ]; then 18 | BAN=8 19 | else 20 | BAN=4 21 | fi 22 | 23 | python3 ./local_pipeline/myevaluate.py --dataset_name satellite_0_thermalmapping_135_train --eval_model logs/local_he/$MODEL --val_positive_dist_threshold $DC --lev0 --database_size 1536 --corr_level 4 --two_stages --fine_padding 32 --first_stage_ue --batch_size $BAN --ue_aug_method shift --ue_num_crops $CROP --ue_shift_crops_types $ST --ue_shift $SHIFT --ue_seed $USEED --ue_std_method $STDM --ue_agg $AGG --check_step $CS -------------------------------------------------------------------------------- /scripts/local_largest_1536/eval_local_sparse_extended_2_ue1_ce_ihn.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=8 # uses 1 compute core per task 5 | #SBATCH --time=00:40:00 6 | #SBATCH --gres=gpu:a100:1 7 | #SBATCH --array=5 8 | #SBATCH --mem=250GB 9 | #SBATCH --job-name=eval_local 10 | #SBATCH --output=eval_local.out 11 | 12 | eval "$(conda shell.bash hook)" 13 | conda activate UASTHN 14 | 15 | CROP=$SLURM_ARRAY_TASK_ID 16 | 17 | if [ "$CROP" -le 5 ]; then 18 | BAN=8 19 | else 20 | BAN=4 21 | fi 22 | 23 | python3 ./local_pipeline/myevaluate.py --dataset_name satellite_0_thermalmapping_135_train --eval_model logs/local_he/$MODEL --val_positive_dist_threshold $DC --lev0 --database_size 1536 --corr_level 4 --first_stage_ue --test --batch_size $BAN --ue_aug_method shift --ue_num_crops $CROP 
--ue_shift_crops_types $ST --ue_shift $SHIFT --ue_seed $USEED --ue_std_method $STDM --ue_agg $AGG --check_step $CS --ue_method augment_ensemble --ue_ensemble_load_models $EN --arch $ARCH --ue_combine $COM -------------------------------------------------------------------------------- /scripts/local_largest_1536/eval_local_sparse_extended_2_ue1_ce_sthn.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=8 # uses 1 compute core per task 5 | #SBATCH --time=00:40:00 6 | #SBATCH --gres=gpu:a100:1 7 | #SBATCH --array=5 8 | #SBATCH --mem=250GB 9 | #SBATCH --job-name=eval_local 10 | #SBATCH --output=eval_local.out 11 | 12 | eval "$(conda shell.bash hook)" 13 | conda activate UASTHN 14 | 15 | CROP=$SLURM_ARRAY_TASK_ID 16 | 17 | if [ "$CROP" -le 5 ]; then 18 | BAN=8 19 | else 20 | BAN=4 21 | fi 22 | 23 | python3 ./local_pipeline/myevaluate.py --dataset_name satellite_0_thermalmapping_135_train --eval_model logs/local_he/$MODEL --val_positive_dist_threshold $DC --lev0 --database_size 1536 --corr_level 4 --two_stages --fine_padding 32 --first_stage_ue --test --batch_size $BAN --ue_aug_method shift --ue_num_crops $CROP --ue_shift_crops_types $ST --ue_shift $SHIFT --ue_seed $USEED --ue_std_method $STDM --ue_agg $AGG --check_step $CS --ue_method augment_ensemble --ue_ensemble_load_models $EN --arch $ARCH --ue_combine $COM -------------------------------------------------------------------------------- /scripts/local_largest_1536/eval_local_sparse_extended_2_ue1_ce_val.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=8 # uses 1 compute core per task 5 | #SBATCH --time=00:40:00 6 | #SBATCH --gres=gpu:a100:1 7 | #SBATCH --array=5 8 | #SBATCH --mem=250GB 9 | #SBATCH --job-name=eval_local 10 | #SBATCH --output=eval_local.out 11 | 12 | eval "$(conda shell.bash hook)" 13 | conda activate UASTHN 14 | 15 | CROP=$SLURM_ARRAY_TASK_ID 16 | 17 | if [ "$CROP" -le 5 ]; then 18 | BAN=8 19 | else 20 | BAN=4 21 | fi 22 | 23 | python3 ./local_pipeline/myevaluate.py --dataset_name satellite_0_thermalmapping_135_train --eval_model logs/local_he/$MODEL --val_positive_dist_threshold $DC --lev0 --database_size 1536 --corr_level 4 --two_stages --fine_padding 32 --first_stage_ue --batch_size $BAN --ue_aug_method shift --ue_num_crops $CROP --ue_shift_crops_types $ST --ue_shift $SHIFT --ue_seed $USEED --ue_std_method $STDM --ue_agg $AGG --check_step $CS --ue_method augment_ensemble --ue_ensemble_load_models $EN --arch $ARCH --ue_combine $COM -------------------------------------------------------------------------------- /scripts/local_largest_1536/eval_local_sparse_extended_2_ue1_d_ihn.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=8 # uses 1 compute core per task 5 | #SBATCH --time=01:00:00 6 | #SBATCH --array=5 7 | #SBATCH --gres=gpu:a100:1 8 | #SBATCH --mem=250GB 9 | #SBATCH --job-name=eval_local 10 | #SBATCH --output=eval_local.out 11 | 12 | eval "$(conda shell.bash hook)" 13 | conda activate UASTHN 14 | 15 | CROP=$SLURM_ARRAY_TASK_ID 16 | 17 | if 
[ "$CROP" -le 5 ]; then 18 | BAN=8 19 | else 20 | BAN=4 21 | fi 22 | 23 | python3 ./local_pipeline/myevaluate.py --dataset_name satellite_0_thermalmapping_135_train --eval_model logs/local_he/$MODEL --val_positive_dist_threshold $DC --lev0 --database_size 1536 --corr_level 4 --first_stage_ue --test --batch_size $BAN --ue_num_crops $CROP --ue_std_method $STDM --ue_agg $AGG --check_step $CS --ue_method single -------------------------------------------------------------------------------- /scripts/local_largest_1536/eval_local_sparse_extended_2_ue1_d_sthn.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=8 # uses 1 compute core per task 5 | #SBATCH --time=01:00:00 6 | #SBATCH --array=5 7 | #SBATCH --gres=gpu:a100:1 8 | #SBATCH --mem=250GB 9 | #SBATCH --job-name=eval_local 10 | #SBATCH --output=eval_local.out 11 | 12 | eval "$(conda shell.bash hook)" 13 | conda activate UASTHN 14 | 15 | CROP=$SLURM_ARRAY_TASK_ID 16 | 17 | if [ "$CROP" -le 5 ]; then 18 | BAN=8 19 | else 20 | BAN=4 21 | fi 22 | 23 | python3 ./local_pipeline/myevaluate.py --dataset_name satellite_0_thermalmapping_135_train --eval_model logs/local_he/$MODEL --val_positive_dist_threshold $DC --lev0 --database_size 1536 --corr_level 4 --two_stages --fine_padding 32 --first_stage_ue --test --batch_size $BAN --ue_num_crops $CROP --ue_std_method $STDM --ue_agg $AGG --check_step $CS --ue_method single -------------------------------------------------------------------------------- /scripts/local_largest_1536/eval_local_sparse_extended_2_ue1_e_ihn.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=8 # uses 1 compute core per task 5 | #SBATCH --time=01:20:00 6 | #SBATCH --array=5 7 | #SBATCH --gres=gpu:a100:1 8 | #SBATCH --mem=250GB 9 | #SBATCH --job-name=eval_local 10 | #SBATCH --output=eval_local.out 11 | 12 | eval "$(conda shell.bash hook)" 13 | conda activate UASTHN 14 | 15 | CROP=$SLURM_ARRAY_TASK_ID 16 | 17 | if [ "$CROP" -le 5 ]; then 18 | BAN=8 19 | else 20 | BAN=4 21 | fi 22 | 23 | python3 ./local_pipeline/myevaluate.py --dataset_name satellite_0_thermalmapping_135_train --eval_model logs/local_he/$MODEL --val_positive_dist_threshold $DC --lev0 --database_size 1536 --corr_level 4 --first_stage_ue --test --batch_size $BAN --ue_num_crops $CROP --ue_std_method $STDM --ue_agg $AGG --check_step $CS --ue_method ensemble --ue_ensemble_load_models $EN --arch $ARCH -------------------------------------------------------------------------------- /scripts/local_largest_1536/eval_local_sparse_extended_2_ue1_e_sthn.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=8 # uses 1 compute core per task 5 | #SBATCH --time=01:20:00 6 | #SBATCH --array=5 7 | #SBATCH --gres=gpu:a100:1 8 | #SBATCH --mem=250GB 9 | #SBATCH --job-name=eval_local 10 | #SBATCH --output=eval_local.out 11 | 12 | eval "$(conda shell.bash hook)" 13 | conda activate UASTHN 14 | 15 | CROP=$SLURM_ARRAY_TASK_ID 16 | 17 | if [ "$CROP" -le 5 ]; then 18 | BAN=8 19 | else 20 | BAN=4 21 | fi 22 | 23 | python3 
./local_pipeline/myevaluate.py --dataset_name satellite_0_thermalmapping_135_train --eval_model logs/local_he/$MODEL --val_positive_dist_threshold $DC --lev0 --database_size 1536 --corr_level 4 --two_stages --fine_padding 32 --first_stage_ue --test --batch_size $BAN --ue_num_crops $CROP --ue_std_method $STDM --ue_agg $AGG --check_step $CS --ue_method ensemble --ue_ensemble_load_models $EN --arch $ARCH -------------------------------------------------------------------------------- /scripts/local_largest_1536/eval_local_sparse_extended_2_ue1_e_sthn_val.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=8 # uses 1 compute core per task 5 | #SBATCH --time=01:00:00 6 | #SBATCH --array=5 7 | #SBATCH --gres=gpu:a100:1 8 | #SBATCH --mem=250GB 9 | #SBATCH --job-name=eval_local 10 | #SBATCH --output=eval_local.out 11 | 12 | eval "$(conda shell.bash hook)" 13 | conda activate UASTHN 14 | 15 | CROP=$SLURM_ARRAY_TASK_ID 16 | 17 | if [ "$CROP" -le 5 ]; then 18 | BAN=8 19 | else 20 | BAN=4 21 | fi 22 | 23 | python3 ./local_pipeline/myevaluate.py --dataset_name satellite_0_thermalmapping_135_train --eval_model logs/local_he/$MODEL --val_positive_dist_threshold $DC --lev0 --database_size 1536 --corr_level 4 --two_stages --fine_padding 32 --first_stage_ue --batch_size $BAN --ue_num_crops $CROP --ue_std_method $STDM --ue_agg $AGG --check_step $CS --ue_method ensemble --ue_ensemble_load_models $EN -------------------------------------------------------------------------------- /scripts/local_largest_1536/eval_local_sparse_extended_2_val_ihn.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=8 # uses 1 compute core per task 5 | #SBATCH --time=00:40:00 6 | #SBATCH --gres=gpu:a100:1 7 | #SBATCH --mem=250GB 8 | #SBATCH --job-name=eval_local 9 | #SBATCH --output=eval_local.out 10 | 11 | eval "$(conda shell.bash hook)" 12 | conda activate UASTHN 13 | 14 | python3 ./local_pipeline/myevaluate.py --dataset_name satellite_0_thermalmapping_135_train --eval_model logs/local_he/$MODEL --val_positive_dist_threshold $DC --lev0 --database_size 1536 --corr_level 4 -------------------------------------------------------------------------------- /scripts/local_largest_1536/eval_local_sparse_extended_2_val_sthn.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=8 # uses 1 compute core per task 5 | #SBATCH --time=00:40:00 6 | #SBATCH --gres=gpu:a100:1 7 | #SBATCH --mem=250GB 8 | #SBATCH --job-name=eval_local 9 | #SBATCH --output=eval_local.out 10 | 11 | eval "$(conda shell.bash hook)" 12 | conda activate UASTHN 13 | 14 | python3 ./local_pipeline/myevaluate.py --dataset_name satellite_0_thermalmapping_135_train --eval_model logs/local_he/$MODEL --val_positive_dist_threshold $DC --lev0 --database_size 1536 --corr_level 4 --two_stages --fine_padding 32 -------------------------------------------------------------------------------- /scripts/local_largest_1536/train_local_sparse_extended_long_ihn.sbatch: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=8 # uses 1 compute core per task 5 | #SBATCH --time=24:00:00 6 | #SBATCH --gres=gpu:a100:1 7 | #SBATCH --mem=250GB 8 | #SBATCH --job-name=train_local 9 | #SBATCH --output=train_local.out 10 | 11 | eval "$(conda shell.bash hook)" 12 | conda activate UASTHN 13 | 14 | python3 ./local_pipeline/train_4cor.py --dataset_name satellite_0_thermalmapping_135_train --val_positive_dist_threshold $DC --database_size 1536 --num_steps 200000 --corr_level 4 --seed $SEED -------------------------------------------------------------------------------- /scripts/local_largest_1536/train_local_sparse_extended_long_ihn_c_ue1r32.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=8 # uses 1 compute core per task 5 | #SBATCH --time=24:00:00 6 | #SBATCH --gres=gpu:a100:1 7 | #SBATCH --mem=250GB 8 | #SBATCH --job-name=train_local 9 | #SBATCH --output=train_local.out 10 | 11 | eval "$(conda shell.bash hook)" 12 | conda activate UASTHN 13 | 14 | python3 ./local_pipeline/train_4cor.py --dataset_name satellite_0_thermalmapping_135_train --val_positive_dist_threshold $DC --database_size 1536 --num_steps 200000 --corr_level 4 --first_stage_ue --batch_size 8 --ue_aug_method shift --ue_shift 32 --ue_num_crops 5 --ue_shift_crops_types random --exclude_val_region --ue_agg zero --lr 5e-5 --restore_ckpt logs/local_he/$MODEL/UASTHN.pth -------------------------------------------------------------------------------- /scripts/local_largest_1536/train_local_sparse_extended_long_ihn_c_ue1r32_aug.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=8 # uses 1 compute core per task 5 | #SBATCH --time=24:00:00 6 | #SBATCH --gres=gpu:a100:1 7 | #SBATCH --mem=250GB 8 | #SBATCH --job-name=train_local 9 | #SBATCH --output=train_local.out 10 | 11 | eval "$(conda shell.bash hook)" 12 | conda activate UASTHN 13 | 14 | python3 ./local_pipeline/train_4cor.py --dataset_name satellite_0_thermalmapping_135_train --val_positive_dist_threshold $DC --database_size 1536 --num_steps 200000 --corr_level 4 --first_stage_ue --batch_size 8 --ue_aug_method shift --ue_shift 32 --ue_num_crops 5 --ue_shift_crops_types random --exclude_val_region --ue_agg zero --lr 5e-5 --restore_ckpt logs/local_he/$MODEL/UASTHN.pth --augment img --perspective_max 16 --rotate_max 0.523599 --resize_max 0.3 -------------------------------------------------------------------------------- /scripts/local_largest_1536/train_local_sparse_extended_long_ihn_d.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=8 # uses 1 compute core per task 5 | #SBATCH --time=24:00:00 6 | #SBATCH --gres=gpu:a100:1 7 | #SBATCH --mem=250GB 8 | #SBATCH --job-name=train_local 9 | #SBATCH --output=train_local.out 10 | 11 | eval "$(conda shell.bash hook)" 12 | conda activate UASTHN 13 | 14 | python3 
./local_pipeline/train_4cor.py --dataset_name satellite_0_thermalmapping_135_train --val_positive_dist_threshold $DC --database_size 1536 --num_steps 200000 --corr_level 4 --first_stage_ue --ue_method single -------------------------------------------------------------------------------- /scripts/local_largest_1536/train_local_sparse_extended_long_load_f_aug64_c_ue1r32.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=8 # uses 1 compute core per task 5 | #SBATCH --time=24:00:00 6 | #SBATCH --gres=gpu:a100:1 7 | #SBATCH --mem=250GB 8 | #SBATCH --job-name=train_local 9 | #SBATCH --output=train_local.out 10 | 11 | eval "$(conda shell.bash hook)" 12 | conda activate UASTHN 13 | 14 | python3 ./local_pipeline/train_4cor.py --dataset_name satellite_0_thermalmapping_135_train --val_positive_dist_threshold $DC --database_size 1536 --num_steps 200000 --two_stages --corr_level 4 --restore_ckpt logs/local_he/$MODEL/UASTHN.pth --finetune --detach --augment_two_stages 64 --first_stage_ue --batch_size 8 --ue_aug_method shift --ue_shift 32 --ue_num_crops 5 --ue_shift_crops_types random --exclude_val_region --ue_agg mean --lr 5e-5 -------------------------------------------------------------------------------- /scripts/local_largest_1536/train_local_sparse_extended_long_load_f_aug64_c_ue1r32_aug.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=8 # uses 1 compute core per task 5 | #SBATCH --time=24:00:00 6 | #SBATCH --gres=gpu:a100:1 7 | #SBATCH --mem=250GB 8 | #SBATCH --job-name=train_local 9 | #SBATCH --output=train_local.out 10 | 11 | eval "$(conda shell.bash hook)" 12 | conda activate UASTHN 13 | 14 | python3 ./local_pipeline/train_4cor.py --dataset_name satellite_0_thermalmapping_135_train --val_positive_dist_threshold $DC --database_size 1536 --num_steps 200000 --two_stages --corr_level 4 --restore_ckpt logs/local_he/$MODEL/UASTHN.pth --finetune --detach --augment_two_stages 64 --first_stage_ue --batch_size 8 --ue_aug_method shift --ue_shift 32 --ue_num_crops 5 --ue_shift_crops_types random --exclude_val_region --augment img --perspective_max 16 --rotate_max 0.523599 --resize_max 0.3 -------------------------------------------------------------------------------- /scripts/local_largest_1536/train_local_sparse_extended_long_load_f_aug64_sthn.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 # requests 3 compute servers 3 | #SBATCH --ntasks-per-node=1 # runs 2 tasks on each server 4 | #SBATCH --cpus-per-task=8 # uses 1 compute core per task 5 | #SBATCH --time=36:00:00 6 | #SBATCH --gres=gpu:a100:1 7 | #SBATCH --mem=250GB 8 | #SBATCH --job-name=train_local 9 | #SBATCH --output=train_local.out 10 | 11 | eval "$(conda shell.bash hook)" 12 | conda activate UASTHN 13 | 14 | python3 ./local_pipeline/train_4cor.py --dataset_name satellite_0_thermalmapping_135_train --val_positive_dist_threshold $DC --database_size 1536 --num_steps 300000 --two_stages --corr_level 4 --restore_ckpt logs/local_he/$MODEL/UASTHN.pth --finetune --detach --augment_two_stages 64 --seed $SEED -------------------------------------------------------------------------------- 
/scripts/local_largest_1536/train_local_sparse_extended_long_load_f_aug64_sthn_d.sbatch:
--------------------------------------------------------------------------------
 1 | #!/bin/bash
 2 | #SBATCH --nodes=1              # request 1 compute node
 3 | #SBATCH --ntasks-per-node=1    # run 1 task on the node
 4 | #SBATCH --cpus-per-task=8      # use 8 CPU cores per task
 5 | #SBATCH --time=48:00:00
 6 | #SBATCH --gres=gpu:a100:1
 7 | #SBATCH --mem=250GB
 8 | #SBATCH --job-name=train_local
 9 | #SBATCH --output=train_local.out
10 | 
11 | eval "$(conda shell.bash hook)"
12 | conda activate UASTHN
13 | 
14 | python3 ./local_pipeline/train_4cor.py --dataset_name satellite_0_thermalmapping_135_train --val_positive_dist_threshold $DC --database_size 1536 --num_steps 300000 --two_stages --corr_level 4 --restore_ckpt logs/local_he/$MODEL/UASTHN.pth --finetune --detach --augment_two_stages 64 --first_stage_ue --batch_size 16 --exclude_val_region --ue_method single
--------------------------------------------------------------------------------
/train_global.sh:
--------------------------------------------------------------------------------
 1 | #!/bin/bash
 2 | 
 3 | # sbatch --export=ALL,FC=4096 scripts/global/train_bing_thermal_partial_resnet50_netvlad_extended_DANN.sbatch
 4 | # sbatch --export=ALL,FC=4096 scripts/global/train_bing_thermal_partial_resnet50_netvlad_extended.sbatch
 5 | # sbatch --export=ALL,FC=4096 scripts/global/train_bing_thermal_partial_resnet50_gem_extended_DANN.sbatch
 6 | # sbatch --export=ALL,FC=4096 scripts/global/train_bing_thermal_partial_resnet50_gem_extended.sbatch
 7 | # sbatch --export=ALL,FC=4096 scripts/global/train_bing_thermal_partial_resnet50_gem_nocontrast_extended.sbatch
 8 | # sbatch --export=ALL,FC=4096 scripts/global/train_bing_thermal_partial_resnet50_gem_nocontrast.sbatch
--------------------------------------------------------------------------------
/transform_dataset.sh:
--------------------------------------------------------------------------------
 1 | #!/bin/bash
 2 | eval "$(conda shell.bash hook)"
 3 | conda activate UAGL
 4 | 
 5 | # bing + bing
 6 | # python global_pipeline/h5_transformer.py --database_name satellite --database_index 0 --queries_name satellite --queries_index 0 --compress --sample_method stride --region_num 1 --crop_width 2560 --stride 35 --generate_data database --maintain_size &
 7 | 
 8 | # bing + thermal_1
 9 | # python global_pipeline/h5_transformer.py --database_name satellite --database_index 0 --queries_name thermalmapping --queries_index 0 --compress --sample_method stride --region_num 1 --crop_width 2560 --generate_data database --maintain_size &
10 | 
11 | # bing + thermal_2
12 | # python global_pipeline/h5_transformer.py --database_name satellite --database_index 0 --queries_name thermalmapping --queries_index 1 --compress --sample_method stride --region_num 2 --crop_width 2560 --generate_data database --maintain_size &
13 | 
14 | # bing + thermal_3
15 | # python global_pipeline/h5_transformer.py --database_name satellite --database_index 0 --queries_name thermalmapping --queries_index 2 --compress --sample_method stride --region_num 1 --crop_width 2560 --generate_data database --maintain_size &
16 | 
17 | # bing + thermal_4
18 | # python global_pipeline/h5_transformer.py --database_name satellite --database_index 0 --queries_name thermalmapping --queries_index 3 --compress --sample_method stride --region_num 2 --crop_width 2560 --generate_data database --maintain_size &
19 | 
20 | # bing + thermal_5
21 | # python global_pipeline/h5_transformer.py --database_name satellite --database_index 0 --queries_name thermalmapping --queries_index 4 --compress --sample_method stride --region_num 1 --crop_width 2560 --generate_data database --maintain_size &
22 | 
23 | # bing + thermal_6
24 | # python global_pipeline/h5_transformer.py --database_name satellite --database_index 0 --queries_name thermalmapping --queries_index 5 --compress --sample_method stride --region_num 2 --crop_width 2560 --generate_data database --maintain_size &
25 | 
26 | # bing + thermal_123456
27 | # python global_pipeline/h5_merger.py --database_name satellite --database_indexes 0 --queries_name thermalmapping --queries_indexes 135 --compress --region_num 2 --generate_data database --resize_width 2560 &
28 | 
29 | # python global_pipeline/h5_merger.py --database_name satellite --database_indexes 0 --queries_name thermalmapping --queries_indexes 024 --compress --region_num 1 --generate_data database --resize_width 2560 &
30 | 
31 | # rm -r ./datasets/satellite_0_thermalmapping_0
32 | # rm -r ./datasets/satellite_0_thermalmapping_1
33 | # rm -r ./datasets/satellite_0_thermalmapping_2
34 | # rm -r ./datasets/satellite_0_thermalmapping_3
35 | # rm -r ./datasets/satellite_0_thermalmapping_4
36 | # rm -r ./datasets/satellite_0_thermalmapping_5
--------------------------------------------------------------------------------
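transform_dataset.sh backgrounds every h5_transformer/h5_merger job with &, and the rm -r cleanup lines sit directly after them; if the script is uncommented as-is, the deletes can race the still-running exports. A hedged sketch of a safer ordering, showing two of the six flights (flags copied verbatim from the script; the merger depends on the transformer outputs, so it runs only after wait):

python global_pipeline/h5_transformer.py --database_name satellite --database_index 0 --queries_name thermalmapping --queries_index 1 --compress --sample_method stride --region_num 2 --crop_width 2560 --generate_data database --maintain_size &
python global_pipeline/h5_transformer.py --database_name satellite --database_index 0 --queries_name thermalmapping --queries_index 3 --compress --sample_method stride --region_num 2 --crop_width 2560 --generate_data database --maintain_size &
wait  # block until the per-flight exports finish before merging
python global_pipeline/h5_merger.py --database_name satellite --database_indexes 0 --queries_name thermalmapping --queries_indexes 135 --compress --region_num 2 --generate_data database --resize_width 2560
rm -r ./datasets/satellite_0_thermalmapping_1  # safe: nothing is still writing here
rm -r ./datasets/satellite_0_thermalmapping_3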
/utils/compare.py:
--------------------------------------------------------------------------------
 1 | import h5py
 2 | from tqdm import tqdm
 3 | 
 4 | # Check that the sparse and dense exports of the same split hold identical query images.
 5 | with h5py.File("datasets/satellite_0_thermalmapping_135/train_queries.h5", "r") as f1, \
 6 |      h5py.File("datasets/satellite_0_thermalmapping_135_dense/train_queries.h5", "r") as f2:
 7 |     # # compare image names
 8 |     # for i in tqdm(range(len(f1["image_name"]))):
 9 |     #     if f1["image_name"][i] != f2["image_name"][i]:
10 |     #         raise ValueError(f"image_name mismatch at index {i}")
11 |     # compare image data
12 |     for i in tqdm(range(len(f1["image_data"]))):
13 |         if (f1["image_data"][i] != f2["image_data"][i]).any():
14 |             raise ValueError(f"image_data mismatch at index {i}")
--------------------------------------------------------------------------------
/utils/plotting.py:
--------------------------------------------------------------------------------
 1 | import os
 2 | import random
 3 | 
 4 | import matplotlib.pyplot as plt
 5 | import numpy as np
 6 | 
 7 | random.seed(1)
 8 | np.random.seed(1)
 9 | 
10 | def process_results_simulation(error_m, save_folder):
11 |     """Write localization-error statistics and an error histogram to save_folder."""
12 |     res_error_m = error_m
13 |     # res_error_m = [e for gt, e in zip(res_ground_truth, res_error_m) if mask[gt[1], gt[0]] > 100]
14 |     # res_ground_truth = [gt for gt in res_ground_truth if mask[gt[1], gt[0]] > 100]
15 | 
16 |     os.makedirs(save_folder, exist_ok=True)
17 |     save_filename = os.path.join(save_folder, 'matching_results.txt')
18 | 
19 |     total_tested = len(res_error_m)
20 |     with open(save_filename, "a") as f:
21 |         error_0 = res_error_m.count(0)
22 |         f.write("Perfect matches: %d of %d (%.2f%%) \n" % (error_0, total_tested, 100 * error_0 / total_tested))
23 | 
24 |         # Cumulative accuracy at the 25/50/100/150 m thresholds.
25 |         for threshold in (25, 50, 100, 150):
26 |             count = sum(x <= threshold for x in res_error_m)
27 |             f.write("Mismatch less or equal to %dm: %d of %d (%.2f%%) \n" % (threshold, count, total_tested, 100 * count / total_tested))
28 | 
29 |         f.write("Mean error: %.2fm \n" % (np.mean(res_error_m)))
30 |         print(f"Mean error: {np.mean(res_error_m)}")
31 | 
32 |         text_result = np.histogram(res_error_m, bins=16, range=[0, 512])
33 |         f.write(f"Histogram: {text_result}")
34 | 
35 |     plt.figure()
36 |     plt.hist(res_error_m, histtype='step', bins=16, range=[0, 512])
37 |     plt.title('Histogram of L_2 Distance Error')
38 |     plt.xlabel("Error (m)")
39 |     plt.ylabel("Frequency")
40 |     plt.savefig(os.path.join(save_folder, 'hist_error_localization.pdf'))
41 |     plt.close()
--------------------------------------------------------------------------------
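A minimal smoke test for process_results_simulation, assuming it is run from the repository root; the synthetic errors and the results/demo output folder are illustrative, not part of the repository:

python3 - <<'PY'
import sys
import matplotlib
matplotlib.use("Agg")        # headless-safe backend, set before plotting.py imports pyplot
sys.path.insert(0, "utils")  # plotting.py lives in utils/
import numpy as np
from plotting import process_results_simulation

errors = list(np.random.randint(0, 512, size=100))  # synthetic L2 errors in meters
process_results_simulation(errors, "results/demo")  # writes matching_results.txt and the histogram PDF
PY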