├── .gitignore ├── README.md ├── assets ├── architecture.png └── motivation.png ├── clip ├── .DS_Store ├── __init__.py ├── bpe_simple_vocab_16e6.txt.gz ├── clip.py ├── model.py └── simple_tokenizer.py ├── configs ├── modelnet40.yaml ├── modelnet40_c.yaml ├── modelnet40_views.yaml ├── modelnet_c.yaml ├── modelnet_c_sdxl.yaml ├── modelnet_c_views.yaml ├── objaverse_lvis.yaml ├── omniobject3d.yaml ├── pointda_modelnet.yaml ├── pointda_scannet.yaml ├── pointda_shapenet.yaml ├── scanobjectnn.yaml ├── scanobjnn.yaml ├── sim2real_sonn.yaml ├── snv2_c.yaml └── sonn_c.yaml ├── datasets ├── augmix_ops.py ├── modelnet40.py ├── modelnet40_c.py ├── modelnet40_sdxl.py ├── modelnet40_views.py ├── modelnet_c.py ├── modelnet_c_views.py ├── objaverse_lvis.py ├── omniobject3d.py ├── pointda_modelnet.py ├── pointda_scannet.py ├── pointda_shapenet.py ├── scanobjectnn.py ├── scanobjnn.py ├── sim2real_sonn.py ├── snv2_c.py ├── sonn_c.py ├── templates.py └── utils.py ├── env.yaml ├── llm ├── da_mn10_gpt35_prompts.json ├── da_mn10_gpt4_prompts.json ├── da_mn10_pointllm_prompts.json ├── da_scan10_gpt35_prompts.json ├── da_scan10_gpt4_prompts.json ├── da_scan10_pointllm_prompts.json ├── da_sn10_gpt35_prompts.json ├── da_sn10_gpt4_prompts.json ├── da_sn10_pointllm_prompts.json ├── llm_generate_prompts.py ├── mn11_gpt35_prompts.json ├── mn11_gpt4_prompts.json ├── mn11_pointllm_prompts.json ├── mn40_gpt35_prompts.json ├── mn40_gpt4_prompts.json ├── mn40_pointllm4_prompts.json ├── mn40_pointllm_prompts.json ├── objaverse_lvis-gpt3.5-turbo.json ├── omniobject3d-gpt3.5-turbo.json ├── sn9_gpt35_prompts.json ├── sn9_gpt4_prompts.json ├── sn9_pointllm_prompts.json ├── snv2_gpt35_prompts.json ├── snv2_gpt4_prompts.json ├── snv2_pointllm4_prompts.json ├── snv2_pointllm_prompts.json ├── sonn_gpt35_prompts.json ├── sonn_gpt4_prompts.json ├── sonn_pointllm4_prompts.json └── sonn_pointllm_prompts.json ├── models ├── openshape │ ├── __init__.py │ ├── config.yaml │ ├── pointnet_util.py │ └── ppta.py ├── ulip │ ├── __init__.py │ ├── pointbert │ │ ├── PointTransformer_8192point.yaml │ │ ├── checkpoint.py │ │ ├── dvae.py │ │ ├── logger.py │ │ ├── misc.py │ │ └── point_encoder.py │ ├── text_encoder.py │ └── ulip_model.py └── uni3d │ ├── __init__.py │ └── point_encoder.py ├── notebook ├── check_cache_threshold.ipynb ├── clip │ ├── .DS_Store │ ├── __init__.py │ ├── bpe_simple_vocab_16e6.txt.gz │ ├── clip.py │ ├── model.py │ └── simple_tokenizer.py ├── ent_acc_correspondence.txt ├── generate_pc_cls_images.py ├── gifs │ ├── os_guitar.gif │ ├── os_plant.gif │ ├── ulip2_table.gif │ ├── ulip2_toilet.gif │ ├── ulip_shelf.gif │ ├── ulip_sofa.gif │ ├── uni3d_calculator.gif │ └── uni3d_hair_dryer.gif ├── images │ ├── ablate_alpha_in_cache.pdf │ ├── ablate_beta_in_cache.pdf │ ├── ablate_k_shot.pdf │ ├── ablate_n_cluster.pdf │ ├── acc_on_clean_and_corrupted_mn40.pdf │ ├── acc_on_clean_and_corrupted_sonn_hardest.pdf │ ├── acc_on_omni3d_4096pts.pdf │ ├── astronaut.png │ ├── bottle.png │ ├── ent_acc_openshape_modelnet_c_dropout_local_2.pdf │ ├── ent_acc_relationship_1.pdf │ ├── ent_acc_relationship_2.pdf │ ├── ent_acc_ulip1_sonn_c_obj_only_rotate_2.pdf │ ├── ent_acc_ulip2_so_hardest.pdf │ ├── ent_acc_uni3d_4096pts.pdf │ ├── laptop.png │ ├── large_3d_models_pt_base2new_mn40.pdf │ ├── mn40_obj_0.pdf │ ├── mn40_obj_1.pdf │ ├── mn40_obj_2.pdf │ ├── mn40_obj_3.pdf │ ├── mn40_obj_4.pdf │ ├── modelnet_c_dropout_local_2_bottle.pdf │ ├── modelnet_c_dropout_local_2_bottle.png │ ├── modelnet_c_dropout_local_2_glass_box.pdf │ ├── 
modelnet_c_dropout_local_2_glass_box.png │ ├── modelnet_c_dropout_local_2_guitar.pdf │ ├── modelnet_c_dropout_local_2_guitar.png │ ├── modelnet_c_dropout_local_2_plant.pdf │ ├── modelnet_c_dropout_local_2_plant.png │ ├── modelnet_c_dropout_local_2_stairs.pdf │ ├── modelnet_c_dropout_local_2_stairs.png │ ├── modelnet_c_dropout_local_2_tent.pdf │ ├── modelnet_c_dropout_local_2_tent.png │ ├── modelnet_c_dropout_local_2_tv_stand.pdf │ ├── modelnet_c_dropout_local_2_tv_stand.png │ ├── omni3d_calculator_4096.pdf │ ├── omni3d_calculator_4096.png │ ├── omniobject3d_4096pts_hair_dryer.pdf │ ├── omniobject3d_4096pts_hair_dryer.png │ ├── omniobject3d_4096pts_pomegranate.pdf │ ├── omniobject3d_4096pts_pomegranate.png │ ├── omniobject3d_4096pts_shampoo.pdf │ ├── omniobject3d_4096pts_shampoo.png │ ├── omniobject3d_4096pts_toy_truck.pdf │ ├── omniobject3d_4096pts_toy_truck.png │ ├── omniobject3d_4096pts_watch.pdf │ ├── omniobject3d_4096pts_watch.png │ ├── omniobject3d_4096pts_watermelon.pdf │ ├── omniobject3d_4096pts_watermelon.png │ ├── openshape_modelnet_c_dropout_local_2_acc.pdf │ ├── os_mn_c_dropout_local_global_bottle.pdf │ ├── os_mn_c_dropout_local_global_door.pdf │ ├── os_mn_c_dropout_local_global_glass_box.pdf │ ├── os_mn_c_dropout_local_global_guitar.pdf │ ├── os_mn_c_dropout_local_global_plant.pdf │ ├── os_mn_c_dropout_local_global_stairs.pdf │ ├── os_mn_c_dropout_local_global_tent.pdf │ ├── os_mn_c_dropout_local_global_tv_stand.pdf │ ├── os_mn_c_dropout_local_hierar_bottle.pdf │ ├── os_mn_c_dropout_local_hierar_door.pdf │ ├── os_mn_c_dropout_local_hierar_glass_box.pdf │ ├── os_mn_c_dropout_local_hierar_guitar.pdf │ ├── os_mn_c_dropout_local_hierar_plant.pdf │ ├── os_mn_c_dropout_local_hierar_stairs.pdf │ ├── os_mn_c_dropout_local_hierar_tent.pdf │ ├── os_mn_c_dropout_local_hierar_tv_stand.pdf │ ├── os_mn_c_dropout_local_zero_bottle.pdf │ ├── os_mn_c_dropout_local_zero_door.pdf │ ├── os_mn_c_dropout_local_zero_glass_box.pdf │ ├── os_mn_c_dropout_local_zero_guitar.pdf │ ├── os_mn_c_dropout_local_zero_plant.pdf │ ├── os_mn_c_dropout_local_zero_stairs.pdf │ ├── os_mn_c_dropout_local_zero_tent.pdf │ ├── os_mn_c_dropout_local_zero_tv_stand.pdf │ ├── owl.png │ ├── owl_generated_from_pc.png │ ├── scanobjnn_hardest_bed.pdf │ ├── scanobjnn_hardest_bed.png │ ├── scanobjnn_hardest_box.pdf │ ├── scanobjnn_hardest_box.png │ ├── scanobjnn_hardest_cabinet.pdf │ ├── scanobjnn_hardest_cabinet.png │ ├── scanobjnn_hardest_display.pdf │ ├── scanobjnn_hardest_display.png │ ├── scanobjnn_hardest_sink.pdf │ ├── scanobjnn_hardest_sink.png │ ├── scanobjnn_hardest_table.pdf │ ├── scanobjnn_hardest_table.png │ ├── scanobjnn_hardest_toilet.pdf │ ├── scanobjnn_hardest_toilet.png │ ├── sdxl-airplane.png │ ├── sdxl-car.png │ ├── sdxl-resized_airplane.png │ ├── sdxl-resized_desk.png │ ├── sdxl-ship.png │ ├── sonn_c_obj_only_rotate_2_bag.pdf │ ├── sonn_c_obj_only_rotate_2_bag.png │ ├── sonn_c_obj_only_rotate_2_bin.pdf │ ├── sonn_c_obj_only_rotate_2_bin.png │ ├── sonn_c_obj_only_rotate_2_desk.pdf │ ├── sonn_c_obj_only_rotate_2_desk.png │ ├── sonn_c_obj_only_rotate_2_door.pdf │ ├── sonn_c_obj_only_rotate_2_door.png │ ├── sonn_c_obj_only_rotate_2_pillow.pdf │ ├── sonn_c_obj_only_rotate_2_pillow.png │ ├── sonn_c_obj_only_rotate_2_shelf.pdf │ ├── sonn_c_obj_only_rotate_2_shelf.png │ ├── sonn_c_obj_only_rotate_2_sofa.pdf │ ├── sonn_c_obj_only_rotate_2_sofa.png │ ├── ulip1_sonn_c_obj_only_rotate_2_acc.pdf │ ├── ulip1_sonn_c_obj_only_rotate_2_global_bag.pdf │ ├── ulip1_sonn_c_obj_only_rotate_2_global_bin.pdf │ ├── 
ulip1_sonn_c_obj_only_rotate_2_global_desk.pdf │ ├── ulip1_sonn_c_obj_only_rotate_2_global_door.pdf │ ├── ulip1_sonn_c_obj_only_rotate_2_global_pillow.pdf │ ├── ulip1_sonn_c_obj_only_rotate_2_global_shelf.pdf │ ├── ulip1_sonn_c_obj_only_rotate_2_global_sofa.pdf │ ├── ulip1_sonn_c_obj_only_rotate_2_hierar_bag.pdf │ ├── ulip1_sonn_c_obj_only_rotate_2_hierar_bin.pdf │ ├── ulip1_sonn_c_obj_only_rotate_2_hierar_desk.pdf │ ├── ulip1_sonn_c_obj_only_rotate_2_hierar_door.pdf │ ├── ulip1_sonn_c_obj_only_rotate_2_hierar_pillow.pdf │ ├── ulip1_sonn_c_obj_only_rotate_2_hierar_shelf.pdf │ ├── ulip1_sonn_c_obj_only_rotate_2_hierar_sofa.pdf │ ├── ulip1_sonn_c_obj_only_rotate_2_zero_bag.pdf │ ├── ulip1_sonn_c_obj_only_rotate_2_zero_bin.pdf │ ├── ulip1_sonn_c_obj_only_rotate_2_zero_desk.pdf │ ├── ulip1_sonn_c_obj_only_rotate_2_zero_door.pdf │ ├── ulip1_sonn_c_obj_only_rotate_2_zero_pillow.pdf │ ├── ulip1_sonn_c_obj_only_rotate_2_zero_shelf.pdf │ ├── ulip1_sonn_c_obj_only_rotate_2_zero_sofa.pdf │ ├── ulip2_scanobjnn_hardest_acc.pdf │ ├── ulip2_sonn_hardest_global_bed.pdf │ ├── ulip2_sonn_hardest_global_box.pdf │ ├── ulip2_sonn_hardest_global_cabinet.pdf │ ├── ulip2_sonn_hardest_global_display.pdf │ ├── ulip2_sonn_hardest_global_sink.pdf │ ├── ulip2_sonn_hardest_global_table.pdf │ ├── ulip2_sonn_hardest_global_toilet.pdf │ ├── ulip2_sonn_hardest_hierar_bed.pdf │ ├── ulip2_sonn_hardest_hierar_box.pdf │ ├── ulip2_sonn_hardest_hierar_cabinet.pdf │ ├── ulip2_sonn_hardest_hierar_display.pdf │ ├── ulip2_sonn_hardest_hierar_sink.pdf │ ├── ulip2_sonn_hardest_hierar_table.pdf │ ├── ulip2_sonn_hardest_hierar_toilet.pdf │ ├── ulip2_sonn_hardest_zero_bed.pdf │ ├── ulip2_sonn_hardest_zero_box.pdf │ ├── ulip2_sonn_hardest_zero_cabinet.pdf │ ├── ulip2_sonn_hardest_zero_display.pdf │ ├── ulip2_sonn_hardest_zero_sink.pdf │ ├── ulip2_sonn_hardest_zero_table.pdf │ ├── ulip2_sonn_hardest_zero_toilet.pdf │ ├── uni3d_omni3d_4096_global_calculator.pdf │ ├── uni3d_omni3d_4096_global_hair_dryer.pdf │ ├── uni3d_omni3d_4096_global_pomegranate.pdf │ ├── uni3d_omni3d_4096_global_shampoo.pdf │ ├── uni3d_omni3d_4096_global_toy_truck.pdf │ ├── uni3d_omni3d_4096_global_watch.pdf │ ├── uni3d_omni3d_4096_global_watermelon.pdf │ ├── uni3d_omni3d_4096_hierar_calculator.pdf │ ├── uni3d_omni3d_4096_hierar_hair_dryer.pdf │ ├── uni3d_omni3d_4096_hierar_pomegranate.pdf │ ├── uni3d_omni3d_4096_hierar_shampoo.pdf │ ├── uni3d_omni3d_4096_hierar_toy_truck.pdf │ ├── uni3d_omni3d_4096_hierar_watch.pdf │ ├── uni3d_omni3d_4096_hierar_watermelon.pdf │ ├── uni3d_omni3d_4096_zero_calculator.pdf │ ├── uni3d_omni3d_4096_zero_hair_dryer.pdf │ ├── uni3d_omni3d_4096_zero_pomegranate.pdf │ ├── uni3d_omni3d_4096_zero_shampoo.pdf │ ├── uni3d_omni3d_4096_zero_toy_truck.pdf │ ├── uni3d_omni3d_4096_zero_watch.pdf │ ├── uni3d_omni3d_4096_zero_watermelon.pdf │ └── uni3d_omniobject3d_4096pts_acc.pdf ├── owl_shape_feat.pt ├── try_diffusion_models.ipynb └── visualize.ipynb ├── runners ├── __init__.py ├── model_with_global_cache.py ├── model_with_global_cache_ablate_seed.py ├── model_with_global_cache_mem.py ├── model_with_hierar_caches_ablations.py ├── model_with_hierarchical_caches.py ├── model_with_hierarchical_caches_ablate_seed.py ├── model_with_hierarchical_caches_ent.py ├── model_with_hierarchical_caches_mem.py ├── model_with_hierarchical_caches_speed.py ├── param_count.py ├── record_adaptation_acc.py ├── record_adaptation_logits.py ├── zs_infer.py └── zs_infer_ablate_seed.py ├── scripts ├── eval_model_with_global_cache.sh ├── eval_model_with_global_cache_ablate_seed.sh 
├── eval_model_with_global_cache_mem.sh ├── eval_model_with_hierar_caches_ablations.sh ├── eval_model_with_hierarchical_caches.sh ├── eval_model_with_hierarchical_caches_ablate_seed.sh ├── eval_model_with_hierarchical_caches_ent.sh ├── eval_model_with_hierarchical_caches_mem.sh ├── eval_model_with_hierarchical_caches_speed.sh ├── eval_zs_infer.sh ├── eval_zs_infer_ablate_seed.sh ├── param_count.sh ├── record_adaptation_acc.sh └── record_adaptation_logits.sh └── utils ├── __init__.py ├── check_img_text_acc.py ├── compute_mean_and_std.py ├── debug.py ├── find_class_pc.py ├── generate_pc_view_labels.py ├── mv_utils_zs.py ├── utils.py └── visualize.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | local_settings.py 60 | db.sqlite3 61 | db.sqlite3-journal 62 | 63 | # Flask stuff: 64 | instance/ 65 | .webassets-cache 66 | 67 | # Scrapy stuff: 68 | .scrapy 69 | 70 | # Sphinx documentation 71 | docs/_build/ 72 | 73 | # PyBuilder 74 | .pybuilder/ 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | # For a library or package, you might want to ignore these files since the code is 86 | # intended to run in multiple environments; otherwise, check them in: 87 | # .python-version 88 | 89 | # pipenv 90 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 91 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 92 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 93 | # install all needed dependencies. 94 | #Pipfile.lock 95 | 96 | # poetry 97 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 98 | # This is especially recommended for binary packages to ensure reproducibility, and is more 99 | # commonly ignored for libraries. 100 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 101 | #poetry.lock 102 | 103 | # pdm 104 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 105 | #pdm.lock 106 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 107 | # in version control. 108 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 109 | .pdm.toml 110 | .pdm-python 111 | .pdm-build/ 112 | 113 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 114 | __pypackages__/ 115 | 116 | # Celery stuff 117 | celerybeat-schedule 118 | celerybeat.pid 119 | 120 | # SageMath parsed files 121 | *.sage.py 122 | 123 | # Environments 124 | .env 125 | .venv 126 | env/ 127 | venv/ 128 | ENV/ 129 | env.bak/ 130 | venv.bak/ 131 | 132 | # Spyder project settings 133 | .spyderproject 134 | .spyproject 135 | 136 | # Rope project settings 137 | .ropeproject 138 | 139 | # mkdocs documentation 140 | /site 141 | 142 | # mypy 143 | .mypy_cache/ 144 | .dmypy.json 145 | dmypy.json 146 | 147 | # Pyre type checker 148 | .pyre/ 149 | 150 | # pytype static type analyzer 151 | .pytype/ 152 | 153 | # Cython debug symbols 154 | cython_debug/ 155 | 156 | # PyCharm 157 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 158 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 159 | # and can be added to the global gitignore or merged into this file. For a more nuclear 160 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 161 | #.idea/ 162 | 163 | # project related 164 | data 165 | weights 166 | wandb 167 | outputs -------------------------------------------------------------------------------- /assets/architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/assets/architecture.png -------------------------------------------------------------------------------- /assets/motivation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/assets/motivation.png -------------------------------------------------------------------------------- /clip/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/clip/.DS_Store -------------------------------------------------------------------------------- /clip/__init__.py: -------------------------------------------------------------------------------- 1 | from .clip import * 2 | -------------------------------------------------------------------------------- /clip/bpe_simple_vocab_16e6.txt.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/clip/bpe_simple_vocab_16e6.txt.gz -------------------------------------------------------------------------------- /clip/simple_tokenizer.py: -------------------------------------------------------------------------------- 1 | import gzip 2 | import html 3 | import os 4 | from functools import lru_cache 5 | 6 | import ftfy 7 | import regex as re 8 | 9 | 10 | @lru_cache() 11 | def default_bpe(): 12 | return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz") 13 | 14 | 15 | @lru_cache() 16 | def bytes_to_unicode(): 17 | """ 18 | Returns list of utf-8 byte and a corresponding list of unicode strings. 19 | The reversible bpe codes work on unicode strings. 20 | This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. 21 | When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. 
22 | This is a significant percentage of your normal, say, 32K bpe vocab. 23 | To avoid that, we want lookup tables between utf-8 bytes and unicode strings. 24 | And avoids mapping to whitespace/control characters the bpe code barfs on. 25 | """ 26 | bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1)) 27 | cs = bs[:] 28 | n = 0 29 | for b in range(2**8): 30 | if b not in bs: 31 | bs.append(b) 32 | cs.append(2**8+n) 33 | n += 1 34 | cs = [chr(n) for n in cs] 35 | return dict(zip(bs, cs)) 36 | 37 | 38 | def get_pairs(word): 39 | """Return set of symbol pairs in a word. 40 | Word is represented as tuple of symbols (symbols being variable-length strings). 41 | """ 42 | pairs = set() 43 | prev_char = word[0] 44 | for char in word[1:]: 45 | pairs.add((prev_char, char)) 46 | prev_char = char 47 | return pairs 48 | 49 | 50 | def basic_clean(text): 51 | text = ftfy.fix_text(text) 52 | text = html.unescape(html.unescape(text)) 53 | return text.strip() 54 | 55 | 56 | def whitespace_clean(text): 57 | text = re.sub(r'\s+', ' ', text) 58 | text = text.strip() 59 | return text 60 | 61 | 62 | class SimpleTokenizer(object): 63 | def __init__(self, bpe_path: str = default_bpe()): 64 | self.byte_encoder = bytes_to_unicode() 65 | self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} 66 | merges = gzip.open(bpe_path).read().decode("utf-8").split('\n') 67 | merges = merges[1:49152-256-2+1] 68 | merges = [tuple(merge.split()) for merge in merges] 69 | vocab = list(bytes_to_unicode().values()) 70 | vocab = vocab + [v+'</w>' for v in vocab] 71 | for merge in merges: 72 | vocab.append(''.join(merge)) 73 | vocab.extend(['<|startoftext|>', '<|endoftext|>']) 74 | self.encoder = dict(zip(vocab, range(len(vocab)))) 75 | self.decoder = {v: k for k, v in self.encoder.items()} 76 | self.bpe_ranks = dict(zip(merges, range(len(merges)))) 77 | self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'} 78 | self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE) 79 | 80 | def bpe(self, token): 81 | if token in self.cache: 82 | return self.cache[token] 83 | word = tuple(token[:-1]) + ( token[-1] + '</w>',) 84 | pairs = get_pairs(word) 85 | 86 | if not pairs: 87 | return token+'</w>' 88 | 89 | while True: 90 | bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf'))) 91 | if bigram not in self.bpe_ranks: 92 | break 93 | first, second = bigram 94 | new_word = [] 95 | i = 0 96 | while i < len(word): 97 | try: 98 | j = word.index(first, i) 99 | new_word.extend(word[i:j]) 100 | i = j 101 | except: 102 | new_word.extend(word[i:]) 103 | break 104 | 105 | if word[i] == first and i < len(word)-1 and word[i+1] == second: 106 | new_word.append(first+second) 107 | i += 2 108 | else: 109 | new_word.append(word[i]) 110 | i += 1 111 | new_word = tuple(new_word) 112 | word = new_word 113 | if len(word) == 1: 114 | break 115 | else: 116 | pairs = get_pairs(word) 117 | word = ' '.join(word) 118 | self.cache[token] = word 119 | return word 120 | 121 | def encode(self, text): 122 | bpe_tokens = [] 123 | text = whitespace_clean(basic_clean(text)).lower() 124 | for token in re.findall(self.pat, text): 125 | token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8')) 126 | bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')) 127 | return bpe_tokens 128 | 129 | def decode(self, tokens): 130 | text = ''.join([self.decoder[token]
for token in tokens]) 131 | text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ') 132 | return text 133 | -------------------------------------------------------------------------------- /configs/modelnet40.yaml: -------------------------------------------------------------------------------- 1 | # Configuration for TDA Hyperparameters 2 | # NOTE *** copy from dtd.yaml 3 | 4 | # --- Positive Cache Configuration --- 5 | positive: 6 | enabled: True 7 | shot_capacity: 3 8 | alpha: 2.0 9 | beta: 3.0 10 | 11 | # --- Negative Cache Configuration --- 12 | negative: 13 | enabled: True 14 | shot_capacity: 2 15 | alpha: 0.117 16 | beta: 1.0 17 | entropy_threshold: 18 | lower: 0.2 19 | upper: 0.5 20 | mask_threshold: 21 | lower: 0.03 22 | upper: 1.0 -------------------------------------------------------------------------------- /configs/modelnet40_c.yaml: -------------------------------------------------------------------------------- 1 | # Configuration for TDA Hyperparameters 2 | # NOTE *** copy from dtd.yaml 3 | 4 | # --- Positive Cache Configuration --- 5 | positive: 6 | enabled: True 7 | shot_capacity: 3 8 | alpha: 2.0 9 | beta: 3.0 10 | 11 | # --- Negative Cache Configuration --- 12 | negative: 13 | enabled: True 14 | shot_capacity: 2 15 | alpha: 0.117 16 | beta: 1.0 17 | entropy_threshold: 18 | lower: 0.2 19 | upper: 0.5 20 | mask_threshold: 21 | lower: 0.03 22 | upper: 1.0 -------------------------------------------------------------------------------- /configs/modelnet40_views.yaml: -------------------------------------------------------------------------------- 1 | # Configuration for TDA Hyperparameters 2 | # NOTE *** copy from dtd.yaml 3 | 4 | # --- Positive Cache Configuration --- 5 | positive: 6 | enabled: True 7 | shot_capacity: 3 8 | alpha: 2.0 9 | beta: 3.0 10 | 11 | # --- Negative Cache Configuration --- 12 | negative: 13 | enabled: True 14 | shot_capacity: 2 15 | alpha: 0.117 16 | beta: 1.0 17 | entropy_threshold: 18 | lower: 0.2 19 | upper: 0.5 20 | mask_threshold: 21 | lower: 0.03 22 | upper: 1.0 -------------------------------------------------------------------------------- /configs/modelnet_c.yaml: -------------------------------------------------------------------------------- 1 | # Configuration for TDA Hyperparameters 2 | # NOTE *** copy from dtd.yaml 3 | 4 | # --- Positive Cache Configuration --- 5 | positive: 6 | enabled: True 7 | shot_capacity: 3 8 | alpha: 2.0 9 | beta: 3.0 10 | 11 | # --- Negative Cache Configuration --- 12 | negative: 13 | enabled: True 14 | shot_capacity: 2 15 | alpha: 0.117 16 | beta: 1.0 17 | entropy_threshold: 18 | lower: 0.2 19 | upper: 0.5 20 | mask_threshold: 21 | lower: 0.03 22 | upper: 1.0 -------------------------------------------------------------------------------- /configs/modelnet_c_sdxl.yaml: -------------------------------------------------------------------------------- 1 | # Configuration for TDA Hyperparameters 2 | # NOTE *** copy from dtd.yaml 3 | 4 | # --- Positive Cache Configuration --- 5 | positive: 6 | enabled: True 7 | shot_capacity: 3 8 | alpha: 2.0 9 | beta: 3.0 10 | 11 | # --- Negative Cache Configuration --- 12 | negative: 13 | enabled: True 14 | shot_capacity: 2 15 | alpha: 0.117 16 | beta: 1.0 17 | entropy_threshold: 18 | lower: 0.2 19 | upper: 0.5 20 | mask_threshold: 21 | lower: 0.03 22 | upper: 1.0
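All of the YAML files in configs/ share the schema above. A minimal sketch, assuming PyYAML and PyTorch, of how the positive-cache hyperparameters could enter a TDA-style cache lookup; `positive_cache_logits` is a hypothetical helper written for illustration, not part of this repo, and it assumes L2-normalized features and one-hot cache values:

import yaml
import torch

def positive_cache_logits(feat, cache_keys, cache_values, alpha, beta):
    # feat: (D,) L2-normalized query embedding
    # cache_keys: (N, D) L2-normalized cached embeddings
    # cache_values: (N, C) one-hot pseudo-labels of the cached samples
    sim = cache_keys @ feat                    # cosine similarities
    affinity = torch.exp(-beta * (1.0 - sim))  # larger beta -> sharper weighting
    return alpha * (affinity @ cache_values)   # (C,) residual added to the zero-shot logits

with open('configs/modelnet40.yaml') as f:
    cfg = yaml.safe_load(f)
pos = cfg['positive']  # e.g., pos['alpha'] == 2.0, pos['beta'] == 3.0, pos['shot_capacity'] == 3

-------------------------------------------------------------------------------- /configs/modelnet_c_views.yaml: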
-------------------------------------------------------------------------------- 1 | # Configuration for TDA Hyperparameters 2 | # NOTE *** copy from dtd.yaml 3 | 4 | # --- Positive Cache Configuration --- 5 | positive: 6 | enabled: True 7 | shot_capacity: 3 8 | alpha: 2.0 9 | beta: 3.0 10 | 11 | # --- Negative Cache Configuration --- 12 | negative: 13 | enabled: True 14 | shot_capacity: 2 15 | alpha: 0.117 16 | beta: 1.0 17 | entropy_threshold: 18 | lower: 0.2 19 | upper: 0.5 20 | mask_threshold: 21 | lower: 0.03 22 | upper: 1.0 -------------------------------------------------------------------------------- /configs/objaverse_lvis.yaml: -------------------------------------------------------------------------------- 1 | # Configuration for TDA Hyperparameters 2 | # NOTE *** copy from imagenet_a.yaml 3 | 4 | # --- Positive Cache Configuration --- 5 | positive: 6 | enabled: True 7 | shot_capacity: 3 8 | alpha: 2.0 9 | beta: 5.0 10 | 11 | # --- Negative Cache Configuration --- 12 | negative: 13 | enabled: True 14 | shot_capacity: 2 15 | alpha: 0.117 16 | beta: 1.0 17 | entropy_threshold: 18 | lower: 0.2 19 | upper: 0.5 20 | mask_threshold: 21 | lower: 0.03 22 | upper: 1.0 -------------------------------------------------------------------------------- /configs/omniobject3d.yaml: -------------------------------------------------------------------------------- 1 | # Configuration for TDA Hyperparameters 2 | # NOTE *** copy from dtd.yaml 3 | 4 | # --- Positive Cache Configuration --- 5 | positive: 6 | enabled: True 7 | shot_capacity: 3 8 | alpha: 2.0 9 | beta: 3.0 10 | 11 | # --- Negative Cache Configuration --- 12 | negative: 13 | enabled: True 14 | shot_capacity: 2 15 | alpha: 0.117 16 | beta: 1.0 17 | entropy_threshold: 18 | lower: 0.2 19 | upper: 0.5 20 | mask_threshold: 21 | lower: 0.03 22 | upper: 1.0 -------------------------------------------------------------------------------- /configs/pointda_modelnet.yaml: -------------------------------------------------------------------------------- 1 | # Configuration for TDA Hyperparameters 2 | 3 | # --- Positive Cache Configuration --- 4 | positive: 5 | enabled: True 6 | shot_capacity: 3 7 | alpha: 4.0 8 | beta: 8.0 9 | 10 | # --- Negative Cache Configuration --- 11 | negative: 12 | enabled: True 13 | shot_capacity: 2 14 | alpha: 0.117 15 | beta: 1.0 16 | entropy_threshold: 17 | lower: 0.2 18 | upper: 0.5 19 | mask_threshold: 20 | lower: 0.03 21 | upper: 1.0 -------------------------------------------------------------------------------- /configs/pointda_scannet.yaml: -------------------------------------------------------------------------------- 1 | # Configuration for TDA Hyperparameters 2 | 3 | # --- Positive Cache Configuration --- 4 | positive: 5 | enabled: True 6 | shot_capacity: 3 7 | alpha: 4.0 8 | beta: 8.0 9 | 10 | # --- Negative Cache Configuration --- 11 | negative: 12 | enabled: True 13 | shot_capacity: 2 14 | alpha: 0.117 15 | beta: 1.0 16 | entropy_threshold: 17 | lower: 0.2 18 | upper: 0.5 19 | mask_threshold: 20 | lower: 0.03 21 | upper: 1.0 -------------------------------------------------------------------------------- /configs/pointda_shapenet.yaml: -------------------------------------------------------------------------------- 1 | # Configuration for TDA Hyperparameters 2 | 3 | # --- Positive Cache Configuration --- 4 | positive: 5 | enabled: True 6 | shot_capacity: 3 7 | alpha: 4.0 8 | beta: 8.0 9 | 10 | # --- Negative Cache Configuration --- 11 | negative: 12 | enabled: True 13 | shot_capacity: 2 14 | alpha: 0.117 15 
| beta: 1.0 16 | entropy_threshold: 17 | lower: 0.2 18 | upper: 0.5 19 | mask_threshold: 20 | lower: 0.03 21 | upper: 1.0 -------------------------------------------------------------------------------- /configs/scanobjectnn.yaml: -------------------------------------------------------------------------------- 1 | # Configuration for TDA Hyperparameters 2 | # NOTE *** copy from eurosat.yaml 3 | 4 | # --- Positive Cache Configuration --- 5 | positive: 6 | enabled: True 7 | shot_capacity: 3 8 | alpha: 4.0 9 | beta: 8.0 10 | 11 | # --- Negative Cache Configuration --- 12 | negative: 13 | enabled: True 14 | shot_capacity: 2 15 | alpha: 0.117 16 | beta: 1.0 17 | entropy_threshold: 18 | lower: 0.2 19 | upper: 0.5 20 | mask_threshold: 21 | lower: 0.03 22 | upper: 1.0 -------------------------------------------------------------------------------- /configs/scanobjnn.yaml: -------------------------------------------------------------------------------- 1 | # Configuration for TDA Hyperparameters 2 | # NOTE *** copy from eurosat.yaml 3 | 4 | # --- Positive Cache Configuration --- 5 | positive: 6 | enabled: True 7 | shot_capacity: 3 8 | alpha: 4.0 9 | beta: 8.0 10 | 11 | # --- Negative Cache Configuration --- 12 | negative: 13 | enabled: True 14 | shot_capacity: 2 15 | alpha: 0.117 16 | beta: 1.0 17 | entropy_threshold: 18 | lower: 0.2 19 | upper: 0.5 20 | mask_threshold: 21 | lower: 0.03 22 | upper: 1.0 -------------------------------------------------------------------------------- /configs/sim2real_sonn.yaml: -------------------------------------------------------------------------------- 1 | # Configuration for TDA Hyperparameters 2 | 3 | # --- Positive Cache Configuration --- 4 | positive: 5 | enabled: True 6 | shot_capacity: 3 7 | alpha: 4.0 8 | beta: 8.0 9 | 10 | # --- Negative Cache Configuration --- 11 | negative: 12 | enabled: True 13 | shot_capacity: 2 14 | alpha: 0.117 15 | beta: 1.0 16 | entropy_threshold: 17 | lower: 0.2 18 | upper: 0.5 19 | mask_threshold: 20 | lower: 0.03 21 | upper: 1.0 -------------------------------------------------------------------------------- /configs/snv2_c.yaml: -------------------------------------------------------------------------------- 1 | # Configuration for TDA Hyperparameters 2 | # NOTE *** copy from dtd.yaml 3 | 4 | # --- Positive Cache Configuration --- 5 | positive: 6 | enabled: True 7 | shot_capacity: 3 8 | alpha: 2.0 9 | beta: 3.0 10 | 11 | # --- Negative Cache Configuration --- 12 | negative: 13 | enabled: True 14 | shot_capacity: 2 15 | alpha: 0.117 16 | beta: 1.0 17 | entropy_threshold: 18 | lower: 0.2 19 | upper: 0.5 20 | mask_threshold: 21 | lower: 0.03 22 | upper: 1.0 -------------------------------------------------------------------------------- /configs/sonn_c.yaml: -------------------------------------------------------------------------------- 1 | # Configuration for TDA Hyperparameters 2 | # NOTE *** copy from dtd.yaml 3 | 4 | # --- Positive Cache Configuration --- 5 | positive: 6 | enabled: True 7 | shot_capacity: 3 8 | alpha: 2.0 9 | beta: 3.0 10 | 11 | # --- Negative Cache Configuration --- 12 | negative: 13 | enabled: True 14 | shot_capacity: 2 15 | alpha: 0.117 16 | beta: 1.0 17 | entropy_threshold: 18 | lower: 0.2 19 | upper: 0.5 20 | mask_threshold: 21 | lower: 0.03 22 | upper: 1.0 -------------------------------------------------------------------------------- /datasets/augmix_ops.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019 Google LLC 2 | # 3 | # Licensed under 
the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # https://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | """Base augmentations operators.""" 16 | 17 | import numpy as np 18 | from PIL import Image, ImageOps, ImageEnhance 19 | 20 | # ImageNet code should change this value 21 | IMAGE_SIZE = 224 22 | 23 | 24 | def int_parameter(level, maxval): 25 | """Helper function to scale `val` between 0 and maxval . 26 | Args: 27 | level: Level of the operation that will be between [0, `PARAMETER_MAX`]. 28 | maxval: Maximum value that the operation can have. This will be scaled to 29 | level/PARAMETER_MAX. 30 | Returns: 31 | An int that results from scaling `maxval` according to `level`. 32 | """ 33 | return int(level * maxval / 10) 34 | 35 | 36 | def float_parameter(level, maxval): 37 | """Helper function to scale `val` between 0 and maxval. 38 | Args: 39 | level: Level of the operation that will be between [0, `PARAMETER_MAX`]. 40 | maxval: Maximum value that the operation can have. This will be scaled to 41 | level/PARAMETER_MAX. 42 | Returns: 43 | A float that results from scaling `maxval` according to `level`. 44 | """ 45 | return float(level) * maxval / 10. 46 | 47 | 48 | def sample_level(n): 49 | return np.random.uniform(low=0.1, high=n) 50 | 51 | 52 | def autocontrast(pil_img, _): 53 | return ImageOps.autocontrast(pil_img) 54 | 55 | 56 | def equalize(pil_img, _): 57 | return ImageOps.equalize(pil_img) 58 | 59 | 60 | def posterize(pil_img, level): 61 | level = int_parameter(sample_level(level), 4) 62 | return ImageOps.posterize(pil_img, 4 - level) 63 | 64 | 65 | def rotate(pil_img, level): 66 | degrees = int_parameter(sample_level(level), 30) 67 | if np.random.uniform() > 0.5: 68 | degrees = -degrees 69 | return pil_img.rotate(degrees, resample=Image.BILINEAR) 70 | 71 | 72 | def solarize(pil_img, level): 73 | level = int_parameter(sample_level(level), 256) 74 | return ImageOps.solarize(pil_img, 256 - level) 75 | 76 | 77 | def shear_x(pil_img, level): 78 | level = float_parameter(sample_level(level), 0.3) 79 | if np.random.uniform() > 0.5: 80 | level = -level 81 | return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE), 82 | Image.AFFINE, (1, level, 0, 0, 1, 0), 83 | resample=Image.BILINEAR) 84 | 85 | 86 | def shear_y(pil_img, level): 87 | level = float_parameter(sample_level(level), 0.3) 88 | if np.random.uniform() > 0.5: 89 | level = -level 90 | return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE), 91 | Image.AFFINE, (1, 0, 0, level, 1, 0), 92 | resample=Image.BILINEAR) 93 | 94 | 95 | def translate_x(pil_img, level): 96 | level = int_parameter(sample_level(level), IMAGE_SIZE / 3) 97 | if np.random.random() > 0.5: 98 | level = -level 99 | return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE), 100 | Image.AFFINE, (1, 0, level, 0, 1, 0), 101 | resample=Image.BILINEAR) 102 | 103 | 104 | def translate_y(pil_img, level): 105 | level = int_parameter(sample_level(level), IMAGE_SIZE / 3) 106 | if np.random.random() > 0.5: 107 | level = -level 108 | return 
pil_img.transform((IMAGE_SIZE, IMAGE_SIZE), 109 | Image.AFFINE, (1, 0, 0, 0, 1, level), 110 | resample=Image.BILINEAR) 111 | 112 | 113 | # operation that overlaps with ImageNet-C's test set 114 | def color(pil_img, level): 115 | level = float_parameter(sample_level(level), 1.8) + 0.1 116 | return ImageEnhance.Color(pil_img).enhance(level) 117 | 118 | 119 | # operation that overlaps with ImageNet-C's test set 120 | def contrast(pil_img, level): 121 | level = float_parameter(sample_level(level), 1.8) + 0.1 122 | return ImageEnhance.Contrast(pil_img).enhance(level) 123 | 124 | 125 | # operation that overlaps with ImageNet-C's test set 126 | def brightness(pil_img, level): 127 | level = float_parameter(sample_level(level), 1.8) + 0.1 128 | return ImageEnhance.Brightness(pil_img).enhance(level) 129 | 130 | 131 | # operation that overlaps with ImageNet-C's test set 132 | def sharpness(pil_img, level): 133 | level = float_parameter(sample_level(level), 1.8) + 0.1 134 | return ImageEnhance.Sharpness(pil_img).enhance(level) 135 | 136 | 137 | augmentations = [ 138 | autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y, 139 | translate_x, translate_y 140 | ] 141 | 142 | augmentations_all = [ 143 | autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y, 144 | translate_x, translate_y, color, contrast, brightness, sharpness 145 | ] -------------------------------------------------------------------------------- /datasets/modelnet40.py: -------------------------------------------------------------------------------- 1 | import os 2 | import copy 3 | import json 4 | import numpy as np 5 | 6 | import torch 7 | from torch.utils.data import Dataset 8 | 9 | from .templates import text_prompts, mn40_gpt35_prompts, mn40_gpt4_prompts, mn40_pointllm_prompts 10 | 11 | 12 | def pc_normalize(pc): 13 | ''' NOTE what's the difference between `pc_normalize` and `normalize_pc`??? 
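Judging from the two definitions here: both center the cloud at its centroid and divide by the largest point norm, so they agree on typical inputs; `normalize_pc` additionally guards against degenerate clouds, returning all zeros when the maximum norm is below 1e-6 instead of dividing by it.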
''' 14 | centroid = np.mean(pc, axis=0) 15 | pc = pc - centroid 16 | m = np.max(np.sqrt(np.sum(pc**2, axis=1))) 17 | pc = pc / m 18 | return pc 19 | 20 | 21 | def normalize_pc(pc): 22 | # normalize pc to [-1, 1] 23 | pc = pc - np.mean(pc, axis=0) 24 | if np.max(np.linalg.norm(pc, axis=1)) < 1e-6: 25 | pc = np.zeros_like(pc) 26 | else: 27 | pc = pc / np.max(np.linalg.norm(pc, axis=1)) 28 | return pc 29 | 30 | 31 | class ModelNet40(Dataset): 32 | def __init__(self, config): 33 | self.lm3d = config.lm3d 34 | # option 1: use the manual template from `templates.py` 35 | self.template = text_prompts 36 | # option 2: use the responses from the LLM 37 | # self.template = mn40_gpt35_prompts 38 | # self.template = mn40_gpt4_prompts 39 | # self.template = mn40_pointllm_prompts 40 | 41 | self.npoints = config.npoints 42 | self.data_path = config.modelnet40_root 43 | self.catfile = os.path.join(self.data_path, 'classnames.txt') 44 | self.classnames = [line.rstrip() for line in open(self.catfile)] 45 | self.classes = dict(zip(self.classnames, range(len(self.classnames)))) 46 | 47 | self.pcs = np.load('%s/test_pc.npy' % self.data_path, allow_pickle=True) 48 | self.openshape_split = json.load(open('%s/test_split.json' % self.data_path, "r")) 49 | 50 | self.cate_to_id = {} 51 | for i in range(len(self.classnames)): 52 | self.cate_to_id[self.classnames[i]] = str(i) 53 | 54 | def __len__(self): 55 | return len(self.openshape_split) 56 | 57 | def __getitem__(self, idx): 58 | pc = copy.deepcopy(self.pcs[idx]) 59 | 60 | xyz = pc['xyz'][:self.npoints] 61 | rgb = pc['rgb'][:self.npoints] 62 | rgb = rgb / 255.0 # 100, scale to 0.4 to make it consistent with the training data 63 | rgb = torch.from_numpy(rgb).float() 64 | 65 | # NOTE swap y,z axes 66 | if self.lm3d == 'openshape': 67 | xyz[:, [1, 2]] = xyz[:, [2, 1]] 68 | xyz = normalize_pc(xyz) 69 | else: 70 | xyz[:, 0:3] = pc_normalize(xyz[:, 0:3]) 71 | 72 | xyz = torch.from_numpy(xyz).float() 73 | 74 | label_name = self.openshape_split[idx]["category"] 75 | label = np.array([int(self.cate_to_id[label_name])]).astype(np.int32) 76 | 77 | return xyz, label[0], label_name, rgb -------------------------------------------------------------------------------- /datasets/modelnet40_c.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | 4 | from torch.utils.data import Dataset 5 | 6 | from .templates import text_prompts 7 | 8 | 9 | class ModelNet40_C(Dataset): 10 | """ModelNet40-C(orruption). 11 | 12 | This dataset is used for testing only.
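Assumed layout under `cfg.modelnet40_c_root`, inferred from the loader below: `shape_names.txt`, `label.npy`, and one `data_<cor_type>.npy` per corruption, e.g. `data_background_1.npy`. Items come back as `(pc, label, classname, rgb)`, with `rgb` a constant 0.4 placeholder since this variant carries no color.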
13 | """ 14 | 15 | def __init__(self, cfg): 16 | self.template = text_prompts 17 | 18 | self.dataset_dir = cfg.modelnet40_c_root 19 | 20 | self.classnames = [] 21 | text_file = os.path.join(self.dataset_dir, 'shape_names.txt') 22 | with open(text_file, 'r') as f: 23 | lines = f.readlines() 24 | for line in lines: 25 | classname = line.strip() 26 | self.classnames.append(classname) 27 | 28 | cor_type = cfg.cor_type 29 | data_file = f'data_{cor_type}.npy' # e.g., background_1, cutout_1, density_1 30 | 31 | self.test_data = np.load(f'{self.dataset_dir}/{data_file}') 32 | self.test_label = np.load(f'{self.dataset_dir}/label.npy') 33 | 34 | self.npoints = cfg.npoints 35 | 36 | def __len__(self): 37 | return len(self.test_label) 38 | 39 | def __getitem__(self, idx): 40 | pc = self.test_data[idx].astype(np.float32) 41 | label = self.test_label[idx].astype(np.int32) 42 | classname = self.classnames[int(label)] 43 | 44 | rgb = np.ones_like(pc) * 0.4 45 | return pc, label, classname, rgb 46 | -------------------------------------------------------------------------------- /datasets/modelnet40_sdxl.py: -------------------------------------------------------------------------------- 1 | import os 2 | from PIL import Image 3 | 4 | import torch 5 | from torch.utils.data import Dataset 6 | from torchvision import transforms 7 | 8 | from .templates import text_prompts, text_prompts_pc2_view 9 | 10 | 11 | class ModelNet40_SDXL(Dataset): 12 | """ This dataset is used for testing only. """ 13 | 14 | def __init__(self, cfg): 15 | self.template = text_prompts_pc2_view 16 | 17 | self.dataset_dir = cfg.modelnet40_sdxl_root 18 | 19 | self.classnames = [] 20 | text_file = os.path.join(self.dataset_dir, 'shape_names.txt') 21 | with open(text_file, 'r') as f: 22 | lines = f.readlines() 23 | for line in lines: 24 | classname = line.strip() 25 | self.classnames.append(classname) 26 | 27 | self.pc_img_path = [] 28 | self.labels = [] 29 | for cls in self.classnames: 30 | d = os.path.join(self.dataset_dir, cls) 31 | self.pc_img_path.append(d) 32 | 33 | label = self.classnames.index(cls) 34 | self.labels.append(label) 35 | 36 | # --- option 1. try simplest data augmentation 37 | self.transform = transforms.Compose([ 38 | transforms.Resize((cfg.imsize, cfg.imsize)), 39 | transforms.ToTensor(), 40 | transforms.Normalize(mean=[0.485, 0.456, 0.406], 41 | std=[0.229, 0.224, 0.225]) 42 | ]) 43 | 44 | # --- option 2. data augmentation provided by OpenAI `CLIP` or `open_clip` 45 | # self.transform = preprocess_val 46 | 47 | def __len__(self): 48 | return len(self.pc_img_path) 49 | 50 | def __getitem__(self, idx): 51 | pc_img_dir = self.pc_img_path[idx] 52 | 53 | imgs = [] 54 | for sdxl_img in os.listdir(pc_img_dir): 55 | img = Image.open(os.path.join(pc_img_dir, sdxl_img)).convert('RGB') 56 | if self.transform: 57 | # img: (3, 224, 224) 58 | img = self.transform(img) 59 | imgs.append(img) 60 | 61 | label = self.labels[idx] 62 | 63 | return torch.stack(imgs), label -------------------------------------------------------------------------------- /datasets/modelnet40_views.py: -------------------------------------------------------------------------------- 1 | import os 2 | from PIL import Image 3 | 4 | import torch 5 | from torch.utils.data import Dataset 6 | from torchvision import transforms 7 | 8 | from .templates import text_prompts, text_prompts_pc2_view 9 | 10 | 11 | class ModelNet40_Views(Dataset): 12 | """Multiple Projection Views of `clean` ModelNet40 13 | 14 | This dataset is used for testing only. 
15 | """ 16 | 17 | def __init__(self, cfg): 18 | # self.template = text_prompts 19 | self.template = text_prompts_pc2_view 20 | 21 | self.dataset_dir = cfg.modelnet40_views_root 22 | 23 | self.classnames = [] 24 | text_file = os.path.join(self.dataset_dir, 'classnames.txt') 25 | with open(text_file, 'r') as f: 26 | lines = f.readlines() 27 | for line in lines: 28 | classname = line.strip() 29 | self.classnames.append(classname) 30 | 31 | self.pc_view_path = [] 32 | pc_id = 0 33 | for item in os.listdir(self.dataset_dir): 34 | d = os.path.join(self.dataset_dir, item) 35 | if os.path.isdir(d): 36 | self.pc_view_path.append(os.path.join(self.dataset_dir, str(pc_id))) 37 | pc_id += 1 38 | 39 | self.labels = [] 40 | with open(os.path.join(self.dataset_dir, 'labels.txt')) as fin: 41 | lines = fin.readlines() 42 | for line in lines: 43 | self.labels.append(int(line.strip())) 44 | 45 | # --- option 1. try simplest data augmentation 46 | self.transform = transforms.Compose([ 47 | transforms.ToTensor(), 48 | transforms.Normalize(mean=[0.485, 0.456, 0.406], 49 | std=[0.229, 0.224, 0.225]) 50 | ]) 51 | 52 | # --- option 2. data augmentation provided by OpenAI `CLIP` or `open_clip` 53 | # self.transform = preprocess_val 54 | 55 | def __len__(self): 56 | return len(self.pc_view_path) 57 | 58 | def __getitem__(self, idx): 59 | pc_view_dir = self.pc_view_path[idx] 60 | 61 | imgs = [] 62 | for view in os.listdir(pc_view_dir): 63 | img = Image.open(os.path.join(pc_view_dir, view)).convert('RGB') 64 | if self.transform: 65 | # img: (3, 224, 224) 66 | img = self.transform(img) 67 | imgs.append(img) 68 | 69 | label = self.labels[idx] 70 | 71 | return torch.stack(imgs), label -------------------------------------------------------------------------------- /datasets/modelnet_c.py: -------------------------------------------------------------------------------- 1 | import os 2 | import h5py 3 | import numpy as np 4 | from collections import OrderedDict 5 | 6 | import torch 7 | from torch.utils.data import Dataset 8 | 9 | from .templates import text_prompts 10 | 11 | 12 | class ModelNet_C(Dataset): 13 | """ModelNet_C(orruption). 14 | 15 | This dataset is used for testing only. 
16 | """ 17 | 18 | def __init__(self, cfg): 19 | self.lm3d = cfg.lm3d 20 | 21 | self.template = text_prompts 22 | 23 | self.dataset_dir = cfg.modelnet_c_root 24 | 25 | self.classnames = [] 26 | text_file = os.path.join(self.dataset_dir, 'shape_names.txt') 27 | with open(text_file, 'r') as f: 28 | lines = f.readlines() 29 | for line in lines: 30 | classname = line.strip() 31 | self.classnames.append(classname) 32 | 33 | cor_type = cfg.cor_type 34 | data_file = f'{cor_type}.h5' 35 | 36 | f = h5py.File(f'{self.dataset_dir}/{data_file}') 37 | self.test_data = f['data'][:] 38 | self.test_label = f['label'][:] 39 | 40 | self.npoints = cfg.npoints 41 | 42 | def __len__(self): 43 | return len(self.test_label) 44 | 45 | def __getitem__(self, idx): 46 | pc = self.test_data[idx].astype(np.float32) 47 | 48 | # NOTE swap y,z axises 49 | if self.lm3d == 'openshape': 50 | pc[:, [1, 2]] = pc[:, [2, 1]] 51 | 52 | label = self.test_label[idx].astype(np.int32) 53 | classname = self.classnames[int(label)] 54 | 55 | rgb = np.ones_like(pc) * 0.4 56 | return pc, label, classname, rgb -------------------------------------------------------------------------------- /datasets/modelnet_c_views.py: -------------------------------------------------------------------------------- 1 | import os 2 | from PIL import Image 3 | 4 | import torch 5 | from torch.utils.data import Dataset 6 | from torchvision import transforms 7 | 8 | from .templates import text_prompts, text_prompts_pc2_view 9 | 10 | 11 | class ModelNet_C_Views(Dataset): 12 | """ModelNet_C(orruption). 13 | 14 | This dataset is used for testing only. 15 | """ 16 | 17 | def __init__(self, cfg): 18 | # self.template = text_prompts 19 | self.template = text_prompts_pc2_view 20 | 21 | self.dataset_dir = cfg.modelnet_c_views_root 22 | self.cor_type = cfg.cor_type 23 | 24 | self.classnames = [] 25 | text_file = os.path.join(self.dataset_dir, 'shape_names.txt') 26 | with open(text_file, 'r') as f: 27 | lines = f.readlines() 28 | for line in lines: 29 | classname = line.strip() 30 | self.classnames.append(classname) 31 | 32 | self.pc_view_path = [] 33 | pc_id = 0 34 | for item in os.listdir(os.path.join(self.dataset_dir, self.cor_type)): 35 | d = os.path.join(self.dataset_dir, self.cor_type, item) 36 | if os.path.isdir(d): 37 | self.pc_view_path.append(os.path.join(self.dataset_dir, self.cor_type, str(pc_id))) 38 | pc_id += 1 39 | 40 | self.labels = [] 41 | with open(os.path.join(self.dataset_dir, self.cor_type, 'labels.txt')) as fin: 42 | lines = fin.readlines() 43 | for line in lines: 44 | self.labels.append(int(line.strip())) 45 | 46 | # --- option 1. try simplest data augmentation 47 | self.transform = transforms.Compose([ 48 | transforms.ToTensor(), 49 | transforms.Normalize(mean=[0.485, 0.456, 0.406], 50 | std=[0.229, 0.224, 0.225]) 51 | ]) 52 | 53 | # --- option 2. 
data augmentation provided by OpenAI `CLIP` or `open_clip` 54 | # self.transform = preprocess_val 55 | 56 | def __len__(self): 57 | return len(self.pc_view_path) 58 | 59 | def __getitem__(self, idx): 60 | pc_view_dir = self.pc_view_path[idx] 61 | 62 | imgs = [] 63 | for view in os.listdir(pc_view_dir): 64 | img = Image.open(os.path.join(pc_view_dir, view)).convert('RGB') 65 | if self.transform: 66 | # img: (3, 224, 224) 67 | img = self.transform(img) 68 | imgs.append(img) 69 | 70 | label = self.labels[idx] 71 | 72 | return torch.stack(imgs), label -------------------------------------------------------------------------------- /datasets/omniobject3d.py: -------------------------------------------------------------------------------- 1 | import os 2 | from plyfile import PlyData 3 | import numpy as np 4 | 5 | from torch.utils.data import Dataset 6 | 7 | from .templates import text_prompts, omni3d_gpt35_prompts 8 | 9 | 10 | def normalize_pc(pc): 11 | # normalize pc to [-1, 1] 12 | pc = pc - np.mean(pc, axis=0) 13 | if np.max(np.linalg.norm(pc, axis=1)) < 1e-6: 14 | pc = np.zeros_like(pc) 15 | else: 16 | pc = pc / np.max(np.linalg.norm(pc, axis=1)) 17 | return pc 18 | 19 | 20 | class OmniObject3D(Dataset): 21 | 22 | def __init__(self, cfg): 23 | self.lm3d = cfg.lm3d 24 | 25 | # option 1: use the manual template from `templates.py` 26 | self.template = text_prompts 27 | # option 2: use the responses from the LLM 28 | # self.template = omni3d_gpt35_prompts 29 | 30 | self.dataset_dir = cfg.omniobject3d_root 31 | self.num_points = cfg.npoints 32 | 33 | self.classnames = self.set_classnames() 34 | 35 | self.test_data, self.test_label = self.load_data() 36 | 37 | def set_classnames(self): 38 | classnames = [] 39 | print('===', f'{self.dataset_dir}/{self.num_points}', '===') 40 | for cls in os.listdir(f'{self.dataset_dir}/{self.num_points}'): 41 | if os.path.isdir(os.path.join(f'{self.dataset_dir}/{self.num_points}', cls)): 42 | classnames.append(cls) 43 | 44 | return sorted(classnames) 45 | 46 | def load_data(self): 47 | all_data = [] 48 | all_label = [] 49 | 50 | data_dir1 = f'{self.dataset_dir}/{self.num_points}' 51 | for cls in os.listdir(data_dir1): 52 | data_dir2 = os.path.join(data_dir1, cls) 53 | for ins in os.listdir(data_dir2): 54 | data_dir3 = os.path.join(data_dir2, ins) 55 | if not os.listdir(data_dir3): # empty dir 56 | continue 57 | 58 | data_f = os.path.join(data_dir3, f'pcd_{self.num_points}.ply') 59 | plydata = PlyData.read(data_f) 60 | x = plydata.elements[0].data['x'] 61 | y = plydata.elements[0].data['y'] 62 | z = plydata.elements[0].data['z'] 63 | # a whole point cloud 64 | pts = np.stack([x,y,z], axis=0).T 65 | # pc's label 66 | label = self.classnames.index(cls) 67 | all_data.append(pts) 68 | all_label.append(label) 69 | 70 | all_data = np.array(all_data) 71 | all_label = np.array(all_label) 72 | 73 | return all_data, all_label 74 | 75 | def __len__(self): 76 | return len(self.test_label) 77 | 78 | def __getitem__(self, idx): 79 | pc = self.test_data[idx].astype(np.float32) 80 | 81 | # NOTE swap y,z axes 82 | if self.lm3d == 'openshape': 83 | pc[:, [1, 2]] = pc[:, [2, 1]] 84 | 85 | # NOTE it's necessary for `omni3d` to normalize the pc for performance 86 | pc = normalize_pc(pc) 87 | 88 | label = self.test_label[idx].astype(np.int32) 89 | classname = self.classnames[int(label)] 90 | 91 | rgb = np.ones_like(pc) * 0.4 92 | 93 | return pc, label, classname, rgb 94 | -------------------------------------------------------------------------------- /datasets/pointda_modelnet.py:
-------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | 4 | from torch.utils.data import Dataset 5 | 6 | from .templates import text_prompts 7 | from .utils import normalize_pc, pc_normalize 8 | 9 | 10 | class PointDA_ModelNet(Dataset): 11 | def __init__(self, cfg): 12 | self.lm3d = cfg.lm3d 13 | self.template = text_prompts 14 | 15 | self.dataset_dir = os.path.join('data/xset/pointda', 'modelnet') 16 | self.classnames, name2idx = self.read_classnames(self.dataset_dir) 17 | 18 | self.test_data, self.test_label = self.load_data(name2idx, 'test') 19 | 20 | def load_data(self, name2idx, split): 21 | data_list, label_list = [], [] 22 | for cls in os.listdir(self.dataset_dir): 23 | cls_dir = os.path.join(self.dataset_dir, cls) # e.g., data/xset/pointda/modelnet/bed 24 | 25 | if os.path.isdir(cls_dir): 26 | dir_f = os.path.join(cls_dir, split) # e.g., data/xset/pointda/modelnet/bed/test 27 | label = name2idx[cls] 28 | 29 | for f in os.listdir(dir_f): 30 | if f.endswith('.npy'): 31 | # shape: (2048, 3) -> (1, 2048, 3) 32 | points = np.expand_dims(np.load(os.path.join(dir_f, f)), axis=0) 33 | data_list.append(points) 34 | label_list.append([label]) 35 | 36 | data = np.concatenate(data_list, axis=0).astype('float32') 37 | label = np.array(label_list).astype("int64") 38 | 39 | return data, label 40 | 41 | @staticmethod 42 | def read_classnames(dataset_dir): 43 | classnames = [] 44 | name2idx = dict() 45 | 46 | names = sorted(os.listdir(dataset_dir)) 47 | 48 | for idx, name in enumerate(names): 49 | if os.path.isdir(os.path.join(dataset_dir, name)): 50 | classnames.append(name) 51 | name2idx[name] = idx 52 | 53 | return classnames, name2idx 54 | 55 | def __len__(self): 56 | return len(self.test_label) 57 | 58 | def __getitem__(self, idx): 59 | xyz = self.test_data[idx] 60 | label = self.test_label[idx] 61 | cname = self.classnames[int(label)] 62 | 63 | if self.lm3d == 'openshape': 64 | xyz[:, [1, 2]] = xyz[:, [2, 1]] 65 | xyz = normalize_pc(xyz) 66 | else: 67 | xyz = pc_normalize(xyz) 68 | 69 | rgb = np.ones_like(xyz) * 0.4 70 | 71 | return xyz, label, cname, rgb -------------------------------------------------------------------------------- /datasets/pointda_scannet.py: -------------------------------------------------------------------------------- 1 | import os 2 | import h5py 3 | import numpy as np 4 | 5 | from torch.utils.data import Dataset 6 | 7 | from .templates import text_prompts 8 | from .utils import normalize_pc, pc_normalize 9 | 10 | 11 | class PointDA_ScanNet(Dataset): 12 | def __init__(self, cfg): 13 | self.lm3d = cfg.lm3d 14 | self.template = text_prompts 15 | 16 | self.dataset_dir = os.path.join('data/xset/pointda', 'scannet') 17 | 18 | text_file = os.path.join(self.dataset_dir, 'shape_names.txt') 19 | self.classnames = self.read_classnames(text_file) 20 | 21 | self.test_data, self.test_label = self.load_data(os.path.join(self.dataset_dir, 'test_files.txt')) 22 | 23 | def load_data(self, data_path): 24 | all_data = [] 25 | all_label = [] 26 | with open(data_path, "r") as f: 27 | for h5_name in f.readlines(): 28 | h5f = h5py.File(h5_name.strip(), 'r') # renamed to avoid shadowing the list file handle `f` 29 | data = h5f['data'][:].astype('float32') 30 | label = h5f['label'][:].astype('int64') 31 | h5f.close() 32 | all_data.append(data) 33 | all_label.append(label) 34 | # NOTE each point has 6 dimensions: first 3 coordinates, last 3 colors 35 | all_data = np.concatenate(all_data, axis=0)[:, :, :6] 36 | all_label = np.concatenate(all_label, axis=0) 37 | 38 | return all_data, all_label 39 | 40 |
@staticmethod 41 | def read_classnames(text_file): 42 | classnames = [] 43 | with open(text_file, 'r') as f: 44 | lines = f.readlines() 45 | for i, line in enumerate(lines): 46 | classname = line.strip() 47 | classnames.append(classname) 48 | 49 | return classnames 50 | 51 | def __len__(self): 52 | return len(self.test_label) 53 | 54 | def __getitem__(self, idx): 55 | """ NOTE each point has 6 dimensions: xyz, rgb """ 56 | xyz = self.test_data[idx][:, :3] 57 | rgb = self.test_data[idx][:, 3:] 58 | 59 | label = self.test_label[idx] 60 | cname = self.classnames[int(label)] 61 | 62 | if self.lm3d == 'openshape': 63 | xyz[:, [1, 2]] = xyz[:, [2, 1]] 64 | xyz = normalize_pc(xyz) 65 | rgb = normalize_pc(rgb) 66 | else: 67 | xyz = pc_normalize(xyz) 68 | rgb = normalize_pc(rgb) 69 | 70 | return xyz, label, cname, rgb 71 | -------------------------------------------------------------------------------- /datasets/pointda_shapenet.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | 4 | from torch.utils.data import Dataset 5 | 6 | from .templates import text_prompts 7 | from .utils import normalize_pc, pc_normalize 8 | 9 | 10 | class PointDA_ShapeNet(Dataset): 11 | def __init__(self, cfg): 12 | self.lm3d = cfg.lm3d 13 | self.template = text_prompts 14 | 15 | self.dataset_dir = os.path.join('data/xset/pointda', 'shapenet') 16 | self.classnames, name2idx = self.read_classnames(self.dataset_dir) 17 | 18 | self.test_data, self.test_label = self.load_data(name2idx, 'test') 19 | 20 | def load_data(self, name2idx, split): 21 | data_list, label_list = [], [] 22 | for cls in os.listdir(self.dataset_dir): 23 | cls_dir = os.path.join(self.dataset_dir, cls) # e.g., data/xset/pointda/shapenet/bed 24 | 25 | if os.path.isdir(cls_dir): 26 | dir_f = os.path.join(cls_dir, split) # e.g., data/xset/pointda/shapenet/bed/test 27 | label = name2idx[cls] 28 | 29 | for f in os.listdir(dir_f): 30 | if f.endswith('.npy'): 31 | # shape: (2048, 3) -> (1, 2048, 3) 32 | points = np.expand_dims(np.load(os.path.join(dir_f, f)), axis=0) 33 | data_list.append(points) 34 | label_list.append([label]) 35 | 36 | data = np.concatenate(data_list, axis=0).astype('float32') 37 | label = np.array(label_list).astype("int64") 38 | 39 | return data, label 40 | 41 | @staticmethod 42 | def read_classnames(dataset_dir): 43 | classnames = [] 44 | name2idx = dict() 45 | 46 | names = sorted(os.listdir(dataset_dir)) 47 | 48 | for idx, name in enumerate(names): 49 | if os.path.isdir(os.path.join(dataset_dir, name)): 50 | classnames.append(name) 51 | name2idx[name] = idx 52 | 53 | return classnames, name2idx 54 | 55 | def __len__(self): 56 | return len(self.test_label) 57 | 58 | def __getitem__(self, idx): 59 | xyz = self.test_data[idx] 60 | label = self.test_label[idx] 61 | cname = self.classnames[int(label)] 62 | 63 | if self.lm3d == 'openshape': 64 | xyz[:, [1, 2]] = xyz[:, [2, 1]] 65 | xyz = normalize_pc(xyz) 66 | else: 67 | xyz = pc_normalize(xyz) 68 | 69 | rgb = np.ones_like(xyz) * 0.4 70 | 71 | return xyz, label, cname, rgb -------------------------------------------------------------------------------- /datasets/scanobjectnn.py: -------------------------------------------------------------------------------- 1 | import os 2 | import glob 3 | import h5py 4 | import numpy as np 5 | 6 | from torch.utils.data import Dataset 7 | 8 | from .templates import text_prompts 9 | 10 | 11 | class ScanObjectNN(Dataset): 12 | """ 13 | This dataset is used for testing only.
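`cfg.sonn_variant` selects a subdirectory of `cfg.scanobjectnn_root` (e.g. the `hardest` perturbed split referenced elsewhere in this repo), and the first `test_*.h5` file found there supplies the `data` and `label` arrays.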
14 | """ 15 | 16 | def __init__(self, cfg): 17 | self.lm3d = cfg.lm3d 18 | 19 | self.template = text_prompts 20 | 21 | self.dataset_dir = cfg.scanobjectnn_root 22 | self.dataset_variant = cfg.sonn_variant 23 | 24 | self.classnames = [] 25 | text_file = os.path.join(self.dataset_dir, 'shape_names.txt') 26 | with open(text_file, 'r') as f: 27 | lines = f.readlines() 28 | for line in lines: 29 | classname = line.strip() 30 | self.classnames.append(classname) 31 | 32 | data_file_list = glob.glob(f'{self.dataset_dir}/{self.dataset_variant}/test_*.h5') 33 | 34 | f = h5py.File(data_file_list[0], 'r') 35 | self.test_data = f['data'][:] 36 | self.test_label = f['label'][:] 37 | 38 | self.npoints = cfg.npoints 39 | 40 | def __len__(self): 41 | return len(self.test_label) 42 | 43 | def __getitem__(self, idx): 44 | pc = self.test_data[idx][:self.npoints].astype(np.float32) 45 | 46 | # NOTE swap y,z axes 47 | if self.lm3d == 'openshape': 48 | pc[:, [1, 2]] = pc[:, [2, 1]] 49 | 50 | label = self.test_label[idx].astype(np.int32) 51 | classname = self.classnames[int(label)] 52 | 53 | rgb = np.ones_like(pc) * 0.4 54 | return pc, label, classname, rgb -------------------------------------------------------------------------------- /datasets/scanobjnn.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import numpy as np 3 | 4 | import torch 5 | from torch.utils.data import Dataset 6 | 7 | from .templates import text_prompts, sonn_gpt35_prompts, sonn_gpt4_prompts, sonn_pointllm_prompts 8 | 9 | 10 | def pc_normalize(pc): 11 | ''' NOTE `pc_normalize` and `normalize_pc` are near-identical: both center the cloud at its centroid and divide by the max point norm; `normalize_pc` below additionally guards against near-degenerate (all-zero) clouds. ''' 12 | centroid = np.mean(pc, axis=0) 13 | pc = pc - centroid 14 | m = np.max(np.sqrt(np.sum(pc**2, axis=1))) 15 | pc = pc / m 16 | return pc 17 | 18 | 19 | def normalize_pc(pc): 20 | # normalize pc to [-1, 1] 21 | pc = pc - np.mean(pc, axis=0) 22 | if np.max(np.linalg.norm(pc, axis=1)) < 1e-6: 23 | pc = np.zeros_like(pc) 24 | else: 25 | pc = pc / np.max(np.linalg.norm(pc, axis=1)) 26 | return pc 27 | 28 | 29 | class ScanObjNN(Dataset): 30 | def __init__(self, config): 31 | self.lm3d = config.lm3d 32 | 33 | # option 1: use the manual template from `templates.py` 34 | self.template = text_prompts 35 | # option 2: use the responses from the LLM 36 | # self.template = sonn_gpt35_prompts 37 | # self.template = sonn_gpt4_prompts 38 | # self.template = sonn_pointllm_prompts 39 | 40 | self.npoints = config.npoints 41 | self.data_path = config.scanobjnn_root 42 | 43 | self.classnames = ["bag", "bin", "box", "cabinet", "chair", "desk", "display", "door", 44 | "shelf", "table", "bed", "pillow", "sink", "sofa", "toilet"] 45 | 46 | self.openshape_data = np.load('%s/xyz_label.npy' % self.data_path, allow_pickle=True).item() 47 | 48 | def __len__(self): 49 | return len(self.openshape_data['xyz']) 50 | 51 | def pc_norm(self, pc): 52 | """ pc: NxC, return NxC """ 53 | centroid = np.mean(pc, axis=0) 54 | pc = pc - centroid 55 | m = np.max(np.sqrt(np.sum(pc ** 2, axis=1))) 56 | pc = pc / m 57 | return pc 58 | 59 | def __getitem__(self, index): 60 | pc = copy.deepcopy(self.openshape_data['xyz'][index][:self.npoints]) 61 | 62 | xyz = pc 63 | 64 | if 'rgb' not in self.openshape_data: 65 | rgb = np.ones_like(xyz) * 0.4 66 | else: 67 | rgb = self.openshape_data['rgb'][index] 68 | 69 | if self.lm3d == 'openshape': 70 | xyz[:, [1, 2]] = xyz[:, [2, 1]] 71 | xyz = normalize_pc(xyz) 72 | else: 73 | xyz = pc_normalize(xyz) 74 | 75 | xyz = torch.from_numpy(xyz).float() 76 | rgb = 
torch.from_numpy(rgb).float() 77 | 78 | label = self.openshape_data['label'][index] 79 | label_name = self.classnames[label] 80 | label = label.astype(np.int32) 81 | 82 | return xyz, label, label_name, rgb -------------------------------------------------------------------------------- /datasets/sim2real_sonn.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | 4 | from torch.utils.data import Dataset 5 | 6 | from .templates import text_prompts 7 | 8 | 9 | def pc_normalize(pc): 10 | ''' NOTE `pc_normalize` and `normalize_pc` are near-identical: both center the cloud at its centroid and divide by the max point norm; `normalize_pc` below additionally guards against near-degenerate (all-zero) clouds. ''' 11 | centroid = np.mean(pc, axis=0) 12 | pc = pc - centroid 13 | m = np.max(np.sqrt(np.sum(pc**2, axis=1))) 14 | pc = pc / m 15 | return pc 16 | 17 | 18 | def normalize_pc(pc): 19 | # normalize pc to [-1, 1] 20 | pc = pc - np.mean(pc, axis=0) 21 | if np.max(np.linalg.norm(pc, axis=1)) < 1e-6: 22 | pc = np.zeros_like(pc) 23 | else: 24 | pc = pc / np.max(np.linalg.norm(pc, axis=1)) 25 | return pc 26 | 27 | 28 | class Sim2Real_SONN(Dataset): 29 | 30 | def __init__(self, cfg): 31 | """ 32 | This dataset is used for testing only. 33 | """ 34 | self.lm3d = cfg.lm3d 35 | self.template = text_prompts 36 | 37 | self.npoints = cfg.npoints # 2048 by default 38 | sim2real_type = cfg.sim2real_type 39 | self.dataset_dir = os.path.join('data/xset/sim2real', sim2real_type) 40 | 41 | self.classnames, name2idx = self.read_classnames(self.dataset_dir) 42 | 43 | self.test_data, self.test_label = self.load_data(name2idx, 'test') 44 | 45 | def load_data(self, name2idx, split): 46 | data_list, label_list = [], [] 47 | for cls in os.listdir(self.dataset_dir): 48 | cls_dir = os.path.join(self.dataset_dir, cls) # data/xset/sim2real/so_obj_only_9/bed 49 | 50 | if os.path.isdir(cls_dir): 51 | dir_f = os.path.join(cls_dir, split) # data/xset/sim2real/shapenet_9/bed/test 52 | label = name2idx[cls] 53 | 54 | for f in os.listdir(dir_f): 55 | # shape: (2048, 3) -> (1, 2048, 3) 56 | points = np.expand_dims(np.load(os.path.join(dir_f, f)), axis=0) 57 | data_list.append(points) 58 | label_list.append([label]) 59 | 60 | data = np.concatenate(data_list, axis=0).astype('float32') 61 | label = np.array(label_list).astype("int64") 62 | 63 | return data, label 64 | 65 | @staticmethod 66 | def read_classnames(dataset_dir): 67 | """Return the sorted list of class names and a dictionary 68 | with key-value pairs of <classname>: <index>. 69 | """ 70 | classnames = [] 71 | name2idx = dict() 72 | 73 | names = sorted(os.listdir(dataset_dir)) 74 | 75 | for idx, name in enumerate(names): 76 | if os.path.isdir(os.path.join(dataset_dir, name)): 77 | classnames.append(name) 78 | name2idx[name] = idx 79 | 80 | return classnames, name2idx 81 | 82 | def __len__(self): 83 | return len(self.test_label) 84 | 85 | def __getitem__(self, idx): 86 | xyz = self.test_data[idx][:self.npoints] 87 | label = self.test_label[idx] 88 | cname = self.classnames[int(label)] 89 | 90 | if self.lm3d == 'openshape': 91 | xyz[:, [1, 2]] = xyz[:, [2, 1]] 92 | xyz = normalize_pc(xyz) 93 | else: 94 | xyz = pc_normalize(xyz) 95 | 96 | rgb = np.ones_like(xyz) * 0.4 97 | 98 | return xyz, label, cname, rgb 99 | -------------------------------------------------------------------------------- /datasets/snv2_c.py: -------------------------------------------------------------------------------- 1 | import os 2 | import h5py 3 | import numpy as np 4 | 5 | from torch.utils.data import Dataset 6 | 7 | from .templates import text_prompts 8 | 9 | 10 | class SNV2_C(Dataset): 11 | """SNV2_C(orruption). 
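    (Editor's note) i.e. the corrupted ShapeNetV2 test set: `cfg.cor_type` selects
    a single `{cor_type}.h5` corruption file under `cfg.snv2_c_root`, mirroring how
    SONN_C below handles the corrupted ScanObjectNN variants.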
12 | 13 | This dataset is used for testing only. 14 | """ 15 | 16 | def __init__(self, cfg): 17 | self.lm3d = cfg.lm3d 18 | 19 | self.template = text_prompts 20 | 21 | self.dataset_dir = cfg.snv2_c_root 22 | 23 | self.classnames = [] 24 | text_file = os.path.join(self.dataset_dir, 'shape_names.txt') 25 | with open(text_file, 'r') as f: 26 | lines = f.readlines() 27 | for line in lines: 28 | classname = line.strip() 29 | self.classnames.append(classname) 30 | 31 | cor_type = cfg.cor_type 32 | data_file = f'{cor_type}.h5' 33 | 34 | f = h5py.File(f'{self.dataset_dir}/{data_file}', 'r') 35 | self.test_data = f['data'][:] 36 | self.test_label = f['label'][:] 37 | 38 | self.npoints = cfg.npoints 39 | 40 | def __len__(self): 41 | return len(self.test_label) 42 | 43 | def __getitem__(self, idx): 44 | pc = self.test_data[idx].astype(np.float32) 45 | 46 | # NOTE swap y,z axes 47 | if self.lm3d == 'openshape': 48 | pc[:, [1, 2]] = pc[:, [2, 1]] 49 | 50 | label = self.test_label[idx].astype(np.int32) 51 | classname = self.classnames[int(label)] 52 | 53 | rgb = np.ones_like(pc) * 0.4 54 | return pc, label, classname, rgb -------------------------------------------------------------------------------- /datasets/sonn_c.py: -------------------------------------------------------------------------------- 1 | import os 2 | import h5py 3 | import numpy as np 4 | 5 | from torch.utils.data import Dataset 6 | 7 | from .templates import text_prompts 8 | 9 | 10 | class SONN_C(Dataset): 11 | """SONN_C(orruption). 12 | 13 | This dataset is used for testing only. 14 | """ 15 | 16 | def __init__(self, cfg): 17 | self.lm3d = cfg.lm3d 18 | 19 | self.template = text_prompts 20 | 21 | self.dataset_dir = cfg.sonn_c_root 22 | self.dataset_variant = cfg.sonn_variant 23 | 24 | self.classnames = [] 25 | text_file = os.path.join(self.dataset_dir, 'shape_names.txt') 26 | with open(text_file, 'r') as f: 27 | lines = f.readlines() 28 | for line in lines: 29 | classname = line.strip() 30 | self.classnames.append(classname) 31 | 32 | cor_type = cfg.cor_type 33 | data_file = f'{cor_type}.h5' 34 | 35 | f = h5py.File(f'{self.dataset_dir}/{self.dataset_variant}/{data_file}', 'r') 36 | self.test_data = f['data'][:] 37 | self.test_label = f['label'][:] 38 | 39 | self.npoints = cfg.npoints 40 | 41 | def __len__(self): 42 | return len(self.test_label) 43 | 44 | def __getitem__(self, idx): 45 | pc = self.test_data[idx].astype(np.float32) 46 | 47 | # NOTE swap y,z axes 48 | if self.lm3d == 'openshape': 49 | pc[:, [1, 2]] = pc[:, [2, 1]] 50 | 51 | label = self.test_label[idx].astype(np.int32) 52 | classname = self.classnames[int(label)] 53 | 54 | rgb = np.ones_like(pc) * 0.4 55 | return pc, label, classname, rgb -------------------------------------------------------------------------------- /datasets/templates.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | text_prompts = [ 4 | "a point cloud model of {}.", 5 | "There is a {} in the scene.", 6 | "There is the {} in the scene.", 7 | "a photo of a {} in the scene.", 8 | "a photo of the {} in the scene.", 9 | "a photo of one {} in the scene.", 10 | "itap of a {}.", 11 | "itap of my {}.", 12 | "itap of the {}.", 13 | "a photo of a {}.", 14 | "a photo of my {}.", 15 | "a photo of the {}.", 16 | "a photo of one {}.", 17 | "a photo of many {}.", 18 | "a good photo of a {}.", 19 | "a good photo of the {}.", 20 | "a bad photo of a {}.", 21 | "a bad photo of the {}.", 22 | "a photo of a nice {}.", 23 | "a photo of the nice {}.", 24 | "a photo of a 
cool {}.", 25 | "a photo of the cool {}.", 26 | "a photo of a weird {}.", 27 | "a photo of the weird {}.", 28 | "a photo of a small {}.", 29 | "a photo of the small {}.", 30 | "a photo of a large {}.", 31 | "a photo of the large {}.", 32 | "a photo of a clean {}.", 33 | "a photo of the clean {}.", 34 | "a photo of a dirty {}.", 35 | "a photo of the dirty {}.", 36 | "a bright photo of a {}.", 37 | "a bright photo of the {}.", 38 | "a dark photo of a {}.", 39 | "a dark photo of the {}.", 40 | "a photo of a hard to see {}.", 41 | "a photo of the hard to see {}.", 42 | "a low resolution photo of a {}.", 43 | "a low resolution photo of the {}.", 44 | "a cropped photo of a {}.", 45 | "a cropped photo of the {}.", 46 | "a close-up photo of a {}.", 47 | "a close-up photo of the {}.", 48 | "a jpeg corrupted photo of a {}.", 49 | "a jpeg corrupted photo of the {}.", 50 | "a blurry photo of a {}.", 51 | "a blurry photo of the {}.", 52 | "a pixelated photo of a {}.", 53 | "a pixelated photo of the {}.", 54 | "a black and white photo of the {}.", 55 | "a black and white photo of a {}.", 56 | "a plastic {}.", 57 | "the plastic {}.", 58 | "a toy {}.", 59 | "the toy {}.", 60 | "a plushie {}.", 61 | "the plushie {}.", 62 | "a cartoon {}.", 63 | "the cartoon {}.", 64 | "an embroidered {}.", 65 | "the embroidered {}.", 66 | "a painting of the {}.", 67 | "a painting of a {}." 68 | ] 69 | 70 | text_prompts_pc2_view = [ 71 | "a point cloud depth map of a(n) {}.", 72 | ] 73 | 74 | with open('llm/mn40_gpt35_prompts.json') as fin: 75 | mn40_gpt35_prompts = json.load(fin) 76 | 77 | with open('llm/mn40_gpt4_prompts.json') as fin: 78 | mn40_gpt4_prompts = json.load(fin) 79 | 80 | with open('llm/mn40_pointllm_prompts.json') as fin: 81 | mn40_pointllm_prompts = json.load(fin) 82 | 83 | 84 | with open('llm/sonn_gpt35_prompts.json') as fin: 85 | sonn_gpt35_prompts = json.load(fin) 86 | 87 | with open('llm/sonn_gpt4_prompts.json') as fin: 88 | sonn_gpt4_prompts = json.load(fin) 89 | 90 | with open('llm/sonn_pointllm_prompts.json') as fin: 91 | sonn_pointllm_prompts = json.load(fin) 92 | 93 | 94 | with open('llm/omniobject3d-gpt3.5-turbo.json') as fin: 95 | omni3d_gpt35_prompts = json.load(fin) 96 | 97 | omni3d_gpt4_prompts = [] 98 | omni3d_pointllm_prompts = [] 99 | 100 | with open('llm/objaverse_lvis-gpt3.5-turbo.json') as fin: 101 | o_lvis_gpt35_prompts = json.load(fin) 102 | 103 | o_lvis_gpt4_prompts = [] 104 | o_lvis_pointllm_prompts = [] 105 | -------------------------------------------------------------------------------- /llm/llm_generate_prompts.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import openai 4 | import json 5 | from tqdm import tqdm 6 | 7 | 8 | openai.api_key = os.getenv("OPENAI_API_KEY") 9 | dataset = sys.argv[1] # omniobject3d, objaverse_lvis 10 | llm_name = sys.argv[2] # gpt-3.5-turbo, gpt-4 11 | 12 | # NOTE replace the category_list with the desired categories 13 | category_list = [] 14 | if dataset == "omniobject3d": 15 | for cls in os.listdir('data/omniobject3d/1024'): 16 | if os.path.isdir(os.path.join('data/omniobject3d/1024', cls)): 17 | category_list.append(cls) 18 | 19 | elif dataset == "objaverse_lvis": 20 | with open('data/objaverse_lvis/classnames.txt') as fin: 21 | lines = fin.readlines() 22 | category_list = [line.strip() for line in lines] 23 | 24 | else: 25 | print("Invalid dataset") 26 | sys.exit(1) 27 | 28 | all_responses = {} 29 | 30 | vowel_list = ['A', 'E', 'I', 'O', 'U'] 31 | 32 | for category in 
tqdm(category_list): 33 | 34 | if category[0].upper() in vowel_list: 35 | article = "an" 36 | else: 37 | article = "a" 38 | 39 | prompts = [] 40 | 41 | prompts.append(f"What does {article} {category} point cloud look like?") 42 | prompts.append(f"What are the identifying characteristics of {article} {category} point cloud?") 43 | prompts.append(f"Please describe {article} {category} point cloud with details.") 44 | prompts.append(f"Make a complete and meaningful sentence with the following words: {category}, point cloud.") 45 | with open('prompts.json', 'w') as f: 46 | json.dump(prompts, f, indent=4) 47 | 48 | all_result = [] 49 | for curr_prompt in prompts: 50 | response = openai.ChatCompletion.create( 51 | model=llm_name, 52 | messages=[ 53 | {"role": "system", "content": "You are a helpful assistant."}, 54 | {"role": "user", "content": curr_prompt} 55 | ], 56 | max_tokens=70, 57 | n=10, 58 | stop="." 59 | ) 60 | 61 | for r in range(len(response["choices"])): 62 | result = response["choices"][r]["message"]["content"] 63 | all_result.append(result.replace("\n\n", "") + ".") 64 | 65 | all_responses[category] = all_result 66 | 67 | with open(f'{dataset}-{llm_name}.json', 'w') as f: 68 | json.dump(all_responses, f, indent=4) -------------------------------------------------------------------------------- /models/openshape/__init__.py: -------------------------------------------------------------------------------- 1 | from .ppta import make 2 | 3 | def create_openshape(config): 4 | if config.model.name == "PointBERT": 5 | model = make(config) 6 | else: 7 | raise NotImplementedError("Model %s not supported." % config.model.name) 8 | return model 9 | -------------------------------------------------------------------------------- /models/openshape/config.yaml: -------------------------------------------------------------------------------- 1 | device: cuda:0 2 | project_name: 3D_CLIP 3 | clip_embed_dim: 1280 4 | clip_embed_version: OpenCLIP 5 | model: 6 | name: PointBERT 7 | in_channel: 6 8 | out_channel: ${clip_embed_dim} 9 | embedding_channel: 1024 10 | voxel_size: 0.02 11 | scaling: 4 # 4 for PointBERT-vit-G and 3 for PointBERT-vit-L in `src/models/ppat.py` 12 | 13 | training: 14 | use_openclip_optimizer_scheduler: False 15 | lr: 0.001 16 | lr_decay_step: 10000 17 | lr_decay_rate: 0.95 18 | beta1: 0.9 19 | beta2: 0.999 20 | eps: 1.0e-8 21 | warmup: 10000 22 | max_epoch: 1000 23 | log_freq: 10 24 | save_freq: 20 25 | lambda_img_contras: 1 26 | lambda_text_contras: 1 27 | use_image_proj: False 28 | use_text_proj: False 29 | logit_scale_init: 14.28 30 | use_mask: False 31 | mask_threshold: 0.1 32 | 33 | dataset: 34 | name: Four 35 | train_split: meta_data/split/train_all.json 36 | train_partial: -1 37 | num_points: 10000 38 | num_workers: 6 39 | train_batch_size: 200 40 | use_knn_negative_sample: False 41 | negative_sample_num: 1 42 | knn_path: meta_data/point_feat_knn.npy 43 | y_up: True 44 | normalize: True 45 | random_z_rotate: True 46 | use_color: True 47 | rgb_random_drop_prob: 0.5 48 | augment: True 49 | text_source: [text, caption, retrieval_text] 50 | use_text_filtering: True 51 | use_prompt_engineering: True 52 | gpt4_filtering_path: meta_data/gpt4_filtering.json 53 | 54 | modelnet40: 55 | test_split: meta_data/modelnet40/test_split.json 56 | test_pc: meta_data/modelnet40/test_pc.npy 57 | num_points: 10000 58 | num_workers: 0 59 | test_batch_size: 100 60 | clip_feat_path: meta_data/modelnet40/cat_name_pt_feat.npy 61 | y_up: True 62 | 63 | objaverse_lvis: 64 | split: 
meta_data/split/lvis.json 65 | clip_feat_path: meta_data/lvis_cat_name_pt_feat.npy 66 | num_points: 10000 67 | num_workers: 6 68 | batch_size: 100 69 | y_up: True 70 | normalize: True 71 | use_color: True 72 | 73 | scanobjectnn: 74 | data_path: meta_data/scanobjectnn/xyz_label.npy 75 | num_points: 10000 76 | num_workers: 0 77 | test_batch_size: 100 78 | clip_feat_path: meta_data/scanobjectnn/cat_name_pt_feat.npy 79 | y_up: True 80 | 81 | modelnet_c: 82 | num_workers: 0 83 | test_batch_size: 100 84 | data_root: meta_data/modelnet_c 85 | cor_type: add_global_2 86 | use_color: True 87 | y_up: True 88 | 89 | modelnet40_c: 90 | num_workers: 0 91 | test_batch_size: 100 92 | data_root: meta_data/modelnet40_c 93 | cor_type: background_2 94 | use_color: True 95 | y_up: True -------------------------------------------------------------------------------- /models/ulip/__init__.py: -------------------------------------------------------------------------------- 1 | from .ulip_model import ULIP 2 | from .text_encoder import TextEncoder 3 | 4 | 5 | def create_clip_text_encoder(args): 6 | model = TextEncoder(args) 7 | return model 8 | 9 | 10 | def create_ulip(args): 11 | model = ULIP(args) 12 | return model -------------------------------------------------------------------------------- /models/ulip/pointbert/PointTransformer_8192point.yaml: -------------------------------------------------------------------------------- 1 | optimizer : { 2 | type: AdamW, 3 | kwargs: { 4 | lr : 0.0005, 5 | weight_decay : 0.05 6 | }} 7 | 8 | scheduler: { 9 | type: CosLR, 10 | kwargs: { 11 | epochs: 300, 12 | initial_epochs : 10 13 | }} 14 | 15 | model : { 16 | NAME: PointTransformer, 17 | trans_dim: 384, 18 | depth: 12, 19 | drop_path_rate: 0.1, 20 | cls_dim: 40, 21 | num_heads: 6, 22 | group_size: 32, 23 | num_group: 512, 24 | encoder_dims: 256, 25 | } 26 | npoints: 8192 27 | total_bs : 32 28 | step_per_update : 1 29 | max_epoch : 300 30 | grad_norm_clip : 10 31 | 32 | consider_metric: CDL1 -------------------------------------------------------------------------------- /models/ulip/pointbert/checkpoint.py: -------------------------------------------------------------------------------- 1 | from collections import defaultdict 2 | import torch.nn as nn 3 | 4 | from typing import Any 5 | from typing import Optional, List, Dict, NamedTuple, Tuple, Iterable 6 | 7 | from termcolor import colored 8 | 9 | def get_missing_parameters_message(keys: List[str]) -> str: 10 | """ 11 | Get a logging-friendly message to report parameter names (keys) that are in 12 | the model but not found in a checkpoint. 13 | Args: 14 | keys (list[str]): List of keys that were not found in the checkpoint. 15 | Returns: 16 | str: message. 17 | """ 18 | groups = _group_checkpoint_keys(keys) 19 | msg = "Some model parameters or buffers are not found in the checkpoint:\n" 20 | msg += "\n".join( 21 | " " + colored(k + _group_to_str(v), "blue") for k, v in groups.items() 22 | ) 23 | return msg 24 | 25 | 26 | def get_unexpected_parameters_message(keys: List[str]) -> str: 27 | """ 28 | Get a logging-friendly message to report parameter names (keys) that are in 29 | the checkpoint but not found in the model. 30 | Args: 31 | keys (list[str]): List of keys that were not found in the model. 32 | Returns: 33 | str: message. 
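        Example (editor's sketch): keys ['head.weight', 'head.bias'] are grouped
        by their common prefix and rendered on one line as ``head.{weight, bias}``
        (see _group_checkpoint_keys and _group_to_str below).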
34 | """ 35 | groups = _group_checkpoint_keys(keys) 36 | msg = "The checkpoint state_dict contains keys that are not used by the model:\n" 37 | msg += "\n".join( 38 | " " + colored(k + _group_to_str(v), "magenta") for k, v in groups.items() 39 | ) 40 | return msg 41 | 42 | 43 | def _strip_prefix_if_present(state_dict: Dict[str, Any], prefix: str) -> None: 44 | """ 45 | Strip the prefix in metadata, if any. 46 | Args: 47 | state_dict (OrderedDict): a state-dict to be loaded to the model. 48 | prefix (str): prefix. 49 | """ 50 | keys = sorted(state_dict.keys()) 51 | if not all(len(key) == 0 or key.startswith(prefix) for key in keys): 52 | return 53 | 54 | for key in keys: 55 | newkey = key[len(prefix):] 56 | state_dict[newkey] = state_dict.pop(key) 57 | 58 | # also strip the prefix in metadata, if any.. 59 | try: 60 | metadata = state_dict._metadata # pyre-ignore 61 | except AttributeError: 62 | pass 63 | else: 64 | for key in list(metadata.keys()): 65 | # for the metadata dict, the key can be: 66 | # '': for the DDP module, which we want to remove. 67 | # 'module': for the actual model. 68 | # 'module.xx.xx': for the rest. 69 | 70 | if len(key) == 0: 71 | continue 72 | newkey = key[len(prefix):] 73 | metadata[newkey] = metadata.pop(key) 74 | 75 | 76 | def _group_checkpoint_keys(keys: List[str]) -> Dict[str, List[str]]: 77 | """ 78 | Group keys based on common prefixes. A prefix is the string up to the final 79 | "." in each key. 80 | Args: 81 | keys (list[str]): list of parameter names, i.e. keys in the model 82 | checkpoint dict. 83 | Returns: 84 | dict[list]: keys with common prefixes are grouped into lists. 85 | """ 86 | groups = defaultdict(list) 87 | for key in keys: 88 | pos = key.rfind(".") 89 | if pos >= 0: 90 | head, tail = key[:pos], [key[pos + 1:]] 91 | else: 92 | head, tail = key, [] 93 | groups[head].extend(tail) 94 | return groups 95 | 96 | 97 | def _group_to_str(group: List[str]) -> str: 98 | """ 99 | Format a group of parameter name suffixes into a loggable string. 100 | Args: 101 | group (list[str]): list of parameter name suffixes. 102 | Returns: 103 | str: formated string. 104 | """ 105 | if len(group) == 0: 106 | return "" 107 | 108 | if len(group) == 1: 109 | return "." + group[0] 110 | 111 | return ".{" + ", ".join(group) + "}" 112 | 113 | 114 | def _named_modules_with_dup( 115 | model: nn.Module, prefix: str = "" 116 | ) -> Iterable[Tuple[str, nn.Module]]: 117 | """ 118 | The same as `model.named_modules()`, except that it includes 119 | duplicated modules that have more than one name. 120 | """ 121 | yield prefix, model 122 | for name, module in model._modules.items(): # pyre-ignore 123 | if module is None: 124 | continue 125 | submodule_prefix = prefix + ("." if prefix else "") + name 126 | yield from _named_modules_with_dup(module, submodule_prefix) -------------------------------------------------------------------------------- /models/ulip/pointbert/logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import torch.distributed as dist 3 | 4 | logger_initialized = {} 5 | 6 | def get_root_logger(log_file=None, log_level=logging.INFO, name='main'): 7 | """Get root logger and add a keyword filter to it. 8 | The logger will be initialized if it has not been initialized. By default a 9 | StreamHandler will be added. If `log_file` is specified, a FileHandler will 10 | also be added. The name of the root logger is the top-level package name, 11 | e.g., "mmdet3d". 
12 | Args: 13 | log_file (str, optional): File path of log. Defaults to None. 14 | log_level (int, optional): The level of logger. 15 | Defaults to logging.INFO. 16 | name (str, optional): The name of the root logger, also used as a 17 | filter keyword. Defaults to 'mmdet3d'. 18 | Returns: 19 | :obj:`logging.Logger`: The obtained logger 20 | """ 21 | logger = get_logger(name=name, log_file=log_file, log_level=log_level) 22 | # add a logging filter 23 | logging_filter = logging.Filter(name) 24 | logging_filter.filter = lambda record: record.find(name) != -1 25 | 26 | return logger 27 | 28 | 29 | def get_logger(name, log_file=None, log_level=logging.INFO, file_mode='w'): 30 | """Initialize and get a logger by name. 31 | If the logger has not been initialized, this method will initialize the 32 | logger by adding one or two handlers, otherwise the initialized logger will 33 | be directly returned. During initialization, a StreamHandler will always be 34 | added. If `log_file` is specified and the process rank is 0, a FileHandler 35 | will also be added. 36 | Args: 37 | name (str): Logger name. 38 | log_file (str | None): The log filename. If specified, a FileHandler 39 | will be added to the logger. 40 | log_level (int): The logger level. Note that only the process of 41 | rank 0 is affected, and other processes will set the level to 42 | "Error" thus be silent most of the time. 43 | file_mode (str): The file mode used in opening log file. 44 | Defaults to 'w'. 45 | Returns: 46 | logging.Logger: The expected logger. 47 | """ 48 | logger = logging.getLogger(name) 49 | if name in logger_initialized: 50 | return logger 51 | # handle hierarchical names 52 | # e.g., logger "a" is initialized, then logger "a.b" will skip the 53 | # initialization since it is a child of "a". 54 | for logger_name in logger_initialized: 55 | if name.startswith(logger_name): 56 | return logger 57 | 58 | # handle duplicate logs to the console 59 | # Starting in 1.8.0, PyTorch DDP attaches a StreamHandler (NOTSET) 60 | # to the root logger. As logger.propagate is True by default, this root 61 | # level handler causes logging messages from rank>0 processes to 62 | # unexpectedly show up on the console, creating much unwanted clutter. 63 | # To fix this issue, we set the root logger's StreamHandler, if any, to log 64 | # at the ERROR level. 65 | for handler in logger.root.handlers: 66 | if type(handler) is logging.StreamHandler: 67 | handler.setLevel(logging.ERROR) 68 | 69 | stream_handler = logging.StreamHandler() 70 | handlers = [stream_handler] 71 | 72 | if dist.is_available() and dist.is_initialized(): 73 | rank = dist.get_rank() 74 | else: 75 | rank = 0 76 | 77 | # only rank 0 will add a FileHandler 78 | if rank == 0 and log_file is not None: 79 | # Here, the default behaviour of the official logger is 'a'. Thus, we 80 | # provide an interface to change the file mode to the default 81 | # behaviour. 82 | file_handler = logging.FileHandler(log_file, file_mode) 83 | handlers.append(file_handler) 84 | 85 | formatter = logging.Formatter( 86 | '%(asctime)s - %(name)s - %(levelname)s - %(message)s') 87 | for handler in handlers: 88 | handler.setFormatter(formatter) 89 | handler.setLevel(log_level) 90 | logger.addHandler(handler) 91 | 92 | if rank == 0: 93 | logger.setLevel(log_level) 94 | else: 95 | logger.setLevel(logging.ERROR) 96 | 97 | logger_initialized[name] = True 98 | 99 | 100 | return logger 101 | 102 | 103 | def print_log(msg, logger=None, level=logging.INFO): 104 | """Print a log message. 
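    For example (editor's note), ``print_log('done')`` falls back to the built-in
    ``print``, while ``print_log('done', logger='silent')`` emits nothing; the
    dispatch rules are spelled out under Args below.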
105 | Args: 106 | msg (str): The message to be logged. 107 | logger (logging.Logger | str | None): The logger to be used. 108 | Some special loggers are: 109 | - "silent": no message will be printed. 110 | - other str: the logger obtained with `get_root_logger(logger)`. 111 | - None: The `print()` method will be used to print log messages. 112 | level (int): Logging level. Only available when `logger` is a Logger 113 | object or "root". 114 | """ 115 | if logger is None: 116 | print(msg) 117 | elif isinstance(logger, logging.Logger): 118 | logger.log(level, msg) 119 | elif logger == 'silent': 120 | pass 121 | elif isinstance(logger, str): 122 | _logger = get_logger(logger) 123 | _logger.log(level, msg) 124 | else: 125 | raise TypeError( 126 | 'logger should be either a logging.Logger object, str, ' 127 | f'"silent" or None, but got {type(logger)}') -------------------------------------------------------------------------------- /models/ulip/text_encoder.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from collections import OrderedDict 3 | 4 | import torch 5 | from torch import nn 6 | 7 | 8 | class LayerNorm(nn.LayerNorm): 9 | """Subclass torch's LayerNorm to handle fp16.""" 10 | 11 | def forward(self, x: torch.Tensor): 12 | orig_type = x.dtype 13 | # NOTE comment by jerry 14 | # ret = super().forward(x.type(torch.float32)) 15 | ret = super().forward(x) 16 | return ret.type(orig_type) 17 | 18 | 19 | class QuickGELU(nn.Module): 20 | def forward(self, x: torch.Tensor): 21 | return x * torch.sigmoid(1.702 * x) 22 | 23 | 24 | class ResidualAttentionBlock(nn.Module): 25 | def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None): 26 | super().__init__() 27 | 28 | self.attn = nn.MultiheadAttention(d_model, n_head) 29 | self.ln_1 = LayerNorm(d_model) 30 | self.mlp = nn.Sequential(OrderedDict([ 31 | ("c_fc", nn.Linear(d_model, d_model * 4)), 32 | ("gelu", QuickGELU()), 33 | ("c_proj", nn.Linear(d_model * 4, d_model)) 34 | ])) 35 | self.ln_2 = LayerNorm(d_model) 36 | self.attn_mask = attn_mask 37 | 38 | def attention(self, x: torch.Tensor): 39 | self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None 40 | return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] 41 | 42 | def forward(self, x: torch.Tensor): 43 | x = x + self.attention(self.ln_1(x)) 44 | x = x + self.mlp(self.ln_2(x)) 45 | return x 46 | 47 | 48 | class TextTransformer(nn.Module): 49 | def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None): 50 | super().__init__() 51 | self.width = width 52 | self.layers = layers 53 | self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)]) 54 | 55 | def forward(self, x: torch.Tensor): 56 | return self.resblocks(x) 57 | 58 | 59 | class TextEncoder(nn.Module): 60 | def __init__(self, args): 61 | super().__init__() 62 | # --- text encoder 63 | vocab_size = 49408 64 | transformer_width = 512 65 | transformer_layers = 12 66 | transformer_heads = 8 67 | 68 | self.context_length = 77 69 | self.token_embedding = nn.Embedding(vocab_size, transformer_width) 70 | self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width)) 71 | self.ln_final = LayerNorm(transformer_width) 72 | 73 | self.transformer = TextTransformer( 74 | width=transformer_width, 75 | layers=transformer_layers, 76 | heads=transformer_heads, 77 | 
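            # (Editor's note) additive causal mask: zeros on and below the diagonal,
            # -inf above it (see build_attention_mask below), so each token attends
            # only to itself and earlier positions.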
attn_mask=self.build_attention_mask(), 78 | ) 79 | # embed_dim: 512 80 | self.text_projection = nn.Parameter(torch.empty(transformer_width, args.embed_dim)) 81 | self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) 82 | 83 | def build_attention_mask(self): 84 | # lazily create causal attention mask, with full attention between the vision tokens 85 | # pytorch uses additive attention mask; fill with -inf 86 | mask = torch.empty(self.context_length, self.context_length) 87 | mask.fill_(float("-inf")) 88 | mask.triu_(1) # zero out the lower diagonal 89 | return mask 90 | 91 | def encode_text(self, text): 92 | # text: (batch, n_ctx=77) 93 | x = self.token_embedding(text) # [batch_size, n_ctx, d_model] 94 | x = x + self.positional_embedding 95 | x = x.permute(1, 0, 2) # NLD -> LND 96 | x = self.transformer(x) 97 | x = x.permute(1, 0, 2) # LND -> NLD 98 | x = self.ln_final(x) 99 | 100 | # take features from the eot embedding (eot_token is the highest number in each sequence) 101 | x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection 102 | 103 | return x 104 | 105 | def forward(self, text): 106 | # text: (batch, n_ctx=77) 107 | return self.encode_text(text) -------------------------------------------------------------------------------- /models/ulip/ulip_model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | 4 | from .pointbert.point_encoder import PointTransformer 5 | 6 | 7 | class ULIP(nn.Module): 8 | def __init__(self, args): 9 | super().__init__() 10 | self.cache_type = args.cache_type 11 | 12 | # --- point encoder 13 | self.point_encoder = PointTransformer(args) 14 | self.pc_projection = nn.Parameter(torch.empty(args.pc_feat_dim, 512)) 15 | 16 | def forward(self, pc): 17 | if self.cache_type == 'global': 18 | pc_feat = self.point_encoder(pc) 19 | pc_embed = pc_feat @ self.pc_projection 20 | return pc_embed 21 | elif self.cache_type == 'local': 22 | patch_centers = self.point_encoder(pc) 23 | patch_embed = patch_centers @ self.pc_projection 24 | return patch_embed 25 | elif self.cache_type == 'hierarchical': # NOTE 'hierarchical' caches 26 | pc_feat, patch_centers = self.point_encoder(pc) 27 | pc_embed = pc_feat @ self.pc_projection 28 | patch_embed = patch_centers @ self.pc_projection 29 | return pc_embed, patch_embed 30 | else: # NOTE for visualization purpose 31 | pc_feat, all_patches, patch_centers = self.point_encoder(pc) 32 | pc_embed = pc_feat @ self.pc_projection 33 | all_patch_embed = all_patches @ self.pc_projection 34 | patch_embed = patch_centers @ self.pc_projection 35 | return pc_embed, all_patch_embed, patch_embed 36 | -------------------------------------------------------------------------------- /models/uni3d/__init__.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import timm 3 | import numpy as np 4 | from torch import nn 5 | 6 | from .point_encoder import PointcloudEncoder 7 | 8 | 9 | class Uni3D(nn.Module): 10 | def __init__(self, args, point_encoder): 11 | super().__init__() 12 | self.cache_type = args.cache_type 13 | self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) 14 | self.point_encoder = point_encoder 15 | 16 | def encode_pc(self, pc): 17 | xyz = pc[:,:,:3].contiguous() 18 | color = pc[:,:,3:].contiguous() 19 | 20 | if self.cache_type == 'global': 21 | pc_feat = self.point_encoder(xyz, color) 22 | return pc_feat 23 | elif self.cache_type == 'local': 24 | patch_centers = 
self.point_encoder(xyz, color) 25 | return patch_centers 26 | elif self.cache_type == 'hierarchical': 27 | pc_feat, patch_centers = self.point_encoder(xyz, color) 28 | return pc_feat, patch_centers 29 | else: 30 | pc_feat, all_patches, patch_centers = self.point_encoder(xyz, color) 31 | return pc_feat, all_patches, patch_centers 32 | 33 | def forward(self, pc, text, image): 34 | text_embed_all = text 35 | image_embed = image 36 | pc_embed = self.encode_pc(pc) 37 | return {'text_embed': text_embed_all, 38 | 'pc_embed': pc_embed, 39 | 'image_embed': image_embed, 40 | 'logit_scale': self.logit_scale.exp()} 41 | 42 | 43 | def create_uni3d(args): 44 | # create transformer blocks for point cloud via timm 45 | # NOTE 1. pc_model: model name 2. pretrained_pc: model weights 46 | point_transformer = timm.create_model(args.pc_model, checkpoint_path=args.pretrained_pc, drop_path_rate=args.drop_path_rate) 47 | 48 | # create whole point cloud encoder 49 | point_encoder = PointcloudEncoder(point_transformer, args) 50 | 51 | # uni3d model 52 | model = Uni3D(args, point_encoder=point_encoder,) 53 | return model 54 | -------------------------------------------------------------------------------- /notebook/clip/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/clip/.DS_Store -------------------------------------------------------------------------------- /notebook/clip/__init__.py: -------------------------------------------------------------------------------- 1 | from .clip import * 2 | -------------------------------------------------------------------------------- /notebook/clip/bpe_simple_vocab_16e6.txt.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/clip/bpe_simple_vocab_16e6.txt.gz -------------------------------------------------------------------------------- /notebook/clip/simple_tokenizer.py: -------------------------------------------------------------------------------- 1 | import gzip 2 | import html 3 | import os 4 | from functools import lru_cache 5 | 6 | import ftfy 7 | import regex as re 8 | 9 | 10 | @lru_cache() 11 | def default_bpe(): 12 | return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz") 13 | 14 | 15 | @lru_cache() 16 | def bytes_to_unicode(): 17 | """ 18 | Returns a list of utf-8 bytes and a corresponding list of unicode strings. 19 | The reversible bpe codes work on unicode strings. 20 | This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. 21 | When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. 22 | This is a significant percentage of your normal, say, 32K bpe vocab. 23 | To avoid that, we want lookup tables between utf-8 bytes and unicode strings. 24 | And avoids mapping to whitespace/control characters the bpe code barfs on. 25 | """ 26 | bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1)) 27 | cs = bs[:] 28 | n = 0 29 | for b in range(2**8): 30 | if b not in bs: 31 | bs.append(b) 32 | cs.append(2**8+n) 33 | n += 1 34 | cs = [chr(n) for n in cs] 35 | return dict(zip(bs, cs)) 36 | 37 | 38 | def get_pairs(word): 39 | """Return set of symbol pairs in a word. 
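    For example (editor's note), get_pairs(('l', 'o', 'w')) returns
    {('l', 'o'), ('o', 'w')}.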
40 | Word is represented as tuple of symbols (symbols being variable-length strings). 41 | """ 42 | pairs = set() 43 | prev_char = word[0] 44 | for char in word[1:]: 45 | pairs.add((prev_char, char)) 46 | prev_char = char 47 | return pairs 48 | 49 | 50 | def basic_clean(text): 51 | text = ftfy.fix_text(text) 52 | text = html.unescape(html.unescape(text)) 53 | return text.strip() 54 | 55 | 56 | def whitespace_clean(text): 57 | text = re.sub(r'\s+', ' ', text) 58 | text = text.strip() 59 | return text 60 | 61 | 62 | class SimpleTokenizer(object): 63 | def __init__(self, bpe_path: str = default_bpe()): 64 | self.byte_encoder = bytes_to_unicode() 65 | self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} 66 | merges = gzip.open(bpe_path).read().decode("utf-8").split('\n') 67 | merges = merges[1:49152-256-2+1] 68 | merges = [tuple(merge.split()) for merge in merges] 69 | vocab = list(bytes_to_unicode().values()) 70 | vocab = vocab + [v+'</w>' for v in vocab] # '</w>' marks the end of a word 71 | for merge in merges: 72 | vocab.append(''.join(merge)) 73 | vocab.extend(['<|startoftext|>', '<|endoftext|>']) 74 | self.encoder = dict(zip(vocab, range(len(vocab)))) 75 | self.decoder = {v: k for k, v in self.encoder.items()} 76 | self.bpe_ranks = dict(zip(merges, range(len(merges)))) 77 | self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'} 78 | self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE) 79 | 80 | def bpe(self, token): 81 | if token in self.cache: 82 | return self.cache[token] 83 | word = tuple(token[:-1]) + ( token[-1] + '</w>',) 84 | pairs = get_pairs(word) 85 | 86 | if not pairs: 87 | return token+'</w>' 88 | 89 | while True: 90 | bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf'))) 91 | if bigram not in self.bpe_ranks: 92 | break 93 | first, second = bigram 94 | new_word = [] 95 | i = 0 96 | while i < len(word): 97 | try: 98 | j = word.index(first, i) 99 | new_word.extend(word[i:j]) 100 | i = j 101 | except ValueError: 102 | new_word.extend(word[i:]) 103 | break 104 | 105 | if word[i] == first and i < len(word)-1 and word[i+1] == second: 106 | new_word.append(first+second) 107 | i += 2 108 | else: 109 | new_word.append(word[i]) 110 | i += 1 111 | new_word = tuple(new_word) 112 | word = new_word 113 | if len(word) == 1: 114 | break 115 | else: 116 | pairs = get_pairs(word) 117 | word = ' '.join(word) 118 | self.cache[token] = word 119 | return word 120 | 121 | def encode(self, text): 122 | bpe_tokens = [] 123 | text = whitespace_clean(basic_clean(text)).lower() 124 | for token in re.findall(self.pat, text): 125 | token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8')) 126 | bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')) 127 | return bpe_tokens 128 | 129 | def decode(self, tokens): 130 | text = ''.join([self.decoder[token] for token in tokens]) 131 | text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ') 132 | return text 133 | -------------------------------------------------------------------------------- /notebook/ent_acc_correspondence.txt: -------------------------------------------------------------------------------- 1 | ## Entropy vs Accuracy - Figure 1 2 | 3 | ### ULIP, sonn_c, obj_only, rotate_2 4 | 581 581 # len(test_loader) len(test_loader) 5 | 0.588749656925711 # zero-shot ent 6 | 0.42422820917982595 # global ent 7 | 3.831588456486194e-06 # hierar ent 8 | 48.54 # zero-shot acc 9 
| 49.57 # global acc 10 | 50.95 # hierar acc 11 | 12 | ### ULIP-2, scanobjnn, hardest 13 | 2890 2890 14 | 0.6239618546384748 15 | 0.5161622886767394 16 | 4.658582578852733e-11 17 | 47.61 18 | 51.38 19 | 54.98 20 | 21 | ### OpenShape, modelnet_c, dropout_local_2 22 | 2468 2468 23 | 0.48536727646550953 24 | 0.34432484470058106 25 | 0.0005787112504865848 26 | 73.38 27 | 74.68 28 | 76.26 29 | 30 | ### Uni3D, omniobject3d_4096pts 31 | 5910 5910 32 | 0.6518472730824364 33 | 0.4611860423587824 34 | 0.00024681549752659783 35 | 42.0 36 | 44.65 37 | 45.6 38 | 39 | ## Entropy vs Accuracy - Figure 2 40 | 41 | ### ULIP, sonn_c, obj_only, add_local_2 42 | 581 581 43 | 0.7306837538455156 # zero-shot ent 44 | 0.498802644549544 # global ent 45 | 0.0008959872864828429 # hierar ent 46 | 34.08 # zero-shot acc 47 | 35.97 # global acc 48 | 38.04 # hierar acc 49 | 50 | ### ULIP-2, modelnet40 51 | 2468 2468 52 | 0.29051284683501316 53 | 0.2061172383157257 54 | 2.273536813210597e-06 55 | 73.01 56 | 74.64 57 | 75.53 58 | 59 | ### OpenShape, sonn_c, obj_bg, add_global_2 60 | 581 581 61 | 0.7646623774545375 62 | 0.5581055393448864 63 | 0.0003237533106131637 64 | 50.43 65 | 55.77 66 | 58.0 67 | 68 | ### Uni3D, sonn_c, hardest, rotate_2 69 | 2882 2882 70 | 0.6915098679645646 71 | 0.6033886304971492 72 | 5.783595330371011e-07 73 | 43.41 74 | 46.01 75 | 49.03 -------------------------------------------------------------------------------- /notebook/generate_pc_cls_images.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | from diffusers import DiffusionPipeline 6 | 7 | 8 | def main(dataset, clsname, seed): 9 | # 1. load both base & refiner 10 | base = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-xl-base-1.0', 11 | torch_dtype=torch.float16, variant="fp16", use_safetensors=True) 12 | base.to("cuda:1") 13 | 14 | refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", 15 | text_encoder_2=base.text_encoder_2, 16 | vae=base.vae, 17 | torch_dtype=torch.float16, 18 | use_safetensors=True, 19 | variant="fp16",) 20 | refiner.to("cuda:1") 21 | 22 | # 2. Define how many steps and what % of steps to be run on each expert (80/20) here 23 | n_steps = 40 24 | high_noise_frac = 0.8 25 | 26 | if '_' in clsname: 27 | cls = clsname.replace('_', ' ') 28 | else: 29 | cls = clsname 30 | 31 | if cls == 'airplane': 32 | prompt = f"a single object of an {cls} with clean background" 33 | else: 34 | prompt = f"a single object of a {cls} with clean background" 35 | 36 | # 3. 
run both experts 37 | image = base( 38 | prompt=prompt, 39 | num_inference_steps=n_steps, 40 | denoising_end=high_noise_frac, 41 | output_type="latent", 42 | ).images 43 | image = refiner( 44 | prompt=prompt, 45 | num_inference_steps=n_steps, 46 | denoising_start=high_noise_frac, 47 | image=image, 48 | ).images[0] 49 | 50 | os.makedirs(f'data/diffusion/{dataset}/{clsname}', exist_ok=True) 51 | image.save(f'data/diffusion/{dataset}/{clsname}/{seed}.png') 52 | print(f'>>> SAVE data/diffusion/{dataset}/{clsname}/{seed}.png Done!') 53 | 54 | 55 | if __name__ == '__main__': 56 | dataset = sys.argv[1] 57 | 58 | if dataset == 'modelnet_c': 59 | cls_file = 'data/modelnet_c/shape_names.txt' 60 | with open(cls_file) as fin: 61 | lines = fin.readlines() 62 | classnames = [line.strip() for line in lines if line.strip() != ""] 63 | 64 | for seed in [1,2,3]: 65 | for clsname in classnames: 66 | main(dataset, clsname, seed) 67 | -------------------------------------------------------------------------------- /notebook/gifs/os_guitar.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/gifs/os_guitar.gif -------------------------------------------------------------------------------- /notebook/gifs/os_plant.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/gifs/os_plant.gif -------------------------------------------------------------------------------- /notebook/gifs/ulip2_table.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/gifs/ulip2_table.gif -------------------------------------------------------------------------------- /notebook/gifs/ulip2_toilet.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/gifs/ulip2_toilet.gif -------------------------------------------------------------------------------- /notebook/gifs/ulip_shelf.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/gifs/ulip_shelf.gif -------------------------------------------------------------------------------- /notebook/gifs/ulip_sofa.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/gifs/ulip_sofa.gif -------------------------------------------------------------------------------- /notebook/gifs/uni3d_calculator.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/gifs/uni3d_calculator.gif -------------------------------------------------------------------------------- /notebook/gifs/uni3d_hair_dryer.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/gifs/uni3d_hair_dryer.gif 
-------------------------------------------------------------------------------- /notebook/images/ablate_alpha_in_cache.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ablate_alpha_in_cache.pdf -------------------------------------------------------------------------------- /notebook/images/ablate_beta_in_cache.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ablate_beta_in_cache.pdf -------------------------------------------------------------------------------- /notebook/images/ablate_k_shot.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ablate_k_shot.pdf -------------------------------------------------------------------------------- /notebook/images/ablate_n_cluster.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ablate_n_cluster.pdf -------------------------------------------------------------------------------- /notebook/images/acc_on_clean_and_corrupted_mn40.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/acc_on_clean_and_corrupted_mn40.pdf -------------------------------------------------------------------------------- /notebook/images/acc_on_clean_and_corrupted_sonn_hardest.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/acc_on_clean_and_corrupted_sonn_hardest.pdf -------------------------------------------------------------------------------- /notebook/images/acc_on_omni3d_4096pts.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/acc_on_omni3d_4096pts.pdf -------------------------------------------------------------------------------- /notebook/images/astronaut.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/astronaut.png -------------------------------------------------------------------------------- /notebook/images/bottle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/bottle.png -------------------------------------------------------------------------------- /notebook/images/ent_acc_openshape_modelnet_c_dropout_local_2.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ent_acc_openshape_modelnet_c_dropout_local_2.pdf 
-------------------------------------------------------------------------------- /notebook/images/ent_acc_relationship_1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ent_acc_relationship_1.pdf -------------------------------------------------------------------------------- /notebook/images/ent_acc_relationship_2.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ent_acc_relationship_2.pdf -------------------------------------------------------------------------------- /notebook/images/ent_acc_ulip1_sonn_c_obj_only_rotate_2.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ent_acc_ulip1_sonn_c_obj_only_rotate_2.pdf -------------------------------------------------------------------------------- /notebook/images/ent_acc_ulip2_so_hardest.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ent_acc_ulip2_so_hardest.pdf -------------------------------------------------------------------------------- /notebook/images/ent_acc_uni3d_4096pts.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ent_acc_uni3d_4096pts.pdf -------------------------------------------------------------------------------- /notebook/images/laptop.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/laptop.png -------------------------------------------------------------------------------- /notebook/images/large_3d_models_pt_base2new_mn40.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/large_3d_models_pt_base2new_mn40.pdf -------------------------------------------------------------------------------- /notebook/images/mn40_obj_0.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/mn40_obj_0.pdf -------------------------------------------------------------------------------- /notebook/images/mn40_obj_1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/mn40_obj_1.pdf -------------------------------------------------------------------------------- /notebook/images/mn40_obj_2.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/mn40_obj_2.pdf -------------------------------------------------------------------------------- /notebook/images/mn40_obj_3.pdf: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/mn40_obj_3.pdf -------------------------------------------------------------------------------- /notebook/images/mn40_obj_4.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/mn40_obj_4.pdf -------------------------------------------------------------------------------- /notebook/images/modelnet_c_dropout_local_2_bottle.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/modelnet_c_dropout_local_2_bottle.pdf -------------------------------------------------------------------------------- /notebook/images/modelnet_c_dropout_local_2_bottle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/modelnet_c_dropout_local_2_bottle.png -------------------------------------------------------------------------------- /notebook/images/modelnet_c_dropout_local_2_glass_box.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/modelnet_c_dropout_local_2_glass_box.pdf -------------------------------------------------------------------------------- /notebook/images/modelnet_c_dropout_local_2_glass_box.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/modelnet_c_dropout_local_2_glass_box.png -------------------------------------------------------------------------------- /notebook/images/modelnet_c_dropout_local_2_guitar.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/modelnet_c_dropout_local_2_guitar.pdf -------------------------------------------------------------------------------- /notebook/images/modelnet_c_dropout_local_2_guitar.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/modelnet_c_dropout_local_2_guitar.png -------------------------------------------------------------------------------- /notebook/images/modelnet_c_dropout_local_2_plant.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/modelnet_c_dropout_local_2_plant.pdf -------------------------------------------------------------------------------- /notebook/images/modelnet_c_dropout_local_2_plant.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/modelnet_c_dropout_local_2_plant.png 
-------------------------------------------------------------------------------- /notebook/images/modelnet_c_dropout_local_2_stairs.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/modelnet_c_dropout_local_2_stairs.pdf -------------------------------------------------------------------------------- /notebook/images/modelnet_c_dropout_local_2_stairs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/modelnet_c_dropout_local_2_stairs.png -------------------------------------------------------------------------------- /notebook/images/modelnet_c_dropout_local_2_tent.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/modelnet_c_dropout_local_2_tent.pdf -------------------------------------------------------------------------------- /notebook/images/modelnet_c_dropout_local_2_tent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/modelnet_c_dropout_local_2_tent.png -------------------------------------------------------------------------------- /notebook/images/modelnet_c_dropout_local_2_tv_stand.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/modelnet_c_dropout_local_2_tv_stand.pdf -------------------------------------------------------------------------------- /notebook/images/modelnet_c_dropout_local_2_tv_stand.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/modelnet_c_dropout_local_2_tv_stand.png -------------------------------------------------------------------------------- /notebook/images/omni3d_calculator_4096.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/omni3d_calculator_4096.pdf -------------------------------------------------------------------------------- /notebook/images/omni3d_calculator_4096.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/omni3d_calculator_4096.png -------------------------------------------------------------------------------- /notebook/images/omniobject3d_4096pts_hair_dryer.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/omniobject3d_4096pts_hair_dryer.pdf -------------------------------------------------------------------------------- /notebook/images/omniobject3d_4096pts_hair_dryer.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/omniobject3d_4096pts_hair_dryer.png -------------------------------------------------------------------------------- /notebook/images/omniobject3d_4096pts_pomegranate.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/omniobject3d_4096pts_pomegranate.pdf -------------------------------------------------------------------------------- /notebook/images/omniobject3d_4096pts_pomegranate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/omniobject3d_4096pts_pomegranate.png -------------------------------------------------------------------------------- /notebook/images/omniobject3d_4096pts_shampoo.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/omniobject3d_4096pts_shampoo.pdf -------------------------------------------------------------------------------- /notebook/images/omniobject3d_4096pts_shampoo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/omniobject3d_4096pts_shampoo.png -------------------------------------------------------------------------------- /notebook/images/omniobject3d_4096pts_toy_truck.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/omniobject3d_4096pts_toy_truck.pdf -------------------------------------------------------------------------------- /notebook/images/omniobject3d_4096pts_toy_truck.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/omniobject3d_4096pts_toy_truck.png -------------------------------------------------------------------------------- /notebook/images/omniobject3d_4096pts_watch.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/omniobject3d_4096pts_watch.pdf -------------------------------------------------------------------------------- /notebook/images/omniobject3d_4096pts_watch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/omniobject3d_4096pts_watch.png -------------------------------------------------------------------------------- /notebook/images/omniobject3d_4096pts_watermelon.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/omniobject3d_4096pts_watermelon.pdf -------------------------------------------------------------------------------- /notebook/images/omniobject3d_4096pts_watermelon.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/omniobject3d_4096pts_watermelon.png -------------------------------------------------------------------------------- /notebook/images/openshape_modelnet_c_dropout_local_2_acc.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/openshape_modelnet_c_dropout_local_2_acc.pdf -------------------------------------------------------------------------------- /notebook/images/os_mn_c_dropout_local_global_bottle.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/os_mn_c_dropout_local_global_bottle.pdf -------------------------------------------------------------------------------- /notebook/images/os_mn_c_dropout_local_global_door.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/os_mn_c_dropout_local_global_door.pdf -------------------------------------------------------------------------------- /notebook/images/os_mn_c_dropout_local_global_glass_box.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/os_mn_c_dropout_local_global_glass_box.pdf -------------------------------------------------------------------------------- /notebook/images/os_mn_c_dropout_local_global_guitar.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/os_mn_c_dropout_local_global_guitar.pdf -------------------------------------------------------------------------------- /notebook/images/os_mn_c_dropout_local_global_plant.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/os_mn_c_dropout_local_global_plant.pdf -------------------------------------------------------------------------------- /notebook/images/os_mn_c_dropout_local_global_stairs.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/os_mn_c_dropout_local_global_stairs.pdf -------------------------------------------------------------------------------- /notebook/images/os_mn_c_dropout_local_global_tent.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/os_mn_c_dropout_local_global_tent.pdf -------------------------------------------------------------------------------- /notebook/images/os_mn_c_dropout_local_global_tv_stand.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/os_mn_c_dropout_local_global_tv_stand.pdf -------------------------------------------------------------------------------- /notebook/images/os_mn_c_dropout_local_hierar_bottle.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/os_mn_c_dropout_local_hierar_bottle.pdf -------------------------------------------------------------------------------- /notebook/images/os_mn_c_dropout_local_hierar_door.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/os_mn_c_dropout_local_hierar_door.pdf -------------------------------------------------------------------------------- /notebook/images/os_mn_c_dropout_local_hierar_glass_box.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/os_mn_c_dropout_local_hierar_glass_box.pdf -------------------------------------------------------------------------------- /notebook/images/os_mn_c_dropout_local_hierar_guitar.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/os_mn_c_dropout_local_hierar_guitar.pdf -------------------------------------------------------------------------------- /notebook/images/os_mn_c_dropout_local_hierar_plant.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/os_mn_c_dropout_local_hierar_plant.pdf -------------------------------------------------------------------------------- /notebook/images/os_mn_c_dropout_local_hierar_stairs.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/os_mn_c_dropout_local_hierar_stairs.pdf -------------------------------------------------------------------------------- /notebook/images/os_mn_c_dropout_local_hierar_tent.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/os_mn_c_dropout_local_hierar_tent.pdf -------------------------------------------------------------------------------- /notebook/images/os_mn_c_dropout_local_hierar_tv_stand.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/os_mn_c_dropout_local_hierar_tv_stand.pdf -------------------------------------------------------------------------------- /notebook/images/os_mn_c_dropout_local_zero_bottle.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/os_mn_c_dropout_local_zero_bottle.pdf 
-------------------------------------------------------------------------------- /notebook/images/os_mn_c_dropout_local_zero_door.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/os_mn_c_dropout_local_zero_door.pdf -------------------------------------------------------------------------------- /notebook/images/os_mn_c_dropout_local_zero_glass_box.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/os_mn_c_dropout_local_zero_glass_box.pdf -------------------------------------------------------------------------------- /notebook/images/os_mn_c_dropout_local_zero_guitar.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/os_mn_c_dropout_local_zero_guitar.pdf -------------------------------------------------------------------------------- /notebook/images/os_mn_c_dropout_local_zero_plant.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/os_mn_c_dropout_local_zero_plant.pdf -------------------------------------------------------------------------------- /notebook/images/os_mn_c_dropout_local_zero_stairs.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/os_mn_c_dropout_local_zero_stairs.pdf -------------------------------------------------------------------------------- /notebook/images/os_mn_c_dropout_local_zero_tent.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/os_mn_c_dropout_local_zero_tent.pdf -------------------------------------------------------------------------------- /notebook/images/os_mn_c_dropout_local_zero_tv_stand.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/os_mn_c_dropout_local_zero_tv_stand.pdf -------------------------------------------------------------------------------- /notebook/images/owl.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/owl.png -------------------------------------------------------------------------------- /notebook/images/owl_generated_from_pc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/owl_generated_from_pc.png -------------------------------------------------------------------------------- /notebook/images/scanobjnn_hardest_bed.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/scanobjnn_hardest_bed.pdf -------------------------------------------------------------------------------- /notebook/images/scanobjnn_hardest_bed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/scanobjnn_hardest_bed.png -------------------------------------------------------------------------------- /notebook/images/scanobjnn_hardest_box.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/scanobjnn_hardest_box.pdf -------------------------------------------------------------------------------- /notebook/images/scanobjnn_hardest_box.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/scanobjnn_hardest_box.png -------------------------------------------------------------------------------- /notebook/images/scanobjnn_hardest_cabinet.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/scanobjnn_hardest_cabinet.pdf -------------------------------------------------------------------------------- /notebook/images/scanobjnn_hardest_cabinet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/scanobjnn_hardest_cabinet.png -------------------------------------------------------------------------------- /notebook/images/scanobjnn_hardest_display.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/scanobjnn_hardest_display.pdf -------------------------------------------------------------------------------- /notebook/images/scanobjnn_hardest_display.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/scanobjnn_hardest_display.png -------------------------------------------------------------------------------- /notebook/images/scanobjnn_hardest_sink.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/scanobjnn_hardest_sink.pdf -------------------------------------------------------------------------------- /notebook/images/scanobjnn_hardest_sink.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/scanobjnn_hardest_sink.png -------------------------------------------------------------------------------- /notebook/images/scanobjnn_hardest_table.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/scanobjnn_hardest_table.pdf -------------------------------------------------------------------------------- /notebook/images/scanobjnn_hardest_table.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/scanobjnn_hardest_table.png -------------------------------------------------------------------------------- /notebook/images/scanobjnn_hardest_toilet.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/scanobjnn_hardest_toilet.pdf -------------------------------------------------------------------------------- /notebook/images/scanobjnn_hardest_toilet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/scanobjnn_hardest_toilet.png -------------------------------------------------------------------------------- /notebook/images/sdxl-airplane.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/sdxl-airplane.png -------------------------------------------------------------------------------- /notebook/images/sdxl-car.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/sdxl-car.png -------------------------------------------------------------------------------- /notebook/images/sdxl-resized_airplane.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/sdxl-resized_airplane.png -------------------------------------------------------------------------------- /notebook/images/sdxl-resized_desk.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/sdxl-resized_desk.png -------------------------------------------------------------------------------- /notebook/images/sdxl-ship.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/sdxl-ship.png -------------------------------------------------------------------------------- /notebook/images/sonn_c_obj_only_rotate_2_bag.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/sonn_c_obj_only_rotate_2_bag.pdf -------------------------------------------------------------------------------- /notebook/images/sonn_c_obj_only_rotate_2_bag.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/sonn_c_obj_only_rotate_2_bag.png -------------------------------------------------------------------------------- /notebook/images/sonn_c_obj_only_rotate_2_bin.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/sonn_c_obj_only_rotate_2_bin.pdf -------------------------------------------------------------------------------- /notebook/images/sonn_c_obj_only_rotate_2_bin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/sonn_c_obj_only_rotate_2_bin.png -------------------------------------------------------------------------------- /notebook/images/sonn_c_obj_only_rotate_2_desk.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/sonn_c_obj_only_rotate_2_desk.pdf -------------------------------------------------------------------------------- /notebook/images/sonn_c_obj_only_rotate_2_desk.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/sonn_c_obj_only_rotate_2_desk.png -------------------------------------------------------------------------------- /notebook/images/sonn_c_obj_only_rotate_2_door.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/sonn_c_obj_only_rotate_2_door.pdf -------------------------------------------------------------------------------- /notebook/images/sonn_c_obj_only_rotate_2_door.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/sonn_c_obj_only_rotate_2_door.png -------------------------------------------------------------------------------- /notebook/images/sonn_c_obj_only_rotate_2_pillow.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/sonn_c_obj_only_rotate_2_pillow.pdf -------------------------------------------------------------------------------- /notebook/images/sonn_c_obj_only_rotate_2_pillow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/sonn_c_obj_only_rotate_2_pillow.png -------------------------------------------------------------------------------- /notebook/images/sonn_c_obj_only_rotate_2_shelf.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/sonn_c_obj_only_rotate_2_shelf.pdf -------------------------------------------------------------------------------- /notebook/images/sonn_c_obj_only_rotate_2_shelf.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/sonn_c_obj_only_rotate_2_shelf.png -------------------------------------------------------------------------------- /notebook/images/sonn_c_obj_only_rotate_2_sofa.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/sonn_c_obj_only_rotate_2_sofa.pdf -------------------------------------------------------------------------------- /notebook/images/sonn_c_obj_only_rotate_2_sofa.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/sonn_c_obj_only_rotate_2_sofa.png -------------------------------------------------------------------------------- /notebook/images/ulip1_sonn_c_obj_only_rotate_2_acc.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip1_sonn_c_obj_only_rotate_2_acc.pdf -------------------------------------------------------------------------------- /notebook/images/ulip1_sonn_c_obj_only_rotate_2_global_bag.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip1_sonn_c_obj_only_rotate_2_global_bag.pdf -------------------------------------------------------------------------------- /notebook/images/ulip1_sonn_c_obj_only_rotate_2_global_bin.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip1_sonn_c_obj_only_rotate_2_global_bin.pdf -------------------------------------------------------------------------------- /notebook/images/ulip1_sonn_c_obj_only_rotate_2_global_desk.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip1_sonn_c_obj_only_rotate_2_global_desk.pdf -------------------------------------------------------------------------------- /notebook/images/ulip1_sonn_c_obj_only_rotate_2_global_door.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip1_sonn_c_obj_only_rotate_2_global_door.pdf -------------------------------------------------------------------------------- /notebook/images/ulip1_sonn_c_obj_only_rotate_2_global_pillow.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip1_sonn_c_obj_only_rotate_2_global_pillow.pdf -------------------------------------------------------------------------------- /notebook/images/ulip1_sonn_c_obj_only_rotate_2_global_shelf.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip1_sonn_c_obj_only_rotate_2_global_shelf.pdf -------------------------------------------------------------------------------- /notebook/images/ulip1_sonn_c_obj_only_rotate_2_global_sofa.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip1_sonn_c_obj_only_rotate_2_global_sofa.pdf -------------------------------------------------------------------------------- /notebook/images/ulip1_sonn_c_obj_only_rotate_2_hierar_bag.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip1_sonn_c_obj_only_rotate_2_hierar_bag.pdf -------------------------------------------------------------------------------- /notebook/images/ulip1_sonn_c_obj_only_rotate_2_hierar_bin.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip1_sonn_c_obj_only_rotate_2_hierar_bin.pdf -------------------------------------------------------------------------------- /notebook/images/ulip1_sonn_c_obj_only_rotate_2_hierar_desk.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip1_sonn_c_obj_only_rotate_2_hierar_desk.pdf -------------------------------------------------------------------------------- /notebook/images/ulip1_sonn_c_obj_only_rotate_2_hierar_door.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip1_sonn_c_obj_only_rotate_2_hierar_door.pdf -------------------------------------------------------------------------------- /notebook/images/ulip1_sonn_c_obj_only_rotate_2_hierar_pillow.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip1_sonn_c_obj_only_rotate_2_hierar_pillow.pdf -------------------------------------------------------------------------------- /notebook/images/ulip1_sonn_c_obj_only_rotate_2_hierar_shelf.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip1_sonn_c_obj_only_rotate_2_hierar_shelf.pdf -------------------------------------------------------------------------------- /notebook/images/ulip1_sonn_c_obj_only_rotate_2_hierar_sofa.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip1_sonn_c_obj_only_rotate_2_hierar_sofa.pdf -------------------------------------------------------------------------------- /notebook/images/ulip1_sonn_c_obj_only_rotate_2_zero_bag.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip1_sonn_c_obj_only_rotate_2_zero_bag.pdf -------------------------------------------------------------------------------- /notebook/images/ulip1_sonn_c_obj_only_rotate_2_zero_bin.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip1_sonn_c_obj_only_rotate_2_zero_bin.pdf -------------------------------------------------------------------------------- /notebook/images/ulip1_sonn_c_obj_only_rotate_2_zero_desk.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip1_sonn_c_obj_only_rotate_2_zero_desk.pdf -------------------------------------------------------------------------------- /notebook/images/ulip1_sonn_c_obj_only_rotate_2_zero_door.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip1_sonn_c_obj_only_rotate_2_zero_door.pdf -------------------------------------------------------------------------------- /notebook/images/ulip1_sonn_c_obj_only_rotate_2_zero_pillow.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip1_sonn_c_obj_only_rotate_2_zero_pillow.pdf -------------------------------------------------------------------------------- /notebook/images/ulip1_sonn_c_obj_only_rotate_2_zero_shelf.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip1_sonn_c_obj_only_rotate_2_zero_shelf.pdf -------------------------------------------------------------------------------- /notebook/images/ulip1_sonn_c_obj_only_rotate_2_zero_sofa.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip1_sonn_c_obj_only_rotate_2_zero_sofa.pdf -------------------------------------------------------------------------------- /notebook/images/ulip2_scanobjnn_hardest_acc.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip2_scanobjnn_hardest_acc.pdf -------------------------------------------------------------------------------- /notebook/images/ulip2_sonn_hardest_global_bed.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip2_sonn_hardest_global_bed.pdf -------------------------------------------------------------------------------- /notebook/images/ulip2_sonn_hardest_global_box.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip2_sonn_hardest_global_box.pdf 
-------------------------------------------------------------------------------- /notebook/images/ulip2_sonn_hardest_global_cabinet.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip2_sonn_hardest_global_cabinet.pdf -------------------------------------------------------------------------------- /notebook/images/ulip2_sonn_hardest_global_display.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip2_sonn_hardest_global_display.pdf -------------------------------------------------------------------------------- /notebook/images/ulip2_sonn_hardest_global_sink.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip2_sonn_hardest_global_sink.pdf -------------------------------------------------------------------------------- /notebook/images/ulip2_sonn_hardest_global_table.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip2_sonn_hardest_global_table.pdf -------------------------------------------------------------------------------- /notebook/images/ulip2_sonn_hardest_global_toilet.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip2_sonn_hardest_global_toilet.pdf -------------------------------------------------------------------------------- /notebook/images/ulip2_sonn_hardest_hierar_bed.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip2_sonn_hardest_hierar_bed.pdf -------------------------------------------------------------------------------- /notebook/images/ulip2_sonn_hardest_hierar_box.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip2_sonn_hardest_hierar_box.pdf -------------------------------------------------------------------------------- /notebook/images/ulip2_sonn_hardest_hierar_cabinet.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip2_sonn_hardest_hierar_cabinet.pdf -------------------------------------------------------------------------------- /notebook/images/ulip2_sonn_hardest_hierar_display.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip2_sonn_hardest_hierar_display.pdf -------------------------------------------------------------------------------- /notebook/images/ulip2_sonn_hardest_hierar_sink.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip2_sonn_hardest_hierar_sink.pdf -------------------------------------------------------------------------------- /notebook/images/ulip2_sonn_hardest_hierar_table.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip2_sonn_hardest_hierar_table.pdf -------------------------------------------------------------------------------- /notebook/images/ulip2_sonn_hardest_hierar_toilet.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip2_sonn_hardest_hierar_toilet.pdf -------------------------------------------------------------------------------- /notebook/images/ulip2_sonn_hardest_zero_bed.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip2_sonn_hardest_zero_bed.pdf -------------------------------------------------------------------------------- /notebook/images/ulip2_sonn_hardest_zero_box.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip2_sonn_hardest_zero_box.pdf -------------------------------------------------------------------------------- /notebook/images/ulip2_sonn_hardest_zero_cabinet.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip2_sonn_hardest_zero_cabinet.pdf -------------------------------------------------------------------------------- /notebook/images/ulip2_sonn_hardest_zero_display.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip2_sonn_hardest_zero_display.pdf -------------------------------------------------------------------------------- /notebook/images/ulip2_sonn_hardest_zero_sink.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip2_sonn_hardest_zero_sink.pdf -------------------------------------------------------------------------------- /notebook/images/ulip2_sonn_hardest_zero_table.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip2_sonn_hardest_zero_table.pdf -------------------------------------------------------------------------------- /notebook/images/ulip2_sonn_hardest_zero_toilet.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/ulip2_sonn_hardest_zero_toilet.pdf -------------------------------------------------------------------------------- 
/notebook/images/uni3d_omni3d_4096_global_calculator.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/uni3d_omni3d_4096_global_calculator.pdf -------------------------------------------------------------------------------- /notebook/images/uni3d_omni3d_4096_global_hair_dryer.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/uni3d_omni3d_4096_global_hair_dryer.pdf -------------------------------------------------------------------------------- /notebook/images/uni3d_omni3d_4096_global_pomegranate.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/uni3d_omni3d_4096_global_pomegranate.pdf -------------------------------------------------------------------------------- /notebook/images/uni3d_omni3d_4096_global_shampoo.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/uni3d_omni3d_4096_global_shampoo.pdf -------------------------------------------------------------------------------- /notebook/images/uni3d_omni3d_4096_global_toy_truck.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/uni3d_omni3d_4096_global_toy_truck.pdf -------------------------------------------------------------------------------- /notebook/images/uni3d_omni3d_4096_global_watch.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/uni3d_omni3d_4096_global_watch.pdf -------------------------------------------------------------------------------- /notebook/images/uni3d_omni3d_4096_global_watermelon.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/uni3d_omni3d_4096_global_watermelon.pdf -------------------------------------------------------------------------------- /notebook/images/uni3d_omni3d_4096_hierar_calculator.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/uni3d_omni3d_4096_hierar_calculator.pdf -------------------------------------------------------------------------------- /notebook/images/uni3d_omni3d_4096_hierar_hair_dryer.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/uni3d_omni3d_4096_hierar_hair_dryer.pdf -------------------------------------------------------------------------------- /notebook/images/uni3d_omni3d_4096_hierar_pomegranate.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/uni3d_omni3d_4096_hierar_pomegranate.pdf -------------------------------------------------------------------------------- /notebook/images/uni3d_omni3d_4096_hierar_shampoo.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/uni3d_omni3d_4096_hierar_shampoo.pdf -------------------------------------------------------------------------------- /notebook/images/uni3d_omni3d_4096_hierar_toy_truck.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/uni3d_omni3d_4096_hierar_toy_truck.pdf -------------------------------------------------------------------------------- /notebook/images/uni3d_omni3d_4096_hierar_watch.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/uni3d_omni3d_4096_hierar_watch.pdf -------------------------------------------------------------------------------- /notebook/images/uni3d_omni3d_4096_hierar_watermelon.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/uni3d_omni3d_4096_hierar_watermelon.pdf -------------------------------------------------------------------------------- /notebook/images/uni3d_omni3d_4096_zero_calculator.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/uni3d_omni3d_4096_zero_calculator.pdf -------------------------------------------------------------------------------- /notebook/images/uni3d_omni3d_4096_zero_hair_dryer.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/uni3d_omni3d_4096_zero_hair_dryer.pdf -------------------------------------------------------------------------------- /notebook/images/uni3d_omni3d_4096_zero_pomegranate.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/uni3d_omni3d_4096_zero_pomegranate.pdf -------------------------------------------------------------------------------- /notebook/images/uni3d_omni3d_4096_zero_shampoo.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/uni3d_omni3d_4096_zero_shampoo.pdf -------------------------------------------------------------------------------- /notebook/images/uni3d_omni3d_4096_zero_toy_truck.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/uni3d_omni3d_4096_zero_toy_truck.pdf 
-------------------------------------------------------------------------------- /notebook/images/uni3d_omni3d_4096_zero_watch.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/uni3d_omni3d_4096_zero_watch.pdf -------------------------------------------------------------------------------- /notebook/images/uni3d_omni3d_4096_zero_watermelon.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/uni3d_omni3d_4096_zero_watermelon.pdf -------------------------------------------------------------------------------- /notebook/images/uni3d_omniobject3d_4096pts_acc.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/images/uni3d_omniobject3d_4096pts_acc.pdf -------------------------------------------------------------------------------- /notebook/owl_shape_feat.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/notebook/owl_shape_feat.pt -------------------------------------------------------------------------------- /runners/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/runners/__init__.py
--------------------------------------------------------------------------------
/runners/param_count.py:
--------------------------------------------------------------------------------
import os
import sys

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils.utils import *


def main(args):
    print('>>> In function `main`')

    load_models(args)


if __name__ == '__main__':
    args = get_arguments()
    # Set random seed
    set_random_seed(args.seed)

    main(args)

--------------------------------------------------------------------------------
/runners/zs_infer.py:
--------------------------------------------------------------------------------
import os
import sys
import wandb

import torch

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils.utils import *


def infer(args, lm3d_model, test_loader, clip_weights):
    # assert args.cache_type == 'local', f'Local cache is expected, but got {args.cache_type}!'

    print('>>> In function `infer`: zero-shot inference')

    accuracies = []
    for i, (pc, target, _, rgb) in enumerate(test_loader):
        # pc: (1, n, 3)    rgb: (1, n, 3)
        feature = torch.cat([pc, rgb], dim=-1).half()
        target = target.cuda()

        # pc_feats: (1, emb_dim)
        # patch_centers: (5, emb_dim)
        # clip_logits: (1, n_cls)
        # loss: a scalar
        # prob_map: (1, n_cls)
        # pred: a scalar, class index
        if args.cache_type == 'local':
            patch_centers, clip_logits, loss, prob_map, pred = get_logits(args, feature, lm3d_model, clip_weights)
        elif args.cache_type == 'global':
            pc_feats, clip_logits, loss, prob_map, pred = get_logits(args, feature, lm3d_model, clip_weights)
        elif args.cache_type == 'hierarchical':
            pc_feats, patch_centers, clip_logits, loss, prob_map, pred = get_logits(args, feature, lm3d_model, clip_weights)
        else:
            raise ValueError(f'The choice from [local, global, hierarchical] is expected, but got {args.cache_type}!')

        acc = cls_acc(clip_logits, target)
        accuracies.append(acc)

        if args.wandb:
            # `wandb.log` raises if `wandb.init` was never called, so only log when W&B is enabled
            wandb.log({"Averaged test accuracy": sum(accuracies)/len(accuracies)}, commit=True)
        if i % args.print_freq == 0:
            print("---- Zero-shot test accuracy: {:.2f}. ----\n".format(sum(accuracies)/len(accuracies)))

    print("---- ***Final*** Zero-shot test accuracy: {:.2f}. ----\n".format(sum(accuracies)/len(accuracies)))
    return sum(accuracies)/len(accuracies)


def main(args):
    print('>>> In function `main`')

    clip_model, lm3d_model = load_models(args)

    preprocess = None

    # Run zero-shot inference on the selected dataset
    dataset_name = args.dataset

    print(f"Processing {dataset_name} dataset.")

    test_loader, classnames, template = build_test_data_loader(args, dataset_name, args.data_root, preprocess)

    print(f'>>> {dataset_name} classnames: {classnames} \n')

    # `clip_weights` are text features of shape (emb_dim, n_cls)
    clip_weights = clip_classifier(args, classnames, template, clip_model)

    if args.wandb:
        if args.lm3d == 'openshape':
            prefix = f"[zs_infer-manual-prompts]/global_feat/{args.lm3d}-{args.oshape_version}"
        elif args.lm3d == 'ulip':
            prefix = f"[zs_infer-manual-prompts]/global_feat/{args.ulip_version}"
        else:
            prefix = f"[zs_infer-manual-prompts]/global_feat/{args.lm3d}"

        if '_c' in dataset_name and 'sonn' in dataset_name:
            run_name = f"{prefix}/{dataset_name}-{args.sonn_variant}-{args.npoints}/{args.cor_type}"
        elif '_c' in dataset_name:
            run_name = f"{prefix}/{dataset_name}-{args.npoints}/{args.cor_type}"
        elif 'scanobjnn' in dataset_name or 'scanobjectnn' in dataset_name:
            run_name = f"{prefix}/{dataset_name}-{args.sonn_variant}-{args.npoints}"
        elif 'sim2real_sonn' in dataset_name:
            run_name = f"{prefix}/{dataset_name}-{args.sim2real_type}-{args.npoints}"
        else:
            # `pointda` and every remaining dataset share the same naming scheme
            run_name = f"{prefix}/{dataset_name}-{args.npoints}"

        run = wandb.init(project="Point-TDA", name=run_name)

    zs_acc = infer(args, lm3d_model, test_loader, clip_weights)

    if args.wandb:
        wandb.log({f"{dataset_name}": zs_acc})
        run.finish()


if __name__ == '__main__':
    args = get_arguments()
    # Set random seed
    set_random_seed(args.seed)

    main(args)
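Note: `get_logits` and `cls_acc` are imported star-style from `utils/utils.py`, which is not reproduced in this listing. For orientation only, the shape comments inside `infer` suggest a contract along the following lines for `cls_acc`; this is a minimal illustrative sketch under those assumptions, not the repository's actual implementation:

import torch

def cls_acc(logits: torch.Tensor, target: torch.Tensor) -> float:
    # Hypothetical sketch. logits: (1, n_cls) class scores;
    # target: (1,) ground-truth class index on the same device.
    # Returns top-1 accuracy in percent; with the batch-size-1 test
    # loaders used in `infer`, each call yields either 100.0 or 0.0,
    # so the running mean over calls equals overall accuracy.
    pred = logits.argmax(dim=1)
    correct = (pred == target).float().sum().item()
    return 100.0 * correct / target.shape[0]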
-------------------------------------------------------------------------------- /runners/zs_infer_ablate_seed.py: --------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import wandb
4 | 
5 | import torch
6 | 
7 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
8 | from utils.utils import *
9 | 
10 | 
11 | def infer(args, lm3d_model, test_loader, clip_weights):
12 |     # assert args.cache_type == 'local', f'Local cache is expected, but got {args.cache_type}!'
13 | 
14 |     print('>>> In function `infer`: zero-shot inference')
15 | 
16 |     accuracies = []
17 |     for i, (pc, target, _, rgb) in enumerate(test_loader):
18 |         # pc: (1, n, 3)   rgb: (1, n, 3)
19 |         feature = torch.cat([pc, rgb], dim=-1).half()
20 |         target = target.cuda()
21 | 
22 |         # pc_feats: (1, emb_dim)
23 |         # patch_centers: (5, emb_dim)
24 |         # clip_logits: (1, n_cls)
25 |         # loss: a scalar
26 |         # prob_map: (1, n_cls)
27 |         # pred: a scalar, class index
28 |         if args.cache_type == 'local':
29 |             patch_centers, clip_logits, loss, prob_map, pred = get_logits(args, feature, lm3d_model, clip_weights)
30 |         elif args.cache_type == 'global':
31 |             pc_feats, clip_logits, loss, prob_map, pred = get_logits(args, feature, lm3d_model, clip_weights)
32 |         elif args.cache_type == 'hierarchical':
33 |             pc_feats, patch_centers, clip_logits, loss, prob_map, pred = get_logits(args, feature, lm3d_model, clip_weights)
34 |         else:
35 |             raise ValueError(f'A choice from [local, global, hierarchical] is expected, but got {args.cache_type}!')
36 | 
37 |         acc = cls_acc(clip_logits, target)
38 |         accuracies.append(acc)
39 | 
40 |         # Only log when a wandb run has been initialized in `main`
41 |         if args.wandb:
42 |             wandb.log({"Averaged test accuracy": sum(accuracies)/len(accuracies)}, commit=True)
43 |         if i % args.print_freq == 0:
44 |             print("---- Zero-shot test accuracy: {:.2f}. ----\n".format(sum(accuracies)/len(accuracies)))
45 | 
46 |     print("---- ***Final*** Zero-shot test accuracy: {:.2f}. ----\n".format(sum(accuracies)/len(accuracies)))
47 |     return sum(accuracies)/len(accuracies)
48 | 
49 | 
50 | def main(args):
51 |     print('>>> In function `main`')
52 | 
53 |     clip_model, lm3d_model = load_models(args)
54 | 
55 |     preprocess = None
56 | 
57 |     # Run zero-shot inference on the dataset specified by `args.dataset`
58 |     dataset_name = args.dataset
59 | 
60 |     print(f"Processing {dataset_name} dataset.")
61 | 
62 |     test_loader, classnames, template = build_test_data_loader(args, dataset_name, args.data_root, preprocess)
63 | 
64 |     print(f'>>> {dataset_name} classnames: {classnames} \n')
65 | 
66 |     # `clip_weights` are text features of shape (emb_dim, n_cls)
67 |     clip_weights = clip_classifier(args, classnames, template, clip_model)
68 | 
69 |     if args.wandb:
70 |         if args.lm3d == 'openshape':
71 |             prefix = f"[zs_infer-ablate-seed{args.seed}]/global_feat/{args.lm3d}-{args.oshape_version}"
72 |         elif args.lm3d == 'ulip':
73 |             prefix = f"[zs_infer-ablate-seed{args.seed}]/global_feat/{args.ulip_version}"
74 |         else:
75 |             prefix = f"[zs_infer-ablate-seed{args.seed}]/global_feat/{args.lm3d}"
76 | 
77 |         if '_c' in dataset_name and 'sonn' in dataset_name:
78 |             run_name = f"{prefix}/{dataset_name}-{args.sonn_variant}-{args.npoints}/{args.cor_type}"
79 |         elif '_c' in dataset_name:
80 |             run_name = f"{prefix}/{dataset_name}-{args.npoints}/{args.cor_type}"
81 |         elif 'scanobjnn' in dataset_name:
82 |             run_name = f"{prefix}/{dataset_name}-{args.sonn_variant}-{args.npoints}"
83 |         elif 'sim2real_sonn' in dataset_name:
84 |             run_name = f"{prefix}/{dataset_name}-{args.sim2real_type}-{args.npoints}"
85 |         else:
86 |             # 'pointda' and all remaining datasets share the same naming pattern
87 |             run_name = f"{prefix}/{dataset_name}-{args.npoints}"
88 | 
89 |         run = wandb.init(project="Point-TDA", name=run_name)
90 | 
91 |     zs_acc = infer(args, lm3d_model, test_loader, clip_weights)
92 | 
93 |     if args.wandb:
94 |         wandb.log({f"{dataset_name}": zs_acc})
95 |         run.finish()
96 | 
97 | 
98 | if __name__ == '__main__':
99 |     args = get_arguments()
100 | 
101 |     print('#'*20)
102 |     print('\targs.seed:', args.seed)
103 |     print('#'*20)
104 | 
105 |     # Set random seed
106 |     set_random_seed(args.seed)
107 | 
108 |     main(args)
109 | 
-------------------------------------------------------------------------------- /scripts/eval_model_with_global_cache.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | gpu=$1
4 | lm3d=$2           # uni3d, openshape, ulip
5 | ckpt_path=$3      # weights/uni3d/lvis/model.pt, weights/uni3d/modelnet40/model.pt, weights/uni3d/scanobjnn/model.pt
6 |                   # weights/openshape/openshape-pointbert-vitg14-rgb/model.pt, weights/ulip/pointbert_ulip2.pt
7 | dataset=$4        # modelnet_c, sonn_c, snv2_c, omniobject3d
8 | sonn_variant=$5   # obj_only, obj_bg, hardest
9 | cor_type=$6       # add_global_2, jitter_2
10 | npoints=$7        # 1024/4096/16384 for `omniobject3d`
11 | os_version=$8     # vitl14, vitg14 for `openshape`
12 | ulip_version=$9   # ulip1, ulip2
13 | cache_type=${10}  # 'global', 'local', 'hierarchical'
14 | s2r_type=${11}    # 'so_obj_only_9', 'so_obj_only_11'
15 | 
16 | export CUDA_VISIBLE_DEVICES=${gpu}
17 | 
18 | if [ "$lm3d" = "uni3d" ]; then
19 |     pc_feat_dim=1408
20 |     num_group=512
21 |     group_size=64
22 |     pc_encoder_dim=512
23 |     embed_dim=1024
24 | 
25 |     pueue add -g point_cache_global \
26 |         python runners/model_with_global_cache.py \
27 |             --config configs \
28 |             --wandb-log \
29 |             --lm3d ${lm3d} \
30 |             --cache-type ${cache_type} \
31 |             --pc-feat-dim ${pc_feat_dim} \
32 |             --num-group ${num_group} \
33 | --group-size ${group_size} \ 34 | --pc-encoder-dim ${pc_encoder_dim} \ 35 | --embed-dim ${embed_dim} \ 36 | --ckpt_path ${ckpt_path} \ 37 | --dataset ${dataset} \ 38 | --sonn_variant ${sonn_variant} \ 39 | --cor_type ${cor_type} \ 40 | --sim2real_type ${s2r_type} \ 41 | --npoints ${npoints} 42 | 43 | elif [ "$lm3d" = "openshape" ]; then 44 | pueue add -g point_cache_global \ 45 | python runners/model_with_global_cache.py \ 46 | --config configs \ 47 | --wandb-log \ 48 | --lm3d ${lm3d} \ 49 | --cache-type ${cache_type} \ 50 | --ckpt_path ${ckpt_path} \ 51 | --dataset ${dataset} \ 52 | --sonn_variant ${sonn_variant} \ 53 | --cor_type ${cor_type} \ 54 | --sim2real_type ${s2r_type} \ 55 | --npoints ${npoints} \ 56 | --oshape-version ${os_version} 57 | 58 | elif [ "$lm3d" = "ulip" ]; then 59 | pueue add -g point_cache_global \ 60 | python runners/model_with_global_cache.py \ 61 | --config configs \ 62 | --wandb-log \ 63 | --lm3d ${lm3d} \ 64 | --cache-type ${cache_type} \ 65 | --ckpt_path ${ckpt_path} \ 66 | --dataset ${dataset} \ 67 | --sonn_variant ${sonn_variant} \ 68 | --cor_type ${cor_type} \ 69 | --sim2real_type ${s2r_type} \ 70 | --npoints ${npoints} \ 71 | --ulip-version ${ulip_version} 72 | 73 | else 74 | echo "The model does not match any of the supported ones." 75 | fi -------------------------------------------------------------------------------- /scripts/eval_model_with_global_cache_ablate_seed.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gpu=$1 4 | lm3d=$2 # uni3d, openshape, ulip 5 | ckpt_path=$3 # weights/uni3d/lvis/model.pt, weights/uni3d/modelnet40/model.pt, weights/uni3d/scanobjnn/model.pt 6 | # weights/openshape/openshape-pointbert-vitg14-rgb/model.pt, weights/ulip/pointbert_ulip2.pt 7 | dataset=$4 # modelnet_c, sonn_c, snv2_c, omniobject3d 8 | sonn_variant=$5 # obj_only, obj_bg, hardest 9 | cor_type=$6 # add_global_2, jitter_2 10 | npoints=$7 # 1024/4096/16384 for `omniobject3d` 11 | os_version=$8 # vitl14, vitg14 for `openshape` 12 | ulip_version=$9 # ulip1, ulip2 13 | cache_type=${10} # 'global', 'local', 'hierarchical' 14 | s2r_type=${11} # so_obj_only_9', 'so_obj_only_11' 15 | seed=${12} # 1, 2, 3 ... 
different seed for experiments 16 | 17 | export CUDA_VISIBLE_DEVICES=${gpu} 18 | 19 | if [ "$lm3d" = "uni3d" ]; then 20 | pc_feat_dim=1408 21 | num_group=512 22 | group_size=64 23 | pc_encoder_dim=512 24 | embed_dim=1024 25 | 26 | pueue add -g point_cache_global_ablations \ 27 | python runners/model_with_global_cache_ablate_seed.py \ 28 | --config configs \ 29 | --wandb-log \ 30 | --seed ${seed} \ 31 | --lm3d ${lm3d} \ 32 | --cache-type ${cache_type} \ 33 | --pc-feat-dim ${pc_feat_dim} \ 34 | --num-group ${num_group} \ 35 | --group-size ${group_size} \ 36 | --pc-encoder-dim ${pc_encoder_dim} \ 37 | --embed-dim ${embed_dim} \ 38 | --ckpt_path ${ckpt_path} \ 39 | --dataset ${dataset} \ 40 | --sonn_variant ${sonn_variant} \ 41 | --cor_type ${cor_type} \ 42 | --sim2real_type ${s2r_type} \ 43 | --npoints ${npoints} 44 | 45 | elif [ "$lm3d" = "openshape" ]; then 46 | pueue add -g point_cache_global_ablations \ 47 | python runners/model_with_global_cache_ablate_seed.py \ 48 | --config configs \ 49 | --wandb-log \ 50 | --seed ${seed} \ 51 | --lm3d ${lm3d} \ 52 | --cache-type ${cache_type} \ 53 | --ckpt_path ${ckpt_path} \ 54 | --dataset ${dataset} \ 55 | --sonn_variant ${sonn_variant} \ 56 | --cor_type ${cor_type} \ 57 | --sim2real_type ${s2r_type} \ 58 | --npoints ${npoints} \ 59 | --oshape-version ${os_version} 60 | 61 | elif [ "$lm3d" = "ulip" ]; then 62 | pueue add -g point_cache_global_ablations \ 63 | python runners/model_with_global_cache_ablate_seed.py \ 64 | --config configs \ 65 | --wandb-log \ 66 | --seed ${seed} \ 67 | --lm3d ${lm3d} \ 68 | --cache-type ${cache_type} \ 69 | --ckpt_path ${ckpt_path} \ 70 | --dataset ${dataset} \ 71 | --sonn_variant ${sonn_variant} \ 72 | --cor_type ${cor_type} \ 73 | --sim2real_type ${s2r_type} \ 74 | --npoints ${npoints} \ 75 | --ulip-version ${ulip_version} 76 | 77 | else 78 | echo "The model does not match any of the supported ones." 
79 | fi -------------------------------------------------------------------------------- /scripts/eval_model_with_global_cache_mem.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gpu=$1 4 | lm3d=$2 # uni3d, openshape, ulip 5 | ckpt_path=$3 # weights/uni3d/lvis/model.pt, weights/uni3d/modelnet40/model.pt, weights/uni3d/scanobjnn/model.pt 6 | # weights/openshape/openshape-pointbert-vitg14-rgb/model.pt, weights/ulip/pointbert_ulip2.pt 7 | dataset=$4 # modelnet_c, sonn_c, snv2_c, omniobject3d 8 | sonn_variant=$5 # obj_only, obj_bg, hardest 9 | cor_type=$6 # add_global_2, jitter_2 10 | npoints=$7 # 1024/4096/16384 for `omniobject3d` 11 | os_version=$8 # vitl14, vitg14 for `openshape` 12 | ulip_version=$9 # ulip1, ulip2 13 | cache_type=${10} # 'global', 'local', 'hierarchical' 14 | 15 | 16 | export CUDA_VISIBLE_DEVICES=${gpu} 17 | 18 | if [ "$lm3d" = "uni3d" ]; then 19 | pc_feat_dim=1408 20 | num_group=512 21 | group_size=64 22 | pc_encoder_dim=512 23 | embed_dim=1024 24 | 25 | pueue add -g point_cache_global_mem \ 26 | python runners/model_with_global_cache_mem.py \ 27 | --config configs \ 28 | --wandb-log \ 29 | --lm3d ${lm3d} \ 30 | --cache-type ${cache_type} \ 31 | --pc-feat-dim ${pc_feat_dim} \ 32 | --num-group ${num_group} \ 33 | --group-size ${group_size} \ 34 | --pc-encoder-dim ${pc_encoder_dim} \ 35 | --embed-dim ${embed_dim} \ 36 | --ckpt_path ${ckpt_path} \ 37 | --dataset ${dataset} \ 38 | --sonn_variant ${sonn_variant} \ 39 | --cor_type ${cor_type} \ 40 | --npoints ${npoints} 41 | 42 | elif [ "$lm3d" = "openshape" ]; then 43 | pueue add -g point_cache_global_mem \ 44 | python runners/model_with_global_cache_mem.py \ 45 | --config configs \ 46 | --wandb-log \ 47 | --lm3d ${lm3d} \ 48 | --cache-type ${cache_type} \ 49 | --ckpt_path ${ckpt_path} \ 50 | --dataset ${dataset} \ 51 | --sonn_variant ${sonn_variant} \ 52 | --cor_type ${cor_type} \ 53 | --npoints ${npoints} \ 54 | --oshape-version ${os_version} 55 | 56 | elif [ "$lm3d" = "ulip" ]; then 57 | pueue add -g point_cache_global_mem \ 58 | python runners/model_with_global_cache_mem.py \ 59 | --config configs \ 60 | --wandb-log \ 61 | --lm3d ${lm3d} \ 62 | --cache-type ${cache_type} \ 63 | --ckpt_path ${ckpt_path} \ 64 | --dataset ${dataset} \ 65 | --sonn_variant ${sonn_variant} \ 66 | --cor_type ${cor_type} \ 67 | --npoints ${npoints} \ 68 | --ulip-version ${ulip_version} 69 | 70 | else 71 | echo "The model does not match any of the supported ones." 
72 | fi -------------------------------------------------------------------------------- /scripts/eval_model_with_hierar_caches_ablations.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gpu=$1 4 | lm3d=$2 # uni3d, openshape, ulip 5 | ckpt_path=$3 # weights/uni3d/lvis/model.pt, weights/uni3d/modelnet40/model.pt, weights/uni3d/scanobjnn/model.pt 6 | # weights/openshape/openshape-pointbert-vitg14-rgb/model.pt, weights/ulip/pointbert_ulip2.pt 7 | dataset=$4 # modelnet_c, sonn_c, snv2_c, omniobject3d 8 | sonn_variant=$5 # obj_only, obj_bg, hardest 9 | cor_type=$6 # add_global_2, jitter_2 10 | npoints=$7 # 1024/4096/16384 for `omniobject3d` 11 | os_version=$8 # vitl14, vitg14 for `openshape` 12 | ulip_version=$9 # ulip1, ulip2 13 | cache_type=${10} # 'global', 'local', 'hierarchical' 14 | k_shot=${11} # 1, 3, 5, 7, 9 default is 3 15 | n_cluster=${12} # 3, 5, 7, 9, 11 default is 3 16 | alpha=${13} # 0.5, 1.0, 2.0, 3.0, 4.0, 5.0 default 4.0 17 | beta=${14} # 0.5, 1.0, 3.0, 5.0, 7.0, 9.0 default 3.0 18 | 19 | export CUDA_VISIBLE_DEVICES=${gpu} 20 | 21 | if [ "$lm3d" = "uni3d" ]; then 22 | pc_feat_dim=1408 23 | num_group=512 24 | group_size=64 25 | pc_encoder_dim=512 26 | embed_dim=1024 27 | 28 | pueue add -g point_cache_hierar_ablations \ 29 | python runners/model_with_hierar_caches_ablations.py \ 30 | --config configs \ 31 | --wandb-log \ 32 | --lm3d ${lm3d} \ 33 | --cache-type ${cache_type} \ 34 | --pc-feat-dim ${pc_feat_dim} \ 35 | --num-group ${num_group} \ 36 | --group-size ${group_size} \ 37 | --pc-encoder-dim ${pc_encoder_dim} \ 38 | --embed-dim ${embed_dim} \ 39 | --ckpt_path ${ckpt_path} \ 40 | --dataset ${dataset} \ 41 | --sonn_variant ${sonn_variant} \ 42 | --cor_type ${cor_type} \ 43 | --npoints ${npoints} \ 44 | --k_shot ${k_shot} \ 45 | --n_cluster ${n_cluster} \ 46 | --alpha ${alpha} \ 47 | --beta ${beta} 48 | 49 | elif [ "$lm3d" = "openshape" ]; then 50 | pueue add -g point_cache_hierar_ablations \ 51 | python runners/model_with_hierar_caches_ablations.py \ 52 | --config configs \ 53 | --wandb-log \ 54 | --lm3d ${lm3d} \ 55 | --cache-type ${cache_type} \ 56 | --ckpt_path ${ckpt_path} \ 57 | --dataset ${dataset} \ 58 | --sonn_variant ${sonn_variant} \ 59 | --cor_type ${cor_type} \ 60 | --npoints ${npoints} \ 61 | --oshape-version ${os_version} \ 62 | --k_shot ${k_shot} \ 63 | --n_cluster ${n_cluster} \ 64 | --alpha ${alpha} \ 65 | --beta ${beta} 66 | 67 | elif [ "$lm3d" = "ulip" ]; then 68 | pueue add -g point_cache_hierar_ablations \ 69 | python runners/model_with_hierar_caches_ablations.py \ 70 | --config configs \ 71 | --wandb-log \ 72 | --lm3d ${lm3d} \ 73 | --cache-type ${cache_type} \ 74 | --ckpt_path ${ckpt_path} \ 75 | --dataset ${dataset} \ 76 | --sonn_variant ${sonn_variant} \ 77 | --cor_type ${cor_type} \ 78 | --npoints ${npoints} \ 79 | --ulip-version ${ulip_version} \ 80 | --k_shot ${k_shot} \ 81 | --n_cluster ${n_cluster} \ 82 | --alpha ${alpha} \ 83 | --beta ${beta} 84 | 85 | else 86 | echo "The model does not match any of the supported ones." 
87 | fi -------------------------------------------------------------------------------- /scripts/eval_model_with_hierarchical_caches.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gpu=$1 4 | lm3d=$2 # uni3d, openshape, ulip 5 | ckpt_path=$3 # weights/uni3d/lvis/model.pt, weights/uni3d/modelnet40/model.pt, weights/uni3d/scanobjnn/model.pt 6 | # weights/openshape/openshape-pointbert-vitg14-rgb/model.pt, weights/ulip/pointbert_ulip2.pt 7 | dataset=$4 # modelnet_c, sonn_c, snv2_c, omniobject3d 8 | sonn_variant=$5 # obj_only, obj_bg, hardest 9 | cor_type=$6 # add_global_2, jitter_2 10 | npoints=$7 # 1024/4096/16384 for `omniobject3d` 11 | os_version=$8 # vitl14, vitg14 for `openshape` 12 | ulip_version=$9 # ulip1, ulip2 13 | cache_type=${10} # 'global', 'local', 'hierarchical' 14 | s2r_type=${11} # so_obj_only_9', 'so_obj_only_11' 15 | 16 | export CUDA_VISIBLE_DEVICES=${gpu} 17 | 18 | if [ "$lm3d" = "uni3d" ]; then 19 | pc_feat_dim=1408 20 | num_group=512 21 | group_size=64 22 | pc_encoder_dim=512 23 | embed_dim=1024 24 | 25 | pueue add -g point_cache_hierar \ 26 | python runners/model_with_hierarchical_caches.py \ 27 | --config configs \ 28 | --wandb-log \ 29 | --lm3d ${lm3d} \ 30 | --cache-type ${cache_type} \ 31 | --pc-feat-dim ${pc_feat_dim} \ 32 | --num-group ${num_group} \ 33 | --group-size ${group_size} \ 34 | --pc-encoder-dim ${pc_encoder_dim} \ 35 | --embed-dim ${embed_dim} \ 36 | --ckpt_path ${ckpt_path} \ 37 | --dataset ${dataset} \ 38 | --sonn_variant ${sonn_variant} \ 39 | --cor_type ${cor_type} \ 40 | --sim2real_type ${s2r_type} \ 41 | --npoints ${npoints} 42 | 43 | elif [ "$lm3d" = "openshape" ]; then 44 | pueue add -g point_cache_hierar \ 45 | python runners/model_with_hierarchical_caches.py \ 46 | --config configs \ 47 | --wandb-log \ 48 | --lm3d ${lm3d} \ 49 | --cache-type ${cache_type} \ 50 | --ckpt_path ${ckpt_path} \ 51 | --dataset ${dataset} \ 52 | --sonn_variant ${sonn_variant} \ 53 | --cor_type ${cor_type} \ 54 | --sim2real_type ${s2r_type} \ 55 | --npoints ${npoints} \ 56 | --oshape-version ${os_version} 57 | 58 | elif [ "$lm3d" = "ulip" ]; then 59 | pueue add -g point_cache_hierar \ 60 | python runners/model_with_hierarchical_caches.py \ 61 | --config configs \ 62 | --wandb-log \ 63 | --lm3d ${lm3d} \ 64 | --cache-type ${cache_type} \ 65 | --ckpt_path ${ckpt_path} \ 66 | --dataset ${dataset} \ 67 | --sonn_variant ${sonn_variant} \ 68 | --cor_type ${cor_type} \ 69 | --sim2real_type ${s2r_type} \ 70 | --npoints ${npoints} \ 71 | --ulip-version ${ulip_version} 72 | 73 | else 74 | echo "The model does not match any of the supported ones." 
75 | fi -------------------------------------------------------------------------------- /scripts/eval_model_with_hierarchical_caches_ablate_seed.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gpu=$1 4 | lm3d=$2 # uni3d, openshape, ulip 5 | ckpt_path=$3 # weights/uni3d/lvis/model.pt, weights/uni3d/modelnet40/model.pt, weights/uni3d/scanobjnn/model.pt 6 | # weights/openshape/openshape-pointbert-vitg14-rgb/model.pt, weights/ulip/pointbert_ulip2.pt 7 | dataset=$4 # modelnet_c, sonn_c, snv2_c, omniobject3d 8 | sonn_variant=$5 # obj_only, obj_bg, hardest 9 | cor_type=$6 # add_global_2, jitter_2 10 | npoints=$7 # 1024/4096/16384 for `omniobject3d` 11 | os_version=$8 # vitl14, vitg14 for `openshape` 12 | ulip_version=$9 # ulip1, ulip2 13 | cache_type=${10} # 'global', 'local', 'hierarchical' 14 | s2r_type=${11} # so_obj_only_9', 'so_obj_only_11' 15 | seed=${12} # 1, 2, 3 ... different seed for experiments 16 | 17 | export CUDA_VISIBLE_DEVICES=${gpu} 18 | 19 | if [ "$lm3d" = "uni3d" ]; then 20 | pc_feat_dim=1408 21 | num_group=512 22 | group_size=64 23 | pc_encoder_dim=512 24 | embed_dim=1024 25 | 26 | pueue add -g point_cache_hierar_ablations \ 27 | python runners/model_with_hierarchical_caches_ablate_seed.py \ 28 | --config configs \ 29 | --wandb-log \ 30 | --seed ${seed} \ 31 | --lm3d ${lm3d} \ 32 | --cache-type ${cache_type} \ 33 | --pc-feat-dim ${pc_feat_dim} \ 34 | --num-group ${num_group} \ 35 | --group-size ${group_size} \ 36 | --pc-encoder-dim ${pc_encoder_dim} \ 37 | --embed-dim ${embed_dim} \ 38 | --ckpt_path ${ckpt_path} \ 39 | --dataset ${dataset} \ 40 | --sonn_variant ${sonn_variant} \ 41 | --cor_type ${cor_type} \ 42 | --sim2real_type ${s2r_type} \ 43 | --npoints ${npoints} 44 | 45 | elif [ "$lm3d" = "openshape" ]; then 46 | pueue add -g point_cache_hierar_ablations \ 47 | python runners/model_with_hierarchical_caches_ablate_seed.py \ 48 | --config configs \ 49 | --wandb-log \ 50 | --seed ${seed} \ 51 | --lm3d ${lm3d} \ 52 | --cache-type ${cache_type} \ 53 | --ckpt_path ${ckpt_path} \ 54 | --dataset ${dataset} \ 55 | --sonn_variant ${sonn_variant} \ 56 | --cor_type ${cor_type} \ 57 | --sim2real_type ${s2r_type} \ 58 | --npoints ${npoints} \ 59 | --oshape-version ${os_version} 60 | 61 | elif [ "$lm3d" = "ulip" ]; then 62 | pueue add -g point_cache_hierar_ablations \ 63 | python runners/model_with_hierarchical_caches_ablate_seed.py \ 64 | --config configs \ 65 | --wandb-log \ 66 | --seed ${seed} \ 67 | --lm3d ${lm3d} \ 68 | --cache-type ${cache_type} \ 69 | --ckpt_path ${ckpt_path} \ 70 | --dataset ${dataset} \ 71 | --sonn_variant ${sonn_variant} \ 72 | --cor_type ${cor_type} \ 73 | --sim2real_type ${s2r_type} \ 74 | --npoints ${npoints} \ 75 | --ulip-version ${ulip_version} 76 | 77 | else 78 | echo "The model does not match any of the supported ones." 
79 | fi -------------------------------------------------------------------------------- /scripts/eval_model_with_hierarchical_caches_ent.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gpu=$1 4 | lm3d=$2 # uni3d, openshape, ulip 5 | ckpt_path=$3 # weights/uni3d/lvis/model.pt, weights/uni3d/modelnet40/model.pt, weights/uni3d/scanobjnn/model.pt 6 | # weights/openshape/openshape-pointbert-vitg14-rgb/model.pt, weights/ulip/pointbert_ulip2.pt 7 | dataset=$4 # modelnet_c, sonn_c, snv2_c, omniobject3d 8 | sonn_variant=$5 # obj_only, obj_bg, hardest 9 | cor_type=$6 # add_global_2, jitter_2 10 | npoints=$7 # 1024/4096/16384 for `omniobject3d` 11 | os_version=$8 # vitl14, vitg14 for `openshape` 12 | ulip_version=$9 # ulip1, ulip2 13 | cache_type=${10} # 'global', 'local', 'hierarchical' 14 | 15 | 16 | export CUDA_VISIBLE_DEVICES=${gpu} 17 | 18 | if [ "$lm3d" = "uni3d" ]; then 19 | pc_feat_dim=1408 20 | num_group=512 21 | group_size=64 22 | pc_encoder_dim=512 23 | embed_dim=1024 24 | 25 | pueue add -g point_cache_hierar_ent \ 26 | python runners/model_with_hierarchical_caches_ent.py \ 27 | --config configs \ 28 | --wandb-log \ 29 | --lm3d ${lm3d} \ 30 | --cache-type ${cache_type} \ 31 | --pc-feat-dim ${pc_feat_dim} \ 32 | --num-group ${num_group} \ 33 | --group-size ${group_size} \ 34 | --pc-encoder-dim ${pc_encoder_dim} \ 35 | --embed-dim ${embed_dim} \ 36 | --ckpt_path ${ckpt_path} \ 37 | --dataset ${dataset} \ 38 | --sonn_variant ${sonn_variant} \ 39 | --cor_type ${cor_type} \ 40 | --npoints ${npoints} 41 | 42 | elif [ "$lm3d" = "openshape" ]; then 43 | pueue add -g point_cache_hierar_ent \ 44 | python runners/model_with_hierarchical_caches_ent.py \ 45 | --config configs \ 46 | --wandb-log \ 47 | --lm3d ${lm3d} \ 48 | --cache-type ${cache_type} \ 49 | --ckpt_path ${ckpt_path} \ 50 | --dataset ${dataset} \ 51 | --sonn_variant ${sonn_variant} \ 52 | --cor_type ${cor_type} \ 53 | --npoints ${npoints} \ 54 | --oshape-version ${os_version} 55 | 56 | elif [ "$lm3d" = "ulip" ]; then 57 | pueue add -g point_cache_hierar_ent \ 58 | python runners/model_with_hierarchical_caches_ent.py \ 59 | --config configs \ 60 | --wandb-log \ 61 | --lm3d ${lm3d} \ 62 | --cache-type ${cache_type} \ 63 | --ckpt_path ${ckpt_path} \ 64 | --dataset ${dataset} \ 65 | --sonn_variant ${sonn_variant} \ 66 | --cor_type ${cor_type} \ 67 | --npoints ${npoints} \ 68 | --ulip-version ${ulip_version} 69 | 70 | else 71 | echo "The model does not match any of the supported ones." 
72 | fi -------------------------------------------------------------------------------- /scripts/eval_model_with_hierarchical_caches_mem.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gpu=$1 4 | lm3d=$2 # uni3d, openshape, ulip 5 | ckpt_path=$3 # weights/uni3d/lvis/model.pt, weights/uni3d/modelnet40/model.pt, weights/uni3d/scanobjnn/model.pt 6 | # weights/openshape/openshape-pointbert-vitg14-rgb/model.pt, weights/ulip/pointbert_ulip2.pt 7 | dataset=$4 # modelnet_c, sonn_c, snv2_c, omniobject3d 8 | sonn_variant=$5 # obj_only, obj_bg, hardest 9 | cor_type=$6 # add_global_2, jitter_2 10 | npoints=$7 # 1024/4096/16384 for `omniobject3d` 11 | os_version=$8 # vitl14, vitg14 for `openshape` 12 | ulip_version=$9 # ulip1, ulip2 13 | cache_type=${10} # 'global', 'local', 'hierarchical' 14 | 15 | 16 | export CUDA_VISIBLE_DEVICES=${gpu} 17 | 18 | if [ "$lm3d" = "uni3d" ]; then 19 | pc_feat_dim=1408 20 | num_group=512 21 | group_size=64 22 | pc_encoder_dim=512 23 | embed_dim=1024 24 | 25 | pueue add -g point_cache_hierar_mem \ 26 | python runners/model_with_hierarchical_caches_mem.py \ 27 | --config configs \ 28 | --wandb-log \ 29 | --lm3d ${lm3d} \ 30 | --cache-type ${cache_type} \ 31 | --pc-feat-dim ${pc_feat_dim} \ 32 | --num-group ${num_group} \ 33 | --group-size ${group_size} \ 34 | --pc-encoder-dim ${pc_encoder_dim} \ 35 | --embed-dim ${embed_dim} \ 36 | --ckpt_path ${ckpt_path} \ 37 | --dataset ${dataset} \ 38 | --sonn_variant ${sonn_variant} \ 39 | --cor_type ${cor_type} \ 40 | --npoints ${npoints} 41 | 42 | elif [ "$lm3d" = "openshape" ]; then 43 | pueue add -g point_cache_hierar_mem \ 44 | python runners/model_with_hierarchical_caches_mem.py \ 45 | --config configs \ 46 | --wandb-log \ 47 | --lm3d ${lm3d} \ 48 | --cache-type ${cache_type} \ 49 | --ckpt_path ${ckpt_path} \ 50 | --dataset ${dataset} \ 51 | --sonn_variant ${sonn_variant} \ 52 | --cor_type ${cor_type} \ 53 | --npoints ${npoints} \ 54 | --oshape-version ${os_version} 55 | 56 | elif [ "$lm3d" = "ulip" ]; then 57 | pueue add -g point_cache_hierar_mem \ 58 | python runners/model_with_hierarchical_caches_mem.py \ 59 | --config configs \ 60 | --wandb-log \ 61 | --lm3d ${lm3d} \ 62 | --cache-type ${cache_type} \ 63 | --ckpt_path ${ckpt_path} \ 64 | --dataset ${dataset} \ 65 | --sonn_variant ${sonn_variant} \ 66 | --cor_type ${cor_type} \ 67 | --npoints ${npoints} \ 68 | --ulip-version ${ulip_version} 69 | 70 | else 71 | echo "The model does not match any of the supported ones." 
72 | fi -------------------------------------------------------------------------------- /scripts/eval_model_with_hierarchical_caches_speed.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gpu=$1 4 | lm3d=$2 # uni3d, openshape, ulip 5 | ckpt_path=$3 # weights/uni3d/lvis/model.pt, weights/uni3d/modelnet40/model.pt, weights/uni3d/scanobjnn/model.pt 6 | # weights/openshape/openshape-pointbert-vitg14-rgb/model.pt, weights/ulip/pointbert_ulip2.pt 7 | dataset=$4 # modelnet_c, sonn_c, snv2_c, omniobject3d 8 | sonn_variant=$5 # obj_only, obj_bg, hardest 9 | cor_type=$6 # add_global_2, jitter_2 10 | npoints=$7 # 1024/4096/16384 for `omniobject3d` 11 | os_version=$8 # vitl14, vitg14 for `openshape` 12 | ulip_version=$9 # ulip1, ulip2 13 | cache_type=${10} # 'global', 'local', 'hierarchical' 14 | 15 | 16 | export CUDA_VISIBLE_DEVICES=${gpu} 17 | 18 | if [ "$lm3d" = "uni3d" ]; then 19 | pc_feat_dim=1408 20 | num_group=512 21 | group_size=64 22 | pc_encoder_dim=512 23 | embed_dim=1024 24 | 25 | pueue add -g point_cache_hierar_speed \ 26 | python runners/model_with_hierarchical_caches_speed.py \ 27 | --config configs \ 28 | --wandb-log \ 29 | --lm3d ${lm3d} \ 30 | --cache-type ${cache_type} \ 31 | --pc-feat-dim ${pc_feat_dim} \ 32 | --num-group ${num_group} \ 33 | --group-size ${group_size} \ 34 | --pc-encoder-dim ${pc_encoder_dim} \ 35 | --embed-dim ${embed_dim} \ 36 | --ckpt_path ${ckpt_path} \ 37 | --dataset ${dataset} \ 38 | --sonn_variant ${sonn_variant} \ 39 | --cor_type ${cor_type} \ 40 | --npoints ${npoints} 41 | 42 | elif [ "$lm3d" = "openshape" ]; then 43 | pueue add -g point_cache_hierar_speed \ 44 | python runners/model_with_hierarchical_caches_speed.py \ 45 | --config configs \ 46 | --wandb-log \ 47 | --lm3d ${lm3d} \ 48 | --cache-type ${cache_type} \ 49 | --ckpt_path ${ckpt_path} \ 50 | --dataset ${dataset} \ 51 | --sonn_variant ${sonn_variant} \ 52 | --cor_type ${cor_type} \ 53 | --npoints ${npoints} \ 54 | --oshape-version ${os_version} 55 | 56 | elif [ "$lm3d" = "ulip" ]; then 57 | pueue add -g point_cache_hierar_speed \ 58 | python runners/model_with_hierarchical_caches_speed.py \ 59 | --config configs \ 60 | --wandb-log \ 61 | --lm3d ${lm3d} \ 62 | --cache-type ${cache_type} \ 63 | --ckpt_path ${ckpt_path} \ 64 | --dataset ${dataset} \ 65 | --sonn_variant ${sonn_variant} \ 66 | --cor_type ${cor_type} \ 67 | --npoints ${npoints} \ 68 | --ulip-version ${ulip_version} 69 | 70 | else 71 | echo "The model does not match any of the supported ones." 72 | fi -------------------------------------------------------------------------------- /scripts/eval_zs_infer.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gpu=$1 4 | lm3d=$2 # uni3d, openshape, ulip 5 | ckpt_path=$3 # weights/uni3d/lvis/model.pt, weights/uni3d/modelnet40/model.pt, weights/uni3d/scanobjnn/model.pt 6 | # weights/openshape/openshape-pointbert-vitg14-rgb/model.pt, weights/ulip/pointbert_ulip2.pt 7 | dataset=$4 # modelnet_c, sonn_c, snv2_c, omniobject3d, o-lvis, modelnet40, scanobjnn 8 | sonn_variant=$5 # obj_only, obj_bg, hardest 9 | cor_type=$6 # add_global_2, jitter_2 10 | npoints=$7 # 1024/4096/16384 for `omniobject3d` 11 | os_version=$8 # vitl14, vitg14 for `openshape` 12 | ulip_version=$9 # ulip1, ulip2 13 | cache_type=${10} # 'global', 'local', 'hierarchical' 14 | s2r_type=${11} # so_obj_only_9', 'so_obj_only_11' 15 | 16 | echo "... cache_type: "${cache_type} ... 
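# NOTE: illustrative invocation only (not part of the original script). The values below are
# taken from the argument comments above; slots that do not apply to a given run (e.g.
# `sonn_variant` for modelnet_c, or the OpenShape/ULIP versions for uni3d) still need
# positional placeholders, because all eleven arguments are read by position:
#
#   bash scripts/eval_zs_infer.sh 0 uni3d weights/uni3d/lvis/model.pt modelnet_c \
#       hardest add_global_2 1024 vitg14 ulip2 global so_obj_only_9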
17 | export CUDA_VISIBLE_DEVICES=${gpu} 18 | 19 | if [ "$lm3d" = "uni3d" ]; then 20 | pc_feat_dim=1408 21 | num_group=512 22 | group_size=64 23 | pc_encoder_dim=512 24 | embed_dim=1024 25 | 26 | pueue add -g point_cache_zs_infer \ 27 | python runners/zs_infer.py \ 28 | --config configs \ 29 | --wandb-log \ 30 | --lm3d ${lm3d} \ 31 | --cache-type ${cache_type} \ 32 | --pc-feat-dim ${pc_feat_dim} \ 33 | --num-group ${num_group} \ 34 | --group-size ${group_size} \ 35 | --pc-encoder-dim ${pc_encoder_dim} \ 36 | --embed-dim ${embed_dim} \ 37 | --ckpt_path ${ckpt_path} \ 38 | --dataset ${dataset} \ 39 | --sonn_variant ${sonn_variant} \ 40 | --cor_type ${cor_type} \ 41 | --sim2real_type ${s2r_type} \ 42 | --npoints ${npoints} 43 | 44 | elif [ "$lm3d" = "openshape" ]; then 45 | pueue add -g point_cache_zs_infer \ 46 | python runners/zs_infer.py \ 47 | --config configs \ 48 | --wandb-log \ 49 | --lm3d ${lm3d} \ 50 | --cache-type ${cache_type} \ 51 | --ckpt_path ${ckpt_path} \ 52 | --dataset ${dataset} \ 53 | --sonn_variant ${sonn_variant} \ 54 | --cor_type ${cor_type} \ 55 | --npoints ${npoints} \ 56 | --sim2real_type ${s2r_type} \ 57 | --oshape-version ${os_version} 58 | 59 | elif [ "$lm3d" = "ulip" ]; then 60 | pueue add -g point_cache_zs_infer \ 61 | python runners/zs_infer.py \ 62 | --config configs \ 63 | --wandb-log \ 64 | --lm3d ${lm3d} \ 65 | --cache-type ${cache_type} \ 66 | --ckpt_path ${ckpt_path} \ 67 | --dataset ${dataset} \ 68 | --sonn_variant ${sonn_variant} \ 69 | --cor_type ${cor_type} \ 70 | --npoints ${npoints} \ 71 | --sim2real_type ${s2r_type} \ 72 | --ulip-version ${ulip_version} 73 | 74 | else 75 | echo "The model does not match any of the supported ones." 76 | fi -------------------------------------------------------------------------------- /scripts/eval_zs_infer_ablate_seed.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gpu=$1 4 | lm3d=$2 # uni3d, openshape, ulip 5 | ckpt_path=$3 # weights/uni3d/lvis/model.pt, weights/uni3d/modelnet40/model.pt, weights/uni3d/scanobjnn/model.pt 6 | # weights/openshape/openshape-pointbert-vitg14-rgb/model.pt, weights/ulip/pointbert_ulip2.pt 7 | dataset=$4 # modelnet_c, sonn_c, snv2_c, omniobject3d, o-lvis, modelnet40, scanobjnn 8 | sonn_variant=$5 # obj_only, obj_bg, hardest 9 | cor_type=$6 # add_global_2, jitter_2 10 | npoints=$7 # 1024/4096/16384 for `omniobject3d` 11 | os_version=$8 # vitl14, vitg14 for `openshape` 12 | ulip_version=$9 # ulip1, ulip2 13 | cache_type=${10} # 'global', 'local', 'hierarchical' 14 | s2r_type=${11} # so_obj_only_9', 'so_obj_only_11' 15 | seed=${12} # 1, 2, 3 ... different seed for experiments 16 | 17 | echo "... cache_type: "${cache_type} ... 
18 | export CUDA_VISIBLE_DEVICES=${gpu}
19 | 
20 | if [ "$lm3d" = "uni3d" ]; then
21 |     pc_feat_dim=1408
22 |     num_group=512
23 |     group_size=64
24 |     pc_encoder_dim=512
25 |     embed_dim=1024
26 | 
27 |     pueue add -g point_cache_zs_infer \
28 |         python runners/zs_infer_ablate_seed.py \
29 |             --config configs \
30 |             --wandb-log \
31 |             --seed ${seed} \
32 |             --lm3d ${lm3d} \
33 |             --cache-type ${cache_type} \
34 |             --pc-feat-dim ${pc_feat_dim} \
35 |             --num-group ${num_group} \
36 |             --group-size ${group_size} \
37 |             --pc-encoder-dim ${pc_encoder_dim} \
38 |             --embed-dim ${embed_dim} \
39 |             --ckpt_path ${ckpt_path} \
40 |             --dataset ${dataset} \
41 |             --sonn_variant ${sonn_variant} \
42 |             --cor_type ${cor_type} \
43 |             --sim2real_type ${s2r_type} \
44 |             --npoints ${npoints}
45 | 
46 | elif [ "$lm3d" = "openshape" ]; then
47 |     pueue add -g point_cache_zs_infer \
48 |         python runners/zs_infer_ablate_seed.py \
49 |             --config configs \
50 |             --wandb-log \
51 |             --seed ${seed} \
52 |             --lm3d ${lm3d} \
53 |             --cache-type ${cache_type} \
54 |             --ckpt_path ${ckpt_path} \
55 |             --dataset ${dataset} \
56 |             --sonn_variant ${sonn_variant} \
57 |             --cor_type ${cor_type} \
58 |             --npoints ${npoints} \
59 |             --sim2real_type ${s2r_type} \
60 |             --oshape-version ${os_version}
61 | 
62 | elif [ "$lm3d" = "ulip" ]; then
63 |     pueue add -g point_cache_zs_infer \
64 |         python runners/zs_infer_ablate_seed.py \
65 |             --config configs \
66 |             --wandb-log \
67 |             --seed ${seed} \
68 |             --lm3d ${lm3d} \
69 |             --cache-type ${cache_type} \
70 |             --ckpt_path ${ckpt_path} \
71 |             --dataset ${dataset} \
72 |             --sonn_variant ${sonn_variant} \
73 |             --cor_type ${cor_type} \
74 |             --npoints ${npoints} \
75 |             --sim2real_type ${s2r_type} \
76 |             --ulip-version ${ulip_version}
77 | 
78 | else
79 |     echo "The model does not match any of the supported ones."
80 | fi
-------------------------------------------------------------------------------- /scripts/param_count.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | gpu=$1
4 | lm3d=$2          # uni3d, openshape, ulip
5 | ckpt_path=$3     # weights/uni3d/lvis/model.pt, weights/uni3d/modelnet40/model.pt, weights/uni3d/scanobjnn/model.pt
6 |                  # weights/openshape/openshape-pointbert-vitg14-rgb/model.pt, weights/ulip/pointbert_ulip2.pt
7 | os_version=$4    # vitl14, vitg14 for `openshape`
8 | ulip_version=$5  # ulip1, ulip2
9 | 
10 | export CUDA_VISIBLE_DEVICES=${gpu}
11 | 
12 | if [ "$lm3d" = "uni3d" ]; then
13 |     pc_feat_dim=1408
14 |     num_group=512
15 |     group_size=64
16 |     pc_encoder_dim=512
17 |     embed_dim=1024
18 | 
19 |     pueue add -g point_cache_zs_infer \
20 |         python runners/param_count.py \
21 |             --config configs \
22 |             --lm3d ${lm3d} \
23 |             --pc-feat-dim ${pc_feat_dim} \
24 |             --num-group ${num_group} \
25 |             --group-size ${group_size} \
26 |             --pc-encoder-dim ${pc_encoder_dim} \
27 |             --embed-dim ${embed_dim} \
28 |             --ckpt_path ${ckpt_path}
29 | 
30 | elif [ "$lm3d" = "openshape" ]; then
31 |     pueue add -g point_cache_zs_infer \
32 |         python runners/param_count.py \
33 |             --config configs \
34 |             --lm3d ${lm3d} \
35 |             --ckpt_path ${ckpt_path} \
36 |             --oshape-version ${os_version}
37 | 
38 | elif [ "$lm3d" = "ulip" ]; then
39 |     pueue add -g point_cache_zs_infer \
40 |         python runners/param_count.py \
41 |             --config configs \
42 |             --lm3d ${lm3d} \
43 |             --ckpt_path ${ckpt_path} \
44 |             --ulip-version ${ulip_version}
45 | 
46 | else
47 |     echo "The model does not match any of the supported ones."
48 | fi -------------------------------------------------------------------------------- /scripts/record_adaptation_acc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gpu=$1 4 | lm3d=$2 # uni3d, openshape, ulip 5 | ckpt_path=$3 # weights/uni3d/lvis/model.pt, weights/uni3d/modelnet40/model.pt, weights/uni3d/scanobjnn/model.pt 6 | # weights/openshape/openshape-pointbert-vitg14-rgb/model.pt, weights/ulip/pointbert_ulip2.pt 7 | dataset=$4 # modelnet_c, sonn_c, snv2_c, omniobject3d 8 | sonn_variant=$5 # obj_only, obj_bg, hardest 9 | cor_type=$6 # add_global_2, jitter_2 10 | npoints=$7 # 1024/4096/16384 for `omniobject3d` 11 | os_version=$8 # vitl14, vitg14 for `openshape` 12 | ulip_version=$9 # ulip1, ulip2 13 | cache_type=${10} # 'global', 'local', 'hierarchical' 14 | 15 | 16 | export CUDA_VISIBLE_DEVICES=${gpu} 17 | 18 | if [ "$lm3d" = "uni3d" ]; then 19 | pc_feat_dim=1408 20 | num_group=512 21 | group_size=64 22 | pc_encoder_dim=512 23 | embed_dim=1024 24 | 25 | pueue add -g point_cache_vis \ 26 | python runners/record_adaptation_acc.py \ 27 | --config configs \ 28 | --wandb-log \ 29 | --lm3d ${lm3d} \ 30 | --cache-type ${cache_type} \ 31 | --pc-feat-dim ${pc_feat_dim} \ 32 | --num-group ${num_group} \ 33 | --group-size ${group_size} \ 34 | --pc-encoder-dim ${pc_encoder_dim} \ 35 | --embed-dim ${embed_dim} \ 36 | --ckpt_path ${ckpt_path} \ 37 | --dataset ${dataset} \ 38 | --sonn_variant ${sonn_variant} \ 39 | --cor_type ${cor_type} \ 40 | --npoints ${npoints} 41 | 42 | elif [ "$lm3d" = "openshape" ]; then 43 | pueue add -g point_cache_vis \ 44 | python runners/record_adaptation_acc.py \ 45 | --config configs \ 46 | --wandb-log \ 47 | --lm3d ${lm3d} \ 48 | --cache-type ${cache_type} \ 49 | --ckpt_path ${ckpt_path} \ 50 | --dataset ${dataset} \ 51 | --sonn_variant ${sonn_variant} \ 52 | --cor_type ${cor_type} \ 53 | --npoints ${npoints} \ 54 | --oshape-version ${os_version} 55 | 56 | elif [ "$lm3d" = "ulip" ]; then 57 | pueue add -g point_cache_vis \ 58 | python runners/record_adaptation_acc.py \ 59 | --config configs \ 60 | --wandb-log \ 61 | --lm3d ${lm3d} \ 62 | --cache-type ${cache_type} \ 63 | --ckpt_path ${ckpt_path} \ 64 | --dataset ${dataset} \ 65 | --sonn_variant ${sonn_variant} \ 66 | --cor_type ${cor_type} \ 67 | --npoints ${npoints} \ 68 | --ulip-version ${ulip_version} 69 | 70 | else 71 | echo "The model does not match any of the supported ones." 
72 | fi -------------------------------------------------------------------------------- /scripts/record_adaptation_logits.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gpu=$1 4 | lm3d=$2 # uni3d, openshape, ulip 5 | ckpt_path=$3 # weights/uni3d/lvis/model.pt, weights/uni3d/modelnet40/model.pt, weights/uni3d/scanobjnn/model.pt 6 | # weights/openshape/openshape-pointbert-vitg14-rgb/model.pt, weights/ulip/pointbert_ulip2.pt 7 | dataset=$4 # modelnet_c, sonn_c, snv2_c, omniobject3d 8 | sonn_variant=$5 # obj_only, obj_bg, hardest 9 | cor_type=$6 # add_global_2, jitter_2 10 | npoints=$7 # 1024/4096/16384 for `omniobject3d` 11 | os_version=$8 # vitl14, vitg14 for `openshape` 12 | ulip_version=$9 # ulip1, ulip2 13 | cache_type=${10} # 'global', 'local', 'hierarchical' 14 | 15 | 16 | export CUDA_VISIBLE_DEVICES=${gpu} 17 | 18 | if [ "$lm3d" = "uni3d" ]; then 19 | pc_feat_dim=1408 20 | num_group=512 21 | group_size=64 22 | pc_encoder_dim=512 23 | embed_dim=1024 24 | 25 | pueue add -g point_cache_vis \ 26 | python runners/record_adaptation_logits.py \ 27 | --config configs \ 28 | --wandb-log \ 29 | --lm3d ${lm3d} \ 30 | --cache-type ${cache_type} \ 31 | --pc-feat-dim ${pc_feat_dim} \ 32 | --num-group ${num_group} \ 33 | --group-size ${group_size} \ 34 | --pc-encoder-dim ${pc_encoder_dim} \ 35 | --embed-dim ${embed_dim} \ 36 | --ckpt_path ${ckpt_path} \ 37 | --dataset ${dataset} \ 38 | --sonn_variant ${sonn_variant} \ 39 | --cor_type ${cor_type} \ 40 | --npoints ${npoints} 41 | 42 | elif [ "$lm3d" = "openshape" ]; then 43 | pueue add -g point_cache_vis \ 44 | python runners/record_adaptation_logits.py \ 45 | --config configs \ 46 | --wandb-log \ 47 | --lm3d ${lm3d} \ 48 | --cache-type ${cache_type} \ 49 | --ckpt_path ${ckpt_path} \ 50 | --dataset ${dataset} \ 51 | --sonn_variant ${sonn_variant} \ 52 | --cor_type ${cor_type} \ 53 | --npoints ${npoints} \ 54 | --oshape-version ${os_version} 55 | 56 | elif [ "$lm3d" = "ulip" ]; then 57 | pueue add -g point_cache_vis \ 58 | python runners/record_adaptation_logits.py \ 59 | --config configs \ 60 | --wandb-log \ 61 | --lm3d ${lm3d} \ 62 | --cache-type ${cache_type} \ 63 | --ckpt_path ${ckpt_path} \ 64 | --dataset ${dataset} \ 65 | --sonn_variant ${sonn_variant} \ 66 | --cor_type ${cor_type} \ 67 | --npoints ${npoints} \ 68 | --ulip-version ${ulip_version} 69 | 70 | else 71 | echo "The model does not match any of the supported ones." 72 | fi -------------------------------------------------------------------------------- /utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/auniquesun/Point-Cache/55f6c05943189816cd094a594edb6e7caddab7d0/utils/__init__.py -------------------------------------------------------------------------------- /utils/check_img_text_acc.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | import open_clip 4 | import torch 5 | 6 | from utils import * 7 | 8 | 9 | def get_arguments(): 10 | """Get arguments of the test-time adaptation.""" 11 | parser = argparse.ArgumentParser() 12 | parser.add_argument('--config', dest='config', help='settings of TDA on specific dataset in yaml format.') 13 | parser.add_argument('--wandb-log', dest='wandb', action='store_true', help='Whether you want to log to wandb. 
Include this flag to enable logging.')
14 |     parser.add_argument('--dataset', dest='dataset', type=str, required=True, help="Dataset to process, separated by a slash (/). Example: I/A/V/R/S")
15 |     parser.add_argument('--data-root', dest='data_root', type=str, default='./data/', help='Path to the datasets directory. Default is ./data/')
16 |     parser.add_argument('--backbone', dest='backbone', type=str, choices=['RN50', 'ViT-B/16'], help='CLIP model backbone to use: RN50 or ViT-B/16.')
17 | 
18 |     # point encoder & clip & weights
19 |     parser.add_argument('--model', default='create_uni3d', type=str)
20 |     parser.add_argument("--pc-model", type=str, default="eva_giant_patch14_560", help="Name of pointcloud backbone to use.",)
21 |     parser.add_argument("--pretrained-pc", default='', type=str, help="Use a pretrained CLIP model vision weights with the specified tag or file path.",)
22 |     parser.add_argument("--clip-model", type=str, default="EVA02-E-14-plus", help="Name of the vision and text backbone to use.",)
23 |     parser.add_argument("--pretrained", default='weights/open_clip_pytorch_model_laion2b_s9b_b144k.bin', type=str, help="Use a pretrained CLIP model weights with the specified tag or file path.",)
24 |     parser.add_argument("--device", default=0, type=int, help="The GPU device id to use.",)
25 |     parser.add_argument('--ckpt_path', default='weights/uni3d_g_ensembled_model.pt', help='the ckpt to test 3d zero shot')
26 | 
27 |     # point encoder config
28 |     parser.add_argument('--npoints', default=8192, type=int, help='number of points used for pre-train and test.')
29 |     parser.add_argument("--pc-feat-dim", type=int, default=768, help="Pointcloud feature dimension.")
30 |     parser.add_argument("--group-size", type=int, default=32, help="Pointcloud Transformer group size.")
31 |     parser.add_argument("--num-group", type=int, default=512, help="Pointcloud Transformer number of groups.")
32 |     parser.add_argument("--pc-encoder-dim", type=int, default=512, help="Pointcloud Transformer encoder dimension.")
33 |     parser.add_argument("--embed-dim", type=int, default=512, help="teacher embedding dimension.")
34 |     parser.add_argument("--patch-dropout", type=float, default=0., help="flip patch dropout.")
35 |     parser.add_argument('--drop-path-rate', default=0.0, type=float)
36 | 
37 |     parser.add_argument('--distributed', action='store_true', default=False, help='whether to use distributed inference')
38 | 
39 |     # data
40 |     parser.add_argument('--openshape_setting', action='store_true', default=False,
41 |                         help='whether to use osaug, by default enabled with openshape.')
42 |     parser.add_argument('--objaverse_lvis_root', type=str, default='data/objaverse_lvis', help='')
43 |     parser.add_argument('--scanobjnn_root', type=str, default='data/scanobjectnn', help='')
44 |     parser.add_argument('--sonn_c_root', type=str, default='data/sonn_c', help='')
45 |     parser.add_argument('--sonn_variant', type=str, default='hardest', help='')
46 |     parser.add_argument('--modelnet40_root', type=str, default='data/modelnet40', help='')
47 |     parser.add_argument('--modelnet_c_root', type=str, default='data/modelnet_c', help='')
48 |     parser.add_argument('--modelnet40_c_root', type=str, default='data/modelnet40_c', help='')
49 |     parser.add_argument('--modelnet40_sdxl_root', type=str, default='data/diffusion/modelnet40_sdxl', help='')
50 |     parser.add_argument('--cor_type', type=str, default='add_global_2', help='data corruption type')
51 | 
52 |     parser.add_argument("--p_thres", type=float, default=0.1, help="fraction of the most confident images to take from all generated images")
53 |     parser.add_argument('--imsize', type=int, default=224, help='image resolution')
54 | 
55 |     args = parser.parse_args()
56 | 
57 |     return args
58 | 
59 | 
60 | def main():
61 |     args = get_arguments()
62 | 
63 |     # 0. create CLIP model and load its weights
64 |     clip_model, _, _ = open_clip.create_model_and_transforms(model_name=args.clip_model, pretrained=args.pretrained, device='cpu')
65 |     clip_model.half().to(args.device)
66 |     clip_model.eval()
67 | 
68 |     dataset_name = args.dataset
69 |     preprocess = None
70 | 
71 |     test_loader, classnames, template = build_test_data_loader(args, dataset_name, args.data_root, preprocess)
72 |     # `clip_txt_weights`: (emb_dim, n_cls)
73 |     clip_txt_weights = clip_classifier(classnames, template, clip_model)
74 |     emb_dim, n_cls = clip_txt_weights.size()
75 | 
76 |     if 'modelnet' in dataset_name:
77 |         diff_prefix = 'modelnet40'
78 |         img_cache = build_img_cache(args, f'{diff_prefix}_sdxl', clip_model)
79 | 
80 |         # keys: (n_cls*3, emb_dim)   values: (n_cls*3, n_cls)
81 |         keys, values = img_cache['keys'], img_cache['values']
82 |         print('keys.shape:', keys.shape)
83 |         print('values.shape:', values.shape)
84 | 
85 |         # keys = keys.reshape(n_cls, -1, emb_dim).mean(dim=1)
86 |         # values = values.reshape(n_cls, -1, n_cls).mean(dim=1)
87 | 
88 |         cnt = 0
89 |         total = len(values)
90 |         for key, val in zip(keys, values):
91 |             pred = torch.argmax(key @ clip_txt_weights)
92 |             target = val.argmax(dim=-1)
93 |             if pred == target:
94 |                 cnt += 1
95 |             print(f"pred: {pred}\t target: {target}")
96 |         print('img-text match acc (%):', cnt/total * 100)
97 | 
98 | 
99 | if __name__ == "__main__":
100 |     main()
-------------------------------------------------------------------------------- /utils/compute_mean_and_std.py: --------------------------------------------------------------------------------
1 | import sys
2 | import math
3 | 
4 | def compute_mean_and_std(num1, num2, num3):
5 |     # Ensure the numbers are positive
6 |     if num1 <= 0 or num2 <= 0 or num3 <= 0:
7 |         raise ValueError("All numbers must be positive.")
8 | 
9 |     # Compute the mean
10 |     mean = (num1 + num2 + num3) / 3
11 | 
12 |     # Compute the (population) standard deviation
13 |     variance = ((num1 - mean) ** 2 + (num2 - mean) ** 2 + (num3 - mean) ** 2) / 3
14 |     std_dev = math.sqrt(variance)
15 | 
16 |     return mean, std_dev
17 | 
18 | if __name__ == "__main__":
19 |     if len(sys.argv) != 4:
20 |         print("Usage: python script.py <num1> <num2> <num3>")
21 |         sys.exit(1)
22 | 
23 |     try:
24 |         # Parse command-line arguments as floats
25 |         num1 = float(sys.argv[1])
26 |         num2 = float(sys.argv[2])
27 |         num3 = float(sys.argv[3])
28 | 
29 |         mean, std_dev = compute_mean_and_std(num1, num2, num3)
30 |         print(f"Mean: {mean:.2f}, Standard Deviation: {std_dev:.2f}")
31 |     except ValueError as e:
32 |         print(f"Error: {e}")
33 |         sys.exit(1)
34 | 
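For a quick sanity check, `utils/compute_mean_and_std.py` can be run directly on three accuracy values; the numbers below are made up for illustration. Because the script uses the population variance (dividing by 3 rather than 2), the reported deviation is slightly smaller than the sample standard deviation would be:

    $ python utils/compute_mean_and_std.py 85.3 85.7 85.1
    Mean: 85.37, Standard Deviation: 0.25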
-------------------------------------------------------------------------------- /utils/debug.py: --------------------------------------------------------------------------------
1 | import argparse
2 | 
3 | import open_clip
4 | import torch
5 | 
6 | from utils import *
7 | 
8 | from models import uni3d
9 | from datasets.modelnet_c_sdxl import ModelNet_C_SDXL
10 | 
11 | 
12 | def get_arguments():
13 |     """Get arguments of the test-time adaptation."""
14 |     parser = argparse.ArgumentParser()
15 |     parser.add_argument('--config', dest='config', help='settings of TDA on specific dataset in yaml format.')
16 |     parser.add_argument('--wandb-log', dest='wandb', action='store_true', help='Whether you want to log to wandb. Include this flag to enable logging.')
17 |     parser.add_argument('--datasets', dest='datasets', type=str, help="Datasets to process, separated by a slash (/). Example: I/A/V/R/S")
18 |     parser.add_argument('--data-root', dest='data_root', type=str, default='./data/', help='Path to the datasets directory. Default is ./data/')
19 |     parser.add_argument('--backbone', dest='backbone', type=str, choices=['RN50', 'ViT-B/16'], help='CLIP model backbone to use: RN50 or ViT-B/16.')
20 | 
21 |     parser.add_argument("--pc-model", type=str, default="eva_giant_patch14_560", help="Name of pointcloud backbone to use.",)
22 |     parser.add_argument("--pretrained-pc", default='', type=str, help="Use a pretrained CLIP model vision weights with the specified tag or file path.",)
23 | 
24 |     # Model
25 |     parser.add_argument('--model', default='create_uni3d', type=str)
26 |     parser.add_argument("--clip-model", type=str, default="EVA02-E-14-plus", help="Name of the vision and text backbone to use.",)
27 |     parser.add_argument("--pretrained", default='weights/open_clip_pytorch_model_laion2b_s9b_b144k.bin', type=str, help="Use a pretrained CLIP model weights with the specified tag or file path.",)
28 |     parser.add_argument("--device", default=0, type=int, help="The GPU device id to use.",)
29 |     parser.add_argument('--ckpt_path', default='', help='the ckpt to test 3d zero shot')
30 | 
31 |     # point encoder
32 |     parser.add_argument('--npoints', default=8192, type=int, help='number of points used for pre-train and test.')
33 |     parser.add_argument("--pc-feat-dim", type=int, default=768, help="Pointcloud feature dimension.")
34 |     parser.add_argument("--group-size", type=int, default=32, help="Pointcloud Transformer group size.")
35 |     parser.add_argument("--num-group", type=int, default=512, help="Pointcloud Transformer number of groups.")
36 |     parser.add_argument("--pc-encoder-dim", type=int, default=512, help="Pointcloud Transformer encoder dimension.")
37 |     parser.add_argument("--embed-dim", type=int, default=512, help="teacher embedding dimension.")
38 |     parser.add_argument("--patch-dropout", type=float, default=0., help="flip patch dropout.")
39 |     parser.add_argument('--drop-path-rate', default=0.0, type=float)
40 | 
41 |     parser.add_argument('--modelnet_c_sdxl_root', type=str, default='data/diffusion/modelnet_c_sdxl', help='')
42 | 
43 |     parser.add_argument('--distributed', action='store_true', default=False)
44 | 
45 |     args = parser.parse_args()
46 | 
47 |     return args
48 | 
49 | def run(args):
50 |     # 0. create CLIP model and load its weights
51 |     clip_model, _, _ = open_clip.create_model_and_transforms(model_name=args.clip_model, pretrained=args.pretrained)
52 |     clip_model.to(args.device)
53 |     print(clip_model.visual.state_dict().keys())
54 | 
55 |     # 1. create 3D model
56 |     lm3d_model = uni3d.create_uni3d(args)
57 |     lm3d_model.to(args.device)
58 |     lm3d_model.eval()
59 | 
60 |     # 2. load 3D model pre-trained weights
61 |     checkpoint = torch.load(args.ckpt_path, map_location='cpu')
62 |     sd = checkpoint['module']
63 | 
64 |     print(sd.keys())
65 | 
66 |     # NOTE where does the `args.distributed` argument actually come from?
67 |     # 1. So far I have not found where it is defined, only places where it is assigned
68 |     # 2. It eventually has to be defined by ourselves, otherwise an error is raised
69 |     if not args.distributed and next(iter(sd.items()))[0].startswith('module'):
70 |         sd = {k[len('module.'):]: v for k, v in sd.items()}
71 |     lm3d_model.load_state_dict(sd)
72 | 
73 |     # 3.
check ModelNet_C_SDXL definition and the order of classes 74 | test_loader, _, _ = build_test_data_loader(args, 'modelnet_c_sdxl', None, None) 75 | print('len(test_loader):', len(test_loader)) 76 | 77 | for i, (images, target) in enumerate(test_loader): 78 | print(f'{i}'.zfill(2), f'images.shape: {images.shape}', f'target: {target}') 79 | 80 | 81 | if '__main__' == __name__: 82 | args = get_arguments() 83 | 84 | run(args) -------------------------------------------------------------------------------- /utils/find_class_pc.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | 6 | # sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 7 | from utils.utils import build_test_data_loader 8 | from utils.utils import get_arguments 9 | 10 | 11 | def get_dataset(args): 12 | print('='*20) 13 | print(args) 14 | print('='*20) 15 | dataset_name = args.dataset 16 | test_loader, _, _ = build_test_data_loader(args, dataset_name, args.data_root, None) 17 | print('len(test_loader):', len(test_loader)) 18 | 19 | return test_loader 20 | 21 | 22 | if '__main__' == __name__: 23 | args = get_arguments() 24 | 25 | dataset = args.dataset 26 | print('dataset:', dataset) 27 | print('sonn_variant:', args.sonn_variant) 28 | print('cor_type:', args.cor_type) 29 | print('cname:', args.cname) 30 | print('npoints:', args.npoints) 31 | 32 | test_loader = get_dataset(args) 33 | cnames = test_loader.dataset.classnames 34 | print('cnames:', cnames) 35 | 36 | # - ulip1, sonn_c, obj_only, rotate_2 gt_class: 8, cname: shelf 37 | # - ulip1, sonn_c, obj_only, rotate_2 gt_class: 1, cname: bin 38 | # - ulip1, sonn_c, obj_only, rotate_2 gt_class: 0, cname: bag 39 | # - ulip1, sonn_c, obj_only, rotate_2 gt_class: 11, cname: pillow 40 | # - ulip1, sonn_c, obj_only, rotate_2 gt_class: 7, cname: door 41 | # - ulip1, sonn_c, obj_only, rotate_2 gt_class: 13, cname: sofa 42 | # - ulip1, sonn_c, obj_only, rotate_2 gt_class: 5, cname: desk 43 | 44 | # - ulip2, sonn_hardest gt_class: 9, cname: table 45 | # - ulip2, sonn_hardest gt_class: 6, cname: display 46 | # - ulip2, sonn_hardest gt_class: 12, cname: sink 47 | # - ulip2, sonn_hardest gt_class: 14, cname: toilet 48 | # - ulip2, sonn_hardest gt_class: 10, cname: bed 49 | # - ulip2, sonn_hardest gt_class: 2, cname: box 50 | # - ulip2, sonn_hardest gt_class: 3, cname: cabinet 51 | 52 | # - openshape, modelnet_c, dropout_local_2 gt_class: 17, cname: guitar 53 | # - openshape, modelnet_c, dropout_local_2 gt_class: 5, cname: bottle 54 | # - openshape, modelnet_c, dropout_local_2 gt_class: 16, cname: glass_box 55 | # - openshape, modelnet_c, dropout_local_2 gt_class: 36, cname: tv_stand 56 | # - openshape, modelnet_c, dropout_local_2 gt_class: 13, cname: door 57 | # - openshape, modelnet_c, dropout_local_2 gt_class: 34, cname: tent 58 | # - openshape, modelnet_c, dropout_local_2 gt_class: 26, cname: plant 59 | # - openshape, modelnet_c, dropout_local_2 gt_class: 31, cname: stairs 60 | 61 | # - uni3d, omni3d, 4096 pts gt_class: 30, cname: calculator 62 | # - uni3d, omni3d, 4096 pts gt_class: 146, cname: pomegranate 63 | # - uni3d, omni3d, 4096 pts gt_class: 164, cname: shampoo 64 | # - uni3d, omni3d, 4096 pts gt_class: 212, cname: watermelon 65 | # - uni3d, omni3d, 4096 pts gt_class: 210, cname: watch 66 | # - uni3d, omni3d, 4096 pts gt_class: 83, cname: hair dryer 67 | # - uni3d, omni3d, 4096 pts gt_class: 203, cname: toy truck 68 | 69 | for i, (pc, target, _, rgb) in 
enumerate(test_loader):
70 |         if cnames[target.item()] == args.cname:
71 |             d = {'pc': pc, 'label': cnames[target.item()]}
72 |             torch.save(d, f'outputs/saved_pth_tensors/{args.dataset}_{args.sonn_variant}_{args.cor_type}_{cnames[target.item()]}.pth')
73 |             print(cnames[target.item()], ':', target.item())
74 |             break
75 | 
-------------------------------------------------------------------------------- /utils/generate_pc_view_labels.py: --------------------------------------------------------------------------------
1 | import sys
2 | import h5py
3 | 
4 | import numpy as np
5 | 
6 | cor_type = sys.argv[1]  # e.g., add_global_2
7 | file = f'data/modelnet_c/{cor_type}.h5'
8 | 
9 | # Open the corrupted-ModelNet h5 file read-only and close it after reading the labels
10 | h5_f = h5py.File(file, 'r')
11 | label = h5_f['label'][:].astype(np.int32)
12 | h5_f.close()
13 | 
14 | with open(f'/home/hongyu/data/pc_views/modelnet_c_views/{cor_type}/labels.txt', 'w') as fout:
15 |     for val in label:
16 |         fout.write(f'{val.item()}\n')
17 | 
18 | print(f"Generate /home/hongyu/data/pc_views/modelnet_c_views/{cor_type}/labels.txt done!")
--------------------------------------------------------------------------------
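Usage note for `utils/generate_pc_view_labels.py`: a sketch of the intended invocation, inferred from the `sys.argv[1]` comment above (the hard-coded output directory must already exist):

    python utils/generate_pc_view_labels.py add_global_2

This writes one integer label per line to `/home/hongyu/data/pc_views/modelnet_c_views/add_global_2/labels.txt`, matching the sample order of `data/modelnet_c/add_global_2.h5`.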