├── Dockerfile ├── LICENSE.txt ├── README.md ├── conda_env.yml ├── images ├── mem.png ├── model.png ├── noiseceiling.png └── teaser.png └── mem ├── backbone.py ├── behav_embed.py ├── blocks.py ├── callbacks.py ├── cluster_utils.py ├── common_utils.py ├── config.py ├── config.pyi ├── config_utils.py ├── configs ├── L.yaml ├── M.yaml ├── S.yaml ├── bold5000.yaml ├── debug.yaml ├── dev.yaml ├── dev_B.yaml ├── xvaa.yaml ├── xvba.yaml ├── xvea.yaml ├── xvfe.yaml ├── xvff.yaml └── xvga.yaml ├── crun.py ├── dark_onemodel.py ├── datamodule.py ├── datasets.py ├── exp_utils.py ├── loss.py ├── metrics.py ├── models.py ├── neck.py ├── optimizers.py ├── plmodels.py ├── point_pe.py ├── prepare_cache.py ├── preparedata_bold5000.py ├── preparedata_nsd.py ├── read_utils.py ├── registry.py ├── save_config.py ├── scripts_heavy ├── do_one_job.sh ├── do_start_jobs.sh ├── do_stop_donejobs.sh ├── do_stop_jobs.sh ├── sync_ckpt.sh ├── xvaa_topyneck.py ├── xvab_gather.py ├── xvba_soup.py ├── xvbaa_darkpred.py ├── xvbab_submission.py ├── xvbb_clustering.py ├── xvbc_roimodel.py ├── xvda_do_soup.py ├── xvdb_load_soup.py ├── xvfe_distillmodel.py ├── xvfeb_dosoup.py ├── xvfec_dark_predict.py ├── xvfed_local_eval.py ├── xvfee_morebaseline.py ├── xvfef_moredistill.py ├── xvga_distill_roimodel.py └── xvgb_load_soup.py ├── scripts_light ├── toy_ablation.py ├── xvba_nerfed_soup.py ├── xvbaa_darkpred.py └── xvbab_submission.py ├── scripts_paper ├── xbaa_prevframe_b5k.py ├── xbab_line_prevframe_b5k.py ├── xdaa_prevframe.py ├── xdab_plot_prevframe.py ├── xdac_line_prevframe.py ├── xdacb_single_line_prevframe.py ├── xdad_table_prevframe.py ├── xdae_diff_prevframe.py ├── xdba_behavior.py ├── xdbb_plot_behavior.py ├── xdbc_plot_behavior.py ├── xdcaa_topyneck.py ├── xdcab_gather.py ├── xdcabb_plot_topyneck.py ├── xdcac_ablation.py ├── xdcad_ablation_table.py ├── xdea_before_after.py └── xdeb_plot_before_after.py ├── scripts_tune ├── tune_b2b3.py ├── tune_backbone.py ├── tune_behvmkii.py ├── tune_bottleneck.py ├── tune_fs_1mm.py ├── tune_fship_grid.py ├── tune_longtraining.py ├── tune_lrbsz.py ├── tune_modelsize.py ├── tune_more_behv.py ├── tune_morevoxel.py └── tune_prevframe.py ├── topyneck.py └── train_utils.py /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM huzeeee/afo:latest 2 | 3 | WORKDIR /workspace 4 | 5 | CMD ["sleep", "infinity"] 6 | -------------------------------------------------------------------------------- /conda_env.yml: -------------------------------------------------------------------------------- 1 | name: mem 2 | channels: 3 | - defaults 4 | - pytorch 5 | - nvidia 6 | - xformers 7 | - conda-forge 8 | dependencies: 9 | - python=3.9 10 | - pytorch::pytorch=2.0.0 11 | - pytorch::pytorch-cuda=11.7.0 12 | - pytorch::torchvision=0.15.0 13 | - numpy 14 | - pillow 15 | - omegaconf 16 | - torchmetrics=0.10.3 17 | - fvcore 18 | - iopath 19 | - xformers::xformers=0.0.18 20 | - pip 21 | - pip: 22 | - git+https://github.com/facebookincubator/submitit 23 | - --extra-index-url https://pypi.nvidia.com 24 | - cuml-cu11 25 | - setuptools 26 | - wheel 27 | - cython 28 | - yacs 29 | - pytorch-lightning==1.9.4 30 | - matplotlib 31 | - colorstamps 32 | - nilearn 33 | - scipy 34 | - fast-pytorch-kmeans 35 | - einops 36 | - python-dotenv 37 | - open_clip_torch==2.20.0 38 | - git+https://github.com/facebookresearch/segment-anything.git@6fdee8f2727f4506cfbbe553e23b895e27956588 39 | - git+https://github.com/facebookresearch/dinov2.git@ebc1cba1096de0a5388527817cf1e5d4d3896699 40 | - 
timm==0.9.2 41 | - pycortex -------------------------------------------------------------------------------- /images/mem.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huzeyann/MemoryEncodingModel/224478873992ec8454513f446a4c90aa9376bf70/images/mem.png -------------------------------------------------------------------------------- /images/model.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huzeyann/MemoryEncodingModel/224478873992ec8454513f446a4c90aa9376bf70/images/model.png -------------------------------------------------------------------------------- /images/noiseceiling.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huzeyann/MemoryEncodingModel/224478873992ec8454513f446a4c90aa9376bf70/images/noiseceiling.png -------------------------------------------------------------------------------- /images/teaser.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huzeyann/MemoryEncodingModel/224478873992ec8454513f446a4c90aa9376bf70/images/teaser.png -------------------------------------------------------------------------------- /mem/behav_embed.py: -------------------------------------------------------------------------------- 1 | from torch import nn, Tensor 2 | from einops import rearrange 3 | from typing import Dict, Optional 4 | from config import AutoConfig 5 | 6 | 7 | from timm.layers.mlp import Mlp 8 | from timm.layers.norm import LayerNorm 9 | 10 | 11 | 12 | class SubjectBehaviorEmbed(nn.Module): 13 | def __init__( 14 | self, 15 | subject_list, 16 | in_dim, 17 | dim, 18 | dropout=0.2, # dropout for handle behavior data free case 19 | ): 20 | super().__init__() 21 | self.subject_list = subject_list 22 | 23 | self.embed = nn.ModuleDict() 24 | for subject in self.subject_list: 25 | block = nn.Sequential( 26 | nn.Linear(in_dim, dim), 27 | nn.GELU(), 28 | ) 29 | self.embed[subject] = block 30 | 31 | self.mlp = Mlp(dim, out_features=dim) 32 | 33 | self.dropout = nn.Sequential( 34 | nn.Unflatten(1, (dim, 1)), # [B, D, 1] 35 | nn.Dropout1d(dropout), # dropout on the entire D 36 | nn.Flatten(1, -1), # [B, D] 37 | ) 38 | def forward(self, c: Tensor, subject: str): 39 | if c is not None: 40 | c = self.embed[subject](c) 41 | c = self.mlp(c) 42 | c = self.dropout(c) 43 | # dropout in training but not validation 44 | return c 45 | 46 | 47 | def build_behavior_embed(cfg: AutoConfig, out_dim=None): 48 | out_dim = out_dim or cfg.MODEL.COND.DIM 49 | return SubjectBehaviorEmbed( 50 | subject_list=cfg.DATASET.SUBJECT_LIST, 51 | in_dim=cfg.MODEL.COND.IN_DIM, 52 | dim=out_dim, 53 | dropout=cfg.MODEL.COND.DROPOUT, 54 | ) 55 | -------------------------------------------------------------------------------- /mem/cluster_utils.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from typing import Dict 3 | from ray import tune 4 | from filelock import Timeout, FileLock 5 | import os 6 | import logging 7 | 8 | # monkey patching 9 | import ray 10 | from ray.tune.experiment import Trial 11 | 12 | 13 | def my_create_unique_logdir_name(root: str, relative_logdir: str) -> str: 14 | candidate = Path(root).expanduser().joinpath(relative_logdir) 15 | if candidate.exists(): 16 | # relative_logdir_old = relative_logdir 17 | # relative_logdir += "_" + uuid.uuid4().hex[:4] 18 
| # logger.info( 19 | # f"Creating a new dirname {relative_logdir} because " 20 | # f"trial dirname '{relative_logdir_old}' already exists." 21 | # ) 22 | pass 23 | return relative_logdir 24 | 25 | 26 | ray.tune.experiment.trial._create_unique_logdir_name = my_create_unique_logdir_name 27 | 28 | 29 | def my_nfs_cluster_job(func): 30 | def inner(*args, **kwargs): 31 | log_dir = tune.get_trial_dir() 32 | done_path = os.path.join(log_dir, "done") 33 | if os.path.exists(done_path): 34 | logging.warning(f"Experiment {log_dir} already done, skipping.") 35 | return 36 | lock_path = os.path.join(log_dir, "lockfile") 37 | lock = FileLock(lock_path, timeout=1) 38 | try: 39 | with lock.acquire(timeout=1): 40 | func(*args, **kwargs) 41 | with open(done_path, "w") as f: 42 | f.write("done") 43 | except Timeout: 44 | import sys 45 | sys.tracebacklimit = -1 46 | 47 | raise RuntimeError( 48 | "Failed to acquire lock, another process is running this experiment." 49 | ) 50 | 51 | return inner 52 | 53 | 54 | def trial_dirname_creator(trial : Trial): 55 | config : Dict = trial.config 56 | # config_str = "_".join([f"{k}={'_'.join(v)}" for k, v in config.items()]) 57 | config_str = "" 58 | for k, v in config.items(): 59 | if isinstance(v, list): 60 | v = [str(x) for x in v] 61 | config_str += f"{k}={'_'.join(v)}," 62 | else: 63 | config_str += f"{k}={v}," 64 | max_len = 60 65 | config_str = config_str[:max_len] 66 | return f"t{trial.trial_id}_{config_str}" 67 | 68 | def trail_name_creator(trial): 69 | return f"t{trial.trial_id}" 70 | 71 | # def trial_name_creator(trial): 72 | # return f"t" 73 | -------------------------------------------------------------------------------- /mem/common_utils.py: -------------------------------------------------------------------------------- 1 | from itertools import chain, combinations 2 | from torch import Tensor 3 | import torch 4 | 5 | def count_nan(x: Tensor): 6 | count = torch.sum(torch.isnan(x)) 7 | percent = count / x.numel() 8 | return count, percent 9 | 10 | def all_subsets(ss): 11 | """ 12 | Returns all non-empty subsets of a set. 13 | """ 14 | return list(chain(*map(lambda x: combinations(ss, x), range(1, len(ss) + 1)))) 15 | 16 | 17 | def subsets( 18 | ss, 19 | exclude: list = [ 20 | 0, 21 | ], 22 | ): 23 | """ 24 | Returns all non-empty subsets of a set. 
25 | """ 26 | return list( 27 | chain( 28 | *map( 29 | lambda x: combinations(ss, x), 30 | [i for i in range(0, len(ss) + 1) if i not in exclude], 31 | ) 32 | ) 33 | ) 34 | 35 | 36 | if __name__ == "__main__": 37 | subjects = ["E1", "E2", "E3", "M1", "M2"] 38 | full_ss = subsets(subjects) 39 | ss = subsets(subjects, exclude=[0, 3]) 40 | print(len(full_ss)) 41 | pass 42 | -------------------------------------------------------------------------------- /mem/config.pyi: -------------------------------------------------------------------------------- 1 | import typing as T 2 | 3 | from yacs.config import CfgNode as CN 4 | 5 | _C: AutoConfig 6 | 7 | class Experimental(CN): 8 | SHUFFLE_IMAGES: bool 9 | BLANK_IMAGE: bool 10 | T_IMAGE: int 11 | USE_PREV_FRAME: bool 12 | USE_RETINA_MAPPER: bool 13 | USE_LAYER_SELECTOR: bool 14 | USE_BHV: bool 15 | USE_BHV_PASSTHROUGH: bool 16 | BEHV_ONLY: bool 17 | BEHV_SELECTION: T.Sequence 18 | BACKBONE_NOGRAD: bool 19 | STRAIGHT_FORWARD: bool 20 | STRAIGHT_FORWARD_BUT_KEEP_BACKBONE_GRAD: bool 21 | ANOTHER_SPLIT: bool 22 | SHUFFLE_VAL: bool 23 | NO_SPLIT: bool 24 | USE_DEV_MODEL: bool 25 | 26 | class Datamodule(CN): 27 | BATCH_SIZE: int 28 | NUM_WORKERS: int 29 | PIN_MEMORY: bool 30 | FEATURE_EXTRACTOR_MODE: bool 31 | 32 | class Dataset(CN): 33 | IMAGE_RESOLUTION: T.Sequence 34 | N_PREV_FRAMES: int 35 | CACHE_DIR: str 36 | SUBJECT_LIST: T.Sequence 37 | ROIS: T.Sequence 38 | FMRI_SPACE: str 39 | FILTER_BY_SESSION: T.Sequence 40 | ROOT: str 41 | DARK_POSTFIX: str 42 | 43 | class Position_encoding(CN): 44 | IN_DIM: int 45 | MAX_STEPS: int 46 | FEATURES: int 47 | PERIODS: int 48 | 49 | class Lora(CN): 50 | SCALE: float 51 | RANK: int 52 | 53 | class Adaptive_ln(CN): 54 | SCALE: float 55 | 56 | class Backbone(CN): 57 | NAME: str 58 | CACHE_DIR: str 59 | LAYERS: T.Sequence 60 | FEATURE_DIMS: T.Sequence 61 | CLS_DIMS: T.Sequence 62 | LORA: Lora 63 | ADAPTIVE_LN: Adaptive_ln 64 | 65 | class Lora_1(CN): 66 | SCALE: float 67 | RANK: int 68 | 69 | class Adaptive_ln_1(CN): 70 | SCALE: float 71 | 72 | class Backbone_small(CN): 73 | NAME: str 74 | LAYERS: T.Sequence 75 | CLS_DIMS: T.Sequence 76 | T_DIM: int 77 | WIDTH: int 78 | MERGE_WIDTH: int 79 | LORA: Lora_1 80 | ADAPTIVE_LN: Adaptive_ln_1 81 | 82 | class Prev_feat(CN): 83 | DIM: int 84 | 85 | class Conv_head(CN): 86 | MAX_DIM: int 87 | KERNEL_SIZES: T.Sequence 88 | DEPTHS: T.Sequence 89 | WIDTH: int 90 | SIMPLE: bool 91 | 92 | class Cond(CN): 93 | USE: bool 94 | DROPOUT: float 95 | IN_DIM: int 96 | DIM: int 97 | PASSTHROUGH_DIM: int 98 | 99 | class Coords_mlp(CN): 100 | WIDTH: int 101 | DEPTH: int 102 | LOG: bool 103 | 104 | class Retina_mapper(CN): 105 | CONSTANT_SIGMA: float 106 | 107 | class Layer_selector(CN): {} 108 | 109 | class Bottleneck(CN): 110 | RANK: int 111 | OUT_DIM: int 112 | 113 | class Mlp(CN): 114 | DEPTH: int 115 | WIDTH: int 116 | 117 | class Shared(CN): 118 | USE: bool 119 | MLP: Mlp 120 | 121 | class Voxel_outs(CN): 122 | SHARED: Shared 123 | 124 | class Model(CN): 125 | WIDTH_RATIO: float 126 | BACKBONE: Backbone 127 | BACKBONE_SMALL: Backbone_small 128 | PREV_FEAT: Prev_feat 129 | CONV_HEAD: Conv_head 130 | COND: Cond 131 | MAX_TRAIN_VOXELS: int 132 | CHUNK_SIZE: int 133 | COORDS_MLP: Coords_mlp 134 | RETINA_MAPPER: Retina_mapper 135 | LAYER_SELECTOR: Layer_selector 136 | BOTTLENECK: Bottleneck 137 | VOXEL_OUTS: Voxel_outs 138 | 139 | class Sync(CN): 140 | USE: bool 141 | STAGE: str 142 | SKIP_EPOCHS: int 143 | EMA_BETA: float 144 | EMA_BIAS_CORRECTION: bool 145 | UPDATE_RULE: str 146 | EXP_SCALE: 
float 147 | EXP_SHIFT: float 148 | LOG_SHIFT: float 149 | EMA_KEY: str 150 | 151 | class Anneal(CN): 152 | T: int 153 | 154 | class Dark(CN): 155 | USE: bool 156 | MAX_EPOCH: int 157 | GT_ROIS: T.Sequence 158 | GT_SCALE_UP_COEF: float 159 | ANNEAL: Anneal 160 | 161 | class Loss(CN): 162 | NAME: str 163 | SMOOTH_L1_BETA: float 164 | SYNC: Sync 165 | DARK: Dark 166 | 167 | class Regularizer(CN): 168 | LAYER: float 169 | 170 | class Scheduler(CN): 171 | T_INITIAL: int 172 | T_MULT: float 173 | CYCLE_DECAY: float 174 | CYCLE_LIMIT: int 175 | WARMUP_T: int 176 | K_DECAY: float 177 | LR_MIN: float 178 | LR_MIN_WARMUP: float 179 | 180 | class Optimizer(CN): 181 | NAME: str 182 | LR: float 183 | WEIGHT_DECAY: float 184 | SCHEDULER: Scheduler 185 | 186 | class Early_stop(CN): 187 | PATIENCE: int 188 | 189 | class Checkpoint(CN): 190 | SAVE_TOP_K: int 191 | REMOVE: bool 192 | LOAD_BEST_ON_VAL: bool 193 | LOAD_BEST_ON_END: bool 194 | 195 | class Callbacks(CN): 196 | EARLY_STOP: Early_stop 197 | CHECKPOINT: Checkpoint 198 | 199 | class Trainer(CN): 200 | DDP: bool 201 | PRECISION: int 202 | GRADIENT_CLIP_VAL: float 203 | MAX_EPOCHS: int 204 | MAX_STEPS: int 205 | ACCUMULATE_GRAD_BATCHES: int 206 | VAL_CHECK_INTERVAL: float 207 | LIMIT_TRAIN_BATCHES: float 208 | LIMIT_VAL_BATCHES: float 209 | LOG_TRAIN_N_STEPS: int 210 | CALLBACKS: Callbacks 211 | 212 | class Model_soup(CN): 213 | USE: bool 214 | RECIPE: str 215 | GREEDY_TARGET: str 216 | 217 | class Analysis(CN): 218 | SAVE_NEURON_LOCATION: bool 219 | DRAW_NEURON_LOCATION: bool 220 | 221 | class AutoConfig(CN): 222 | DESCRIPTION: str 223 | EXPERIMENTAL: Experimental 224 | DATAMODULE: Datamodule 225 | DATASET: Dataset 226 | POSITION_ENCODING: Position_encoding 227 | MODEL: Model 228 | LOSS: Loss 229 | REGULARIZER: Regularizer 230 | OPTIMIZER: Optimizer 231 | TRAINER: Trainer 232 | MODEL_SOUP: Model_soup 233 | RESULTS_DIR: str 234 | CHECKPOINT_DIR: str 235 | ANALYSIS: Analysis 236 | -------------------------------------------------------------------------------- /mem/configs/L.yaml: -------------------------------------------------------------------------------- 1 | ANALYSIS: 2 | DRAW_NEURON_LOCATION: false 3 | SAVE_NEURON_LOCATION: false 4 | CHECKPOINT_DIR: /data/ckpt/ 5 | DATAMODULE: 6 | BATCH_SIZE: 8 7 | FEATURE_EXTRACTOR_MODE: false 8 | NUM_WORKERS: 8 9 | PIN_MEMORY: true 10 | DATASET: 11 | CACHE_DIR: /data/cache 12 | DARK_POSTFIX: '' 13 | FILTER_BY_SESSION: 14 | - -1 15 | FMRI_SPACE: fsaverage 16 | IMAGE_RESOLUTION: 17 | - 224 18 | - 224 19 | N_PREV_FRAMES: 32 20 | ROIS: 21 | - orig 22 | ROOT: /data/ALG23 23 | SUBJECT_LIST: 24 | - subj01 25 | DESCRIPTION: Default config 26 | EXPERIMENTAL: 27 | ANOTHER_SPLIT: false 28 | BEHV_ONLY: false 29 | BEHV_SELECTION: 30 | - -1 31 | BLANK_IMAGE: false 32 | NO_SPLIT: false 33 | SHUFFLE_IMAGES: false 34 | SHUFFLE_VAL: true 35 | STRAIGHT_FORWARD: false 36 | STRAIGHT_FORWARD_BUT_KEEP_BACKBONE_GRAD: false 37 | T_IMAGE: 0 38 | USE_DEV_MODEL: false 39 | USE_PREV_FRAME: true 40 | LOSS: 41 | DARK: 42 | ANNEAL: 43 | T: 30 44 | GT_ROIS: 45 | - htroi_1 46 | GT_SCALE_UP_COEF: 1.0 47 | MAX_EPOCH: 100 48 | USE: false 49 | NAME: SmoothL1Loss 50 | SMOOTH_L1_BETA: 0.01 51 | SYNC: 52 | EMA_BETA: 0.9 53 | EMA_BIAS_CORRECTION: false 54 | EMA_KEY: running_grad 55 | EXP_SCALE: 10.0 56 | EXP_SHIFT: 0.0 57 | LOG_SHIFT: 10.0 58 | SKIP_EPOCHS: 10 59 | STAGE: VAL 60 | UPDATE_RULE: exp 61 | USE: false 62 | MODEL: 63 | BACKBONE: 64 | ADAPTIVE_LN: 65 | SCALE: 0.5 66 | CACHE_DIR: /data/cache 67 | CLS_DIMS: 68 | - 2048 69 | - 2048 70 | - 2048 
71 | - 1024 72 | FEATURE_DIMS: 73 | - 1024 74 | - 1024 75 | - 1024 76 | - 1024 77 | LAYERS: 78 | - 5 79 | - 11 80 | - 17 81 | - 23 82 | LORA: 83 | RANK: 4 84 | SCALE: 0.2 85 | NAME: dinov2_vit_l 86 | BACKBONE_SMALL: 87 | ADAPTIVE_LN: 88 | SCALE: 0.5 89 | CLS_DIMS: 90 | - 1536 91 | - 1536 92 | - 768 93 | LAYERS: 94 | - 5 95 | - 8 96 | - 11 97 | LORA: 98 | RANK: 4 99 | SCALE: 0.2 100 | MERGE_WIDTH: 128 101 | NAME: dinov2_vit_b 102 | T_DIM: 128 103 | WIDTH: 128 104 | BOTTLENECK: 105 | OUT_DIM: 64 106 | RANK: -1 107 | COND: 108 | DIM: 256 109 | DROPOUT: 0.2 110 | IN_DIM: 35 111 | PASSTHROUGH_DIM: 64 112 | USE: true 113 | CONV_HEAD: 114 | DEPTHS: 115 | - 3 116 | - 3 117 | - 3 118 | - 3 119 | KERNEL_SIZES: 120 | - 5 121 | - 5 122 | - 5 123 | - 5 124 | MAX_DIM: 1024 125 | SIMPLE: true 126 | WIDTH: 256 127 | COORDS_MLP: 128 | DEPTH: 3 129 | LOG: true 130 | WIDTH: 128 131 | LAYER_SELECTOR: {} 132 | MAX_TRAIN_VOXELS: 25600 133 | PREV_FEAT: 134 | DIM: 1024 135 | RETINA_MAPPER: 136 | CONSTANT_SIGMA: 0.01 137 | VOXEL_OUTS: 138 | SHARED: 139 | MLP: 140 | DEPTH: 3 141 | WIDTH: 1024 142 | USE: false 143 | MODEL_SOUP: 144 | GREEDY_TARGET: heldout 145 | RECIPE: greedy 146 | USE: true 147 | OPTIMIZER: 148 | LR: 0.0003 149 | NAME: AdamW 150 | SCHEDULER: 151 | CYCLE_DECAY: 0.5 152 | CYCLE_LIMIT: 3 153 | K_DECAY: 1.5 154 | LR_MIN: 0.0003 155 | LR_MIN_WARMUP: 0.000001 156 | T_INITIAL: 1 157 | T_MULT: 1.0 158 | WARMUP_T: 10 159 | WEIGHT_DECAY: 0.0003 160 | POSITION_ENCODING: 161 | FEATURES: 32 162 | IN_DIM: 3 163 | MAX_STEPS: 1000 164 | PERIODS: 10000 165 | REGULARIZER: 166 | LAYER: 1.0e-04 167 | RESULTS_DIR: /nfscc/ray_results/modelsize/ 168 | TRAINER: 169 | ACCUMULATE_GRAD_BATCHES: 1 170 | CALLBACKS: 171 | CHECKPOINT: 172 | LOAD_BEST_ON_END: false 173 | LOAD_BEST_ON_VAL: false 174 | REMOVE: true 175 | SAVE_TOP_K: 10 176 | EARLY_STOP: 177 | PATIENCE: 30 178 | DDP: false 179 | GRADIENT_CLIP_VAL: 0.5 180 | LIMIT_TRAIN_BATCHES: 0.1 181 | LIMIT_VAL_BATCHES: 0.5 182 | LOG_TRAIN_N_STEPS: 100 183 | MAX_EPOCHS: 1000 184 | MAX_STEPS: -1 185 | PRECISION: 16 186 | VAL_CHECK_INTERVAL: 1.0 187 | -------------------------------------------------------------------------------- /mem/configs/M.yaml: -------------------------------------------------------------------------------- 1 | ANALYSIS: 2 | DRAW_NEURON_LOCATION: false 3 | SAVE_NEURON_LOCATION: false 4 | CHECKPOINT_DIR: /data/ckpt/ 5 | DATAMODULE: 6 | BATCH_SIZE: 8 7 | FEATURE_EXTRACTOR_MODE: false 8 | NUM_WORKERS: 8 9 | PIN_MEMORY: true 10 | DATASET: 11 | CACHE_DIR: /data/cache 12 | DARK_POSTFIX: '' 13 | FILTER_BY_SESSION: 14 | - -1 15 | FMRI_SPACE: fsaverage 16 | IMAGE_RESOLUTION: 17 | - 224 18 | - 224 19 | N_PREV_FRAMES: 32 20 | ROIS: 21 | - orig 22 | ROOT: /data/ALG23 23 | SUBJECT_LIST: 24 | - subj01 25 | DESCRIPTION: Default config 26 | EXPERIMENTAL: 27 | ANOTHER_SPLIT: false 28 | BEHV_ONLY: false 29 | BEHV_SELECTION: 30 | - -1 31 | BLANK_IMAGE: false 32 | NO_SPLIT: false 33 | SHUFFLE_IMAGES: false 34 | SHUFFLE_VAL: true 35 | STRAIGHT_FORWARD: false 36 | STRAIGHT_FORWARD_BUT_KEEP_BACKBONE_GRAD: false 37 | T_IMAGE: 0 38 | USE_DEV_MODEL: false 39 | USE_PREV_FRAME: true 40 | LOSS: 41 | DARK: 42 | ANNEAL: 43 | T: 30 44 | GT_ROIS: 45 | - htroi_1 46 | GT_SCALE_UP_COEF: 1.0 47 | MAX_EPOCH: 100 48 | USE: false 49 | NAME: SmoothL1Loss 50 | SMOOTH_L1_BETA: 0.01 51 | SYNC: 52 | EMA_BETA: 0.9 53 | EMA_BIAS_CORRECTION: false 54 | EMA_KEY: running_grad 55 | EXP_SCALE: 10.0 56 | EXP_SHIFT: 0.0 57 | LOG_SHIFT: 10.0 58 | SKIP_EPOCHS: 10 59 | STAGE: VAL 60 | UPDATE_RULE: exp 61 | USE: 
false 62 | MODEL: 63 | BACKBONE: 64 | ADAPTIVE_LN: 65 | SCALE: 0.5 66 | CACHE_DIR: /data/cache 67 | CLS_DIMS: 68 | - 1536 69 | - 1536 70 | - 1536 71 | - 768 72 | FEATURE_DIMS: 73 | - 768 74 | - 768 75 | - 768 76 | - 768 77 | LAYERS: 78 | - 2 79 | - 5 80 | - 8 81 | - 11 82 | LORA: 83 | RANK: 4 84 | SCALE: 0.2 85 | NAME: dinov2_vit_b 86 | BACKBONE_SMALL: 87 | ADAPTIVE_LN: 88 | SCALE: 0.5 89 | CLS_DIMS: 90 | - 1536 91 | - 1536 92 | - 768 93 | LAYERS: 94 | - 5 95 | - 8 96 | - 11 97 | LORA: 98 | RANK: 4 99 | SCALE: 0.2 100 | MERGE_WIDTH: 128 101 | NAME: dinov2_vit_b 102 | T_DIM: 128 103 | WIDTH: 128 104 | BOTTLENECK: 105 | OUT_DIM: 64 106 | RANK: -1 107 | COND: 108 | DIM: 256 109 | DROPOUT: 0.2 110 | IN_DIM: 35 111 | PASSTHROUGH_DIM: 64 112 | USE: true 113 | CONV_HEAD: 114 | DEPTHS: 115 | - 3 116 | - 3 117 | - 3 118 | - 3 119 | KERNEL_SIZES: 120 | - 5 121 | - 5 122 | - 5 123 | - 5 124 | MAX_DIM: 1024 125 | SIMPLE: true 126 | WIDTH: 256 127 | COORDS_MLP: 128 | DEPTH: 3 129 | LOG: true 130 | WIDTH: 128 131 | LAYER_SELECTOR: {} 132 | MAX_TRAIN_VOXELS: 25600 133 | PREV_FEAT: 134 | DIM: 1024 135 | RETINA_MAPPER: 136 | CONSTANT_SIGMA: 0.01 137 | VOXEL_OUTS: 138 | SHARED: 139 | MLP: 140 | DEPTH: 3 141 | WIDTH: 1024 142 | USE: false 143 | MODEL_SOUP: 144 | GREEDY_TARGET: heldout 145 | RECIPE: greedy 146 | USE: true 147 | OPTIMIZER: 148 | LR: 0.0003 149 | NAME: AdamW 150 | SCHEDULER: 151 | CYCLE_DECAY: 0.5 152 | CYCLE_LIMIT: 3 153 | K_DECAY: 1.5 154 | LR_MIN: 0.0003 155 | LR_MIN_WARMUP: 0.000001 156 | T_INITIAL: 1 157 | T_MULT: 1.0 158 | WARMUP_T: 10 159 | WEIGHT_DECAY: 0.0003 160 | POSITION_ENCODING: 161 | FEATURES: 32 162 | IN_DIM: 3 163 | MAX_STEPS: 1000 164 | PERIODS: 10000 165 | REGULARIZER: 166 | LAYER: 1.0e-04 167 | RESULTS_DIR: /nfscc/ray_results/modelsize/ 168 | TRAINER: 169 | ACCUMULATE_GRAD_BATCHES: 1 170 | CALLBACKS: 171 | CHECKPOINT: 172 | LOAD_BEST_ON_END: false 173 | LOAD_BEST_ON_VAL: false 174 | REMOVE: true 175 | SAVE_TOP_K: 10 176 | EARLY_STOP: 177 | PATIENCE: 30 178 | DDP: false 179 | GRADIENT_CLIP_VAL: 0.5 180 | LIMIT_TRAIN_BATCHES: 0.1 181 | LIMIT_VAL_BATCHES: 0.5 182 | LOG_TRAIN_N_STEPS: 100 183 | MAX_EPOCHS: 1000 184 | MAX_STEPS: -1 185 | PRECISION: 16 186 | VAL_CHECK_INTERVAL: 1.0 187 | -------------------------------------------------------------------------------- /mem/configs/S.yaml: -------------------------------------------------------------------------------- 1 | ANALYSIS: 2 | DRAW_NEURON_LOCATION: false 3 | SAVE_NEURON_LOCATION: false 4 | CHECKPOINT_DIR: /data/ckpt/ 5 | DATAMODULE: 6 | BATCH_SIZE: 8 7 | FEATURE_EXTRACTOR_MODE: false 8 | NUM_WORKERS: 8 9 | PIN_MEMORY: true 10 | DATASET: 11 | CACHE_DIR: /data/cache 12 | DARK_POSTFIX: '' 13 | FILTER_BY_SESSION: 14 | - -1 15 | FMRI_SPACE: fsaverage 16 | IMAGE_RESOLUTION: 17 | - 224 18 | - 224 19 | N_PREV_FRAMES: 32 20 | ROIS: 21 | - orig 22 | ROOT: /data/ALG23 23 | SUBJECT_LIST: 24 | - subj01 25 | DESCRIPTION: Default config 26 | EXPERIMENTAL: 27 | ANOTHER_SPLIT: false 28 | BEHV_ONLY: false 29 | BEHV_SELECTION: 30 | - -1 31 | BLANK_IMAGE: false 32 | NO_SPLIT: false 33 | SHUFFLE_IMAGES: false 34 | SHUFFLE_VAL: true 35 | STRAIGHT_FORWARD: false 36 | STRAIGHT_FORWARD_BUT_KEEP_BACKBONE_GRAD: false 37 | T_IMAGE: 0 38 | USE_DEV_MODEL: false 39 | USE_PREV_FRAME: true 40 | LOSS: 41 | DARK: 42 | ANNEAL: 43 | T: 30 44 | GT_ROIS: 45 | - htroi_1 46 | GT_SCALE_UP_COEF: 1.0 47 | MAX_EPOCH: 100 48 | USE: false 49 | NAME: SmoothL1Loss 50 | SMOOTH_L1_BETA: 0.01 51 | SYNC: 52 | EMA_BETA: 0.9 53 | EMA_BIAS_CORRECTION: false 54 | EMA_KEY: 
running_grad 55 | EXP_SCALE: 10.0 56 | EXP_SHIFT: 0.0 57 | LOG_SHIFT: 10.0 58 | SKIP_EPOCHS: 10 59 | STAGE: VAL 60 | UPDATE_RULE: exp 61 | USE: false 62 | MODEL: 63 | BACKBONE: 64 | ADAPTIVE_LN: 65 | SCALE: 0.5 66 | CACHE_DIR: /data/cache 67 | CLS_DIMS: 68 | - 1536 69 | - 1536 70 | - 1536 71 | - 768 72 | FEATURE_DIMS: 73 | - 768 74 | - 768 75 | - 768 76 | - 768 77 | LAYERS: 78 | - 2 79 | - 5 80 | - 8 81 | - 11 82 | LORA: 83 | RANK: 4 84 | SCALE: 0.2 85 | NAME: dinov2_vit_b 86 | BACKBONE_SMALL: 87 | ADAPTIVE_LN: 88 | SCALE: 0.5 89 | CLS_DIMS: 90 | - 384 91 | LAYERS: 92 | - 11 93 | LORA: 94 | RANK: 4 95 | SCALE: 0.2 96 | MERGE_WIDTH: 128 97 | NAME: dinov2_vit_s 98 | T_DIM: 128 99 | WIDTH: 128 100 | BOTTLENECK: 101 | OUT_DIM: 64 102 | RANK: -1 103 | COND: 104 | DIM: 256 105 | DROPOUT: 0.2 106 | IN_DIM: 35 107 | PASSTHROUGH_DIM: 64 108 | USE: true 109 | CONV_HEAD: 110 | DEPTHS: 111 | - 3 112 | - 3 113 | - 3 114 | - 3 115 | KERNEL_SIZES: 116 | - 5 117 | - 5 118 | - 5 119 | - 5 120 | MAX_DIM: 1024 121 | SIMPLE: true 122 | WIDTH: 256 123 | COORDS_MLP: 124 | DEPTH: 3 125 | LOG: true 126 | WIDTH: 128 127 | LAYER_SELECTOR: {} 128 | MAX_TRAIN_VOXELS: 25600 129 | PREV_FEAT: 130 | DIM: 1024 131 | RETINA_MAPPER: 132 | CONSTANT_SIGMA: 0.01 133 | VOXEL_OUTS: 134 | SHARED: 135 | MLP: 136 | DEPTH: 3 137 | WIDTH: 1024 138 | USE: false 139 | MODEL_SOUP: 140 | GREEDY_TARGET: heldout 141 | RECIPE: greedy 142 | USE: true 143 | OPTIMIZER: 144 | LR: 0.0003 145 | NAME: AdamW 146 | SCHEDULER: 147 | CYCLE_DECAY: 0.5 148 | CYCLE_LIMIT: 3 149 | K_DECAY: 1.5 150 | LR_MIN: 0.0003 151 | LR_MIN_WARMUP: 0.000001 152 | T_INITIAL: 1 153 | T_MULT: 1.0 154 | WARMUP_T: 10 155 | WEIGHT_DECAY: 0.0003 156 | POSITION_ENCODING: 157 | FEATURES: 32 158 | IN_DIM: 3 159 | MAX_STEPS: 1000 160 | PERIODS: 10000 161 | REGULARIZER: 162 | LAYER: 1.0e-04 163 | RESULTS_DIR: /nfscc/ray_results/modelsize/ 164 | TRAINER: 165 | ACCUMULATE_GRAD_BATCHES: 1 166 | CALLBACKS: 167 | CHECKPOINT: 168 | LOAD_BEST_ON_END: false 169 | LOAD_BEST_ON_VAL: false 170 | REMOVE: true 171 | SAVE_TOP_K: 10 172 | EARLY_STOP: 173 | PATIENCE: 30 174 | DDP: false 175 | GRADIENT_CLIP_VAL: 0.5 176 | LIMIT_TRAIN_BATCHES: 0.1 177 | LIMIT_VAL_BATCHES: 0.5 178 | LOG_TRAIN_N_STEPS: 100 179 | MAX_EPOCHS: 1000 180 | MAX_STEPS: -1 181 | PRECISION: 16 182 | VAL_CHECK_INTERVAL: 1.0 183 | -------------------------------------------------------------------------------- /mem/configs/bold5000.yaml: -------------------------------------------------------------------------------- 1 | ANALYSIS: 2 | DRAW_NEURON_LOCATION: false 3 | SAVE_NEURON_LOCATION: false 4 | CHECKPOINT_DIR: /data/ckpt/ 5 | DATAMODULE: 6 | BATCH_SIZE: 32 7 | FEATURE_EXTRACTOR_MODE: false 8 | NUM_WORKERS: 8 9 | PIN_MEMORY: true 10 | DATASET: 11 | CACHE_DIR: /data/cache 12 | DARK_POSTFIX: '' 13 | FILTER_BY_SESSION: 14 | - -1 15 | FMRI_SPACE: visual_B 16 | IMAGE_RESOLUTION: 17 | - 224 18 | - 224 19 | N_PREV_FRAMES: 32 20 | ROIS: 21 | - all 22 | ROOT: /data/mybold5000 23 | SUBJECT_LIST: 24 | - CSI1 25 | DESCRIPTION: Default config 26 | EXPERIMENTAL: 27 | ANOTHER_SPLIT: false 28 | BEHV_ONLY: false 29 | BEHV_SELECTION: 30 | - -1 31 | BLANK_IMAGE: false 32 | NO_SPLIT: false 33 | SHUFFLE_IMAGES: false 34 | SHUFFLE_VAL: true 35 | STRAIGHT_FORWARD: false 36 | STRAIGHT_FORWARD_BUT_KEEP_BACKBONE_GRAD: false 37 | T_IMAGE: 0 38 | USE_DEV_MODEL: true 39 | USE_PREV_FRAME: true 40 | LOSS: 41 | NAME: SmoothL1Loss 42 | SMOOTH_L1_BETA: 0.01 43 | SYNC: 44 | EMA_BETA: 0.9 45 | EMA_BIAS_CORRECTION: false 46 | EMA_KEY: running_grad 47 | 
EXP_SCALE: 10.0 48 | EXP_SHIFT: 0.0 49 | LOG_SHIFT: 10.0 50 | SKIP_EPOCHS: 10 51 | STAGE: VAL 52 | UPDATE_RULE: raw 53 | USE: false 54 | MODEL: 55 | BACKBONE: 56 | ADAPTIVE_LN: 57 | SCALE: 0.5 58 | CACHE_DIR: /data/cache 59 | CLS_DIMS: 60 | - 768 61 | - 768 62 | - 768 63 | - 384 64 | FEATURE_DIMS: 65 | - 384 66 | - 384 67 | - 384 68 | - 384 69 | LAYERS: 70 | - 2 71 | - 5 72 | - 8 73 | - 11 74 | LORA: 75 | RANK: 4 76 | SCALE: 0.2 77 | NAME: dinov2_vit_s 78 | BACKBONE_SMALL: 79 | ADAPTIVE_LN: 80 | SCALE: 0.5 81 | CLS_DIMS: 82 | - 384 83 | LAYERS: 84 | - 11 85 | LORA: 86 | RANK: 4 87 | SCALE: 0.2 88 | MERGE_WIDTH: 32 89 | NAME: dinov2_vit_s 90 | T_DIM: 128 91 | WIDTH: 32 92 | BOTTLENECK: 93 | OUT_DIM: 64 94 | RANK: -1 95 | COND: 96 | DIM: 256 97 | DROPOUT: 0.2 98 | IN_DIM: 35 99 | PASSTHROUGH_DIM: 16 100 | USE: true 101 | CONV_HEAD: 102 | DEPTHS: 103 | - 3 104 | - 3 105 | - 3 106 | - 3 107 | KERNEL_SIZES: 108 | - 5 109 | - 5 110 | - 5 111 | - 5 112 | MAX_DIM: 64 113 | SIMPLE: true 114 | WIDTH: 64 115 | COORDS_MLP: 116 | DEPTH: 3 117 | LOG: false 118 | WIDTH: 32 119 | LAYER_SELECTOR: {} 120 | MAX_TRAIN_VOXELS: 25600 121 | PREV_FEAT: 122 | DIM: 1024 123 | RETINA_MAPPER: 124 | CONSTANT_SIGMA: 0.01 125 | VOXEL_OUTS: 126 | SHARED: 127 | MLP: 128 | DEPTH: 3 129 | WIDTH: 1024 130 | USE: false 131 | MODEL_SOUP: 132 | GREEDY_TARGET: heldout 133 | RECIPE: greedy 134 | USE: true 135 | OPTIMIZER: 136 | LR: 0.001 137 | NAME: AdamW 138 | SCHEDULER: 139 | CYCLE_DECAY: 0.5 140 | CYCLE_LIMIT: 3 141 | K_DECAY: 1.5 142 | LR_MIN: 0.001 143 | LR_MIN_WARMUP: 0.0001 144 | T_INITIAL: 1 145 | T_MULT: 1.0 146 | WARMUP_T: 1 147 | WEIGHT_DECAY: 0.0003 148 | POSITION_ENCODING: 149 | FEATURES: 32 150 | IN_DIM: 3 151 | MAX_STEPS: 1000 152 | PERIODS: 10000 153 | REGULARIZER: 154 | LAYER: 3.0e-05 155 | RESULTS_DIR: /nfscc/alg23/ray_results/ 156 | TRAINER: 157 | ACCUMULATE_GRAD_BATCHES: 1 158 | CALLBACKS: 159 | CHECKPOINT: 160 | LOAD_BEST_ON_END: false 161 | LOAD_BEST_ON_VAL: false 162 | REMOVE: true 163 | SAVE_TOP_K: 10 164 | EARLY_STOP: 165 | PATIENCE: 15 166 | DDP: false 167 | GRADIENT_CLIP_VAL: 0.5 168 | LIMIT_TRAIN_BATCHES: 1.0 169 | LIMIT_VAL_BATCHES: 1.0 170 | LOG_TRAIN_N_STEPS: 100 171 | MAX_EPOCHS: 1000 172 | MAX_STEPS: -1 173 | PRECISION: 16 174 | VAL_CHECK_INTERVAL: 1.0 175 | -------------------------------------------------------------------------------- /mem/configs/debug.yaml: -------------------------------------------------------------------------------- 1 | ANALYSIS: 2 | DRAW_NEURON_LOCATION: false 3 | SAVE_NEURON_LOCATION: false 4 | CHECKPOINT_DIR: /data/ckpt/ 5 | DATAMODULE: 6 | BATCH_SIZE: 8 7 | FEATURE_EXTRACTOR_MODE: false 8 | NUM_WORKERS: 8 9 | PIN_MEMORY: true 10 | DATASET: 11 | CACHE_DIR: /data/cache 12 | DARK_POSTFIX: '' 13 | FILTER_BY_SESSION: 14 | - -1 15 | FMRI_SPACE: fsaverage 16 | IMAGE_RESOLUTION: 17 | - 224 18 | - 224 19 | N_PREV_FRAMES: 32 20 | ROIS: 21 | - all 22 | ROOT: /data/ALG23 23 | SUBJECT_LIST: 24 | - subj01 25 | DESCRIPTION: Default config 26 | EXPERIMENTAL: 27 | ANOTHER_SPLIT: false 28 | BEHV_ONLY: false 29 | BEHV_SELECTION: 30 | - -1 31 | BLANK_IMAGE: false 32 | NO_SPLIT: false 33 | SHUFFLE_IMAGES: false 34 | SHUFFLE_VAL: true 35 | STRAIGHT_FORWARD: false 36 | STRAIGHT_FORWARD_BUT_KEEP_BACKBONE_GRAD: false 37 | T_IMAGE: 0 38 | USE_DEV_MODEL: false 39 | USE_PREV_FRAME: true 40 | LOSS: 41 | DARK: 42 | ANNEAL: 43 | T: 30 44 | GT_ROIS: 45 | - htroi_1 46 | GT_SCALE_UP_COEF: 1.0 47 | MAX_EPOCH: 100 48 | USE: false 49 | NAME: SmoothL1Loss 50 | SMOOTH_L1_BETA: 0.01 51 | SYNC: 52 | 
EMA_BETA: 0.9 53 | EMA_BIAS_CORRECTION: false 54 | EMA_KEY: running_grad 55 | EXP_SCALE: 10.0 56 | EXP_SHIFT: 0.0 57 | LOG_SHIFT: 10.0 58 | SKIP_EPOCHS: 10 59 | STAGE: VAL 60 | UPDATE_RULE: raw 61 | USE: false 62 | MODEL: 63 | BACKBONE: 64 | ADAPTIVE_LN: 65 | SCALE: 0.5 66 | CACHE_DIR: /data/cache 67 | CLS_DIMS: 68 | - 1536 69 | - 1536 70 | - 1536 71 | - 768 72 | FEATURE_DIMS: 73 | - 768 74 | - 768 75 | - 768 76 | - 768 77 | LAYERS: 78 | - 2 79 | - 5 80 | - 8 81 | - 11 82 | LORA: 83 | RANK: 4 84 | SCALE: 0.2 85 | NAME: dinov2_vit_b 86 | BACKBONE_SMALL: 87 | ADAPTIVE_LN: 88 | SCALE: 0.5 89 | CLS_DIMS: 90 | - 384 91 | LAYERS: 92 | - 11 93 | LORA: 94 | RANK: 4 95 | SCALE: 0.2 96 | MERGE_WIDTH: 128 97 | NAME: dinov2_vit_s 98 | T_DIM: 128 99 | WIDTH: 128 100 | BOTTLENECK: 101 | OUT_DIM: 64 102 | RANK: -1 103 | COND: 104 | DIM: 256 105 | DROPOUT: 0.2 106 | IN_DIM: 35 107 | PASSTHROUGH_DIM: 64 108 | USE: true 109 | CONV_HEAD: 110 | DEPTHS: 111 | - 3 112 | - 3 113 | - 3 114 | - 3 115 | KERNEL_SIZES: 116 | - 5 117 | - 5 118 | - 5 119 | - 5 120 | MAX_DIM: 1024 121 | SIMPLE: true 122 | WIDTH: 256 123 | COORDS_MLP: 124 | DEPTH: 3 125 | LOG: true 126 | WIDTH: 128 127 | LAYER_SELECTOR: {} 128 | MAX_TRAIN_VOXELS: 25600 129 | PREV_FEAT: 130 | DIM: 1024 131 | RETINA_MAPPER: 132 | CONSTANT_SIGMA: 0.01 133 | VOXEL_OUTS: 134 | SHARED: 135 | MLP: 136 | DEPTH: 3 137 | WIDTH: 1024 138 | USE: false 139 | MODEL_SOUP: 140 | GREEDY_TARGET: heldout 141 | RECIPE: greedy 142 | USE: true 143 | OPTIMIZER: 144 | LR: 0.0003 145 | NAME: AdamW 146 | SCHEDULER: 147 | CYCLE_DECAY: 0.5 148 | CYCLE_LIMIT: 3 149 | K_DECAY: 1.5 150 | LR_MIN: 0.0003 151 | LR_MIN_WARMUP: 0.000001 152 | T_INITIAL: 1 153 | T_MULT: 1.0 154 | WARMUP_T: 10 155 | WEIGHT_DECAY: 0.0003 156 | POSITION_ENCODING: 157 | FEATURES: 32 158 | IN_DIM: 3 159 | MAX_STEPS: 1000 160 | PERIODS: 10000 161 | REGULARIZER: 162 | LAYER: 3.0e-05 163 | RESULTS_DIR: /nfscc/alg23/ray_results/ 164 | TRAINER: 165 | ACCUMULATE_GRAD_BATCHES: 1 166 | CALLBACKS: 167 | CHECKPOINT: 168 | LOAD_BEST_ON_END: false 169 | LOAD_BEST_ON_VAL: false 170 | REMOVE: true 171 | SAVE_TOP_K: 10 172 | EARLY_STOP: 173 | PATIENCE: 30 174 | DDP: false 175 | GRADIENT_CLIP_VAL: 0.5 176 | LIMIT_TRAIN_BATCHES: 0.1 177 | LIMIT_VAL_BATCHES: 0.5 178 | LOG_TRAIN_N_STEPS: 100 179 | MAX_EPOCHS: 1000 180 | MAX_STEPS: -1 181 | PRECISION: 16 182 | VAL_CHECK_INTERVAL: 1.0 183 | -------------------------------------------------------------------------------- /mem/configs/dev.yaml: -------------------------------------------------------------------------------- 1 | ANALYSIS: 2 | DRAW_NEURON_LOCATION: false 3 | SAVE_NEURON_LOCATION: false 4 | CHECKPOINT_DIR: /data/ckpt/ 5 | DATAMODULE: 6 | BATCH_SIZE: 32 7 | FEATURE_EXTRACTOR_MODE: false 8 | NUM_WORKERS: 8 9 | PIN_MEMORY: true 10 | DATASET: 11 | CACHE_DIR: /data/cache 12 | DARK_POSTFIX: '' 13 | FILTER_BY_SESSION: 14 | - -1 15 | FMRI_SPACE: fsaverage 16 | IMAGE_RESOLUTION: 17 | - 224 18 | - 224 19 | N_PREV_FRAMES: 32 20 | ROIS: 21 | - all 22 | ROOT: /data/ALG23 23 | SUBJECT_LIST: 24 | - subj01 25 | DESCRIPTION: Default config 26 | EXPERIMENTAL: 27 | ANOTHER_SPLIT: false 28 | BEHV_ONLY: false 29 | BEHV_SELECTION: 30 | - -1 31 | BLANK_IMAGE: false 32 | NO_SPLIT: false 33 | SHUFFLE_IMAGES: false 34 | SHUFFLE_VAL: true 35 | STRAIGHT_FORWARD: false 36 | STRAIGHT_FORWARD_BUT_KEEP_BACKBONE_GRAD: false 37 | T_IMAGE: 0 38 | USE_DEV_MODEL: true 39 | USE_PREV_FRAME: true 40 | LOSS: 41 | NAME: SmoothL1Loss 42 | SMOOTH_L1_BETA: 0.01 43 | SYNC: 44 | EMA_BETA: 0.9 45 | 
EMA_BIAS_CORRECTION: false 46 | EMA_KEY: running_grad 47 | EXP_SCALE: 10.0 48 | EXP_SHIFT: 0.0 49 | LOG_SHIFT: 10.0 50 | SKIP_EPOCHS: 10 51 | STAGE: VAL 52 | UPDATE_RULE: raw 53 | USE: false 54 | MODEL: 55 | BACKBONE: 56 | ADAPTIVE_LN: 57 | SCALE: 0.5 58 | CACHE_DIR: /data/cache 59 | CLS_DIMS: 60 | - 768 61 | - 768 62 | - 768 63 | - 384 64 | FEATURE_DIMS: 65 | - 384 66 | - 384 67 | - 384 68 | - 384 69 | LAYERS: 70 | - 2 71 | - 5 72 | - 8 73 | - 11 74 | LORA: 75 | RANK: 4 76 | SCALE: 0.2 77 | NAME: dinov2_vit_s 78 | BACKBONE_SMALL: 79 | ADAPTIVE_LN: 80 | SCALE: 0.5 81 | CLS_DIMS: 82 | - 384 83 | LAYERS: 84 | - 11 85 | LORA: 86 | RANK: 4 87 | SCALE: 0.2 88 | MERGE_WIDTH: 32 89 | NAME: dinov2_vit_s 90 | T_DIM: 128 91 | WIDTH: 32 92 | BOTTLENECK: 93 | OUT_DIM: 64 94 | RANK: -1 95 | COND: 96 | DIM: 256 97 | DROPOUT: 0.2 98 | IN_DIM: 35 99 | PASSTHROUGH_DIM: 16 100 | USE: true 101 | CONV_HEAD: 102 | DEPTHS: 103 | - 3 104 | - 3 105 | - 3 106 | - 3 107 | KERNEL_SIZES: 108 | - 5 109 | - 5 110 | - 5 111 | - 5 112 | MAX_DIM: 64 113 | SIMPLE: true 114 | WIDTH: 64 115 | COORDS_MLP: 116 | DEPTH: 3 117 | LOG: false 118 | WIDTH: 32 119 | LAYER_SELECTOR: {} 120 | MAX_TRAIN_VOXELS: 25600 121 | PREV_FEAT: 122 | DIM: 1024 123 | RETINA_MAPPER: 124 | CONSTANT_SIGMA: 0.01 125 | VOXEL_OUTS: 126 | SHARED: 127 | MLP: 128 | DEPTH: 3 129 | WIDTH: 1024 130 | USE: false 131 | MODEL_SOUP: 132 | GREEDY_TARGET: heldout 133 | RECIPE: greedy 134 | USE: true 135 | OPTIMIZER: 136 | LR: 0.001 137 | NAME: AdamW 138 | SCHEDULER: 139 | CYCLE_DECAY: 0.5 140 | CYCLE_LIMIT: 3 141 | K_DECAY: 1.5 142 | LR_MIN: 0.001 143 | LR_MIN_WARMUP: 0.0001 144 | T_INITIAL: 1 145 | T_MULT: 1.0 146 | WARMUP_T: 1 147 | WEIGHT_DECAY: 0.0003 148 | POSITION_ENCODING: 149 | FEATURES: 32 150 | IN_DIM: 3 151 | MAX_STEPS: 1000 152 | PERIODS: 10000 153 | REGULARIZER: 154 | LAYER: 3.0e-05 155 | RESULTS_DIR: /nfscc/alg23/ray_results/ 156 | TRAINER: 157 | ACCUMULATE_GRAD_BATCHES: 1 158 | CALLBACKS: 159 | CHECKPOINT: 160 | LOAD_BEST_ON_END: false 161 | LOAD_BEST_ON_VAL: false 162 | REMOVE: true 163 | SAVE_TOP_K: 10 164 | EARLY_STOP: 165 | PATIENCE: 10 166 | DDP: false 167 | GRADIENT_CLIP_VAL: 0.5 168 | LIMIT_TRAIN_BATCHES: 1.0 169 | LIMIT_VAL_BATCHES: 1.0 170 | LOG_TRAIN_N_STEPS: 100 171 | MAX_EPOCHS: 1000 172 | MAX_STEPS: -1 173 | PRECISION: 16 174 | VAL_CHECK_INTERVAL: 1.0 175 | -------------------------------------------------------------------------------- /mem/configs/dev_B.yaml: -------------------------------------------------------------------------------- 1 | ANALYSIS: 2 | DRAW_NEURON_LOCATION: false 3 | SAVE_NEURON_LOCATION: false 4 | CHECKPOINT_DIR: /data/ckpt/ 5 | DATAMODULE: 6 | BATCH_SIZE: 32 7 | FEATURE_EXTRACTOR_MODE: false 8 | NUM_WORKERS: 8 9 | PIN_MEMORY: true 10 | DATASET: 11 | CACHE_DIR: /data/cache 12 | DARK_POSTFIX: '' 13 | FILTER_BY_SESSION: 14 | - -1 15 | FMRI_SPACE: fsaverage 16 | IMAGE_RESOLUTION: 17 | - 224 18 | - 224 19 | N_PREV_FRAMES: 32 20 | ROIS: 21 | - all 22 | ROOT: /data/ALG23 23 | SUBJECT_LIST: 24 | - subj01 25 | DESCRIPTION: Default config 26 | EXPERIMENTAL: 27 | ANOTHER_SPLIT: false 28 | BEHV_ONLY: false 29 | BEHV_SELECTION: 30 | - -1 31 | BLANK_IMAGE: false 32 | NO_SPLIT: false 33 | SHUFFLE_IMAGES: false 34 | SHUFFLE_VAL: true 35 | STRAIGHT_FORWARD: false 36 | STRAIGHT_FORWARD_BUT_KEEP_BACKBONE_GRAD: false 37 | T_IMAGE: 0 38 | USE_DEV_MODEL: true 39 | USE_PREV_FRAME: true 40 | LOSS: 41 | NAME: SmoothL1Loss 42 | SMOOTH_L1_BETA: 0.01 43 | SYNC: 44 | EMA_BETA: 0.9 45 | EMA_BIAS_CORRECTION: false 46 | EMA_KEY: running_grad 47 
| EXP_SCALE: 10.0 48 | EXP_SHIFT: 0.0 49 | LOG_SHIFT: 10.0 50 | SKIP_EPOCHS: 10 51 | STAGE: VAL 52 | UPDATE_RULE: raw 53 | USE: false 54 | MODEL: 55 | BACKBONE: 56 | ADAPTIVE_LN: 57 | SCALE: 0.5 58 | CACHE_DIR: /data/cache 59 | CLS_DIMS: 60 | - 1536 61 | - 1536 62 | - 1536 63 | - 768 64 | FEATURE_DIMS: 65 | - 768 66 | - 768 67 | - 768 68 | - 768 69 | LAYERS: 70 | - 2 71 | - 5 72 | - 8 73 | - 11 74 | LORA: 75 | RANK: 4 76 | SCALE: 0.2 77 | NAME: dinov2_vit_b 78 | BACKBONE_SMALL: 79 | ADAPTIVE_LN: 80 | SCALE: 0.5 81 | CLS_DIMS: 82 | - 384 83 | LAYERS: 84 | - 11 85 | LORA: 86 | RANK: 4 87 | SCALE: 0.2 88 | MERGE_WIDTH: 32 89 | NAME: dinov2_vit_s 90 | T_DIM: 128 91 | WIDTH: 32 92 | BOTTLENECK: 93 | OUT_DIM: 64 94 | RANK: -1 95 | COND: 96 | DIM: 256 97 | DROPOUT: 0.2 98 | IN_DIM: 35 99 | PASSTHROUGH_DIM: 16 100 | USE: true 101 | CONV_HEAD: 102 | DEPTHS: 103 | - 3 104 | - 3 105 | - 3 106 | - 3 107 | KERNEL_SIZES: 108 | - 5 109 | - 5 110 | - 5 111 | - 5 112 | MAX_DIM: 64 113 | SIMPLE: true 114 | WIDTH: 64 115 | COORDS_MLP: 116 | DEPTH: 3 117 | LOG: false 118 | WIDTH: 32 119 | LAYER_SELECTOR: {} 120 | MAX_TRAIN_VOXELS: 25600 121 | PREV_FEAT: 122 | DIM: 1024 123 | RETINA_MAPPER: 124 | CONSTANT_SIGMA: 0.01 125 | VOXEL_OUTS: 126 | SHARED: 127 | MLP: 128 | DEPTH: 3 129 | WIDTH: 1024 130 | USE: false 131 | MODEL_SOUP: 132 | GREEDY_TARGET: heldout 133 | RECIPE: greedy 134 | USE: true 135 | OPTIMIZER: 136 | LR: 0.001 137 | NAME: AdamW 138 | SCHEDULER: 139 | CYCLE_DECAY: 0.5 140 | CYCLE_LIMIT: 3 141 | K_DECAY: 1.5 142 | LR_MIN: 0.001 143 | LR_MIN_WARMUP: 0.0001 144 | T_INITIAL: 1 145 | T_MULT: 1.0 146 | WARMUP_T: 1 147 | WEIGHT_DECAY: 0.0003 148 | POSITION_ENCODING: 149 | FEATURES: 32 150 | IN_DIM: 3 151 | MAX_STEPS: 1000 152 | PERIODS: 10000 153 | REGULARIZER: 154 | LAYER: 3.0e-05 155 | RESULTS_DIR: /nfscc/alg23/ray_results/ 156 | TRAINER: 157 | ACCUMULATE_GRAD_BATCHES: 1 158 | CALLBACKS: 159 | CHECKPOINT: 160 | LOAD_BEST_ON_END: false 161 | LOAD_BEST_ON_VAL: false 162 | REMOVE: true 163 | SAVE_TOP_K: 10 164 | EARLY_STOP: 165 | PATIENCE: 10 166 | DDP: false 167 | GRADIENT_CLIP_VAL: 0.5 168 | LIMIT_TRAIN_BATCHES: 1.0 169 | LIMIT_VAL_BATCHES: 1.0 170 | LOG_TRAIN_N_STEPS: 100 171 | MAX_EPOCHS: 1000 172 | MAX_STEPS: -1 173 | PRECISION: 16 174 | VAL_CHECK_INTERVAL: 1.0 175 | -------------------------------------------------------------------------------- /mem/configs/xvaa.yaml: -------------------------------------------------------------------------------- 1 | ANALYSIS: 2 | DRAW_NEURON_LOCATION: false 3 | SAVE_NEURON_LOCATION: false 4 | CHECKPOINT_DIR: /data/ckpt/ 5 | DATAMODULE: 6 | BATCH_SIZE: 8 7 | FEATURE_EXTRACTOR_MODE: false 8 | NUM_WORKERS: 8 9 | PIN_MEMORY: true 10 | DATASET: 11 | CACHE_DIR: /data/cache 12 | DARK_POSTFIX: '' 13 | FILTER_BY_SESSION: 14 | - -1 15 | FMRI_SPACE: fsaverage 16 | IMAGE_RESOLUTION: 17 | - 224 18 | - 224 19 | N_PREV_FRAMES: 32 20 | ROIS: 21 | - all 22 | ROOT: /data/ALG23 23 | SUBJECT_LIST: 24 | - subj01 25 | DESCRIPTION: Default config 26 | EXPERIMENTAL: 27 | ANOTHER_SPLIT: false 28 | BEHV_ONLY: false 29 | BEHV_SELECTION: 30 | - -1 31 | BLANK_IMAGE: false 32 | NO_SPLIT: false 33 | SHUFFLE_IMAGES: false 34 | SHUFFLE_VAL: true 35 | STRAIGHT_FORWARD: false 36 | STRAIGHT_FORWARD_BUT_KEEP_BACKBONE_GRAD: false 37 | T_IMAGE: 0 38 | USE_DEV_MODEL: false 39 | USE_PREV_FRAME: true 40 | LOSS: 41 | DARK: 42 | ANNEAL: 43 | T: 30 44 | GT_ROIS: 45 | - htroi_1 46 | GT_SCALE_UP_COEF: 1.0 47 | MAX_EPOCH: 100 48 | USE: false 49 | NAME: SmoothL1Loss 50 | SMOOTH_L1_BETA: 0.01 51 | SYNC: 52 | 
EMA_BETA: 0.9 53 | EMA_BIAS_CORRECTION: false 54 | EMA_KEY: running_grad 55 | EXP_SCALE: 10.0 56 | EXP_SHIFT: 0.0 57 | LOG_SHIFT: 10.0 58 | SKIP_EPOCHS: 10 59 | STAGE: VAL 60 | UPDATE_RULE: exp 61 | USE: false 62 | MODEL: 63 | BACKBONE: 64 | ADAPTIVE_LN: 65 | SCALE: 0.5 66 | CACHE_DIR: /data/cache 67 | CLS_DIMS: 68 | - 1536 69 | - 1536 70 | - 1536 71 | - 768 72 | FEATURE_DIMS: 73 | - 768 74 | - 768 75 | - 768 76 | - 768 77 | LAYERS: 78 | - 2 79 | - 5 80 | - 8 81 | - 11 82 | LORA: 83 | RANK: 4 84 | SCALE: 0.2 85 | NAME: dinov2_vit_b 86 | BACKBONE_SMALL: 87 | ADAPTIVE_LN: 88 | SCALE: 0.5 89 | CLS_DIMS: 90 | - 384 91 | LAYERS: 92 | - 11 93 | LORA: 94 | RANK: 4 95 | SCALE: 0.2 96 | MERGE_WIDTH: 64 97 | NAME: dinov2_vit_s 98 | T_DIM: 64 99 | WIDTH: 64 100 | BOTTLENECK: 101 | OUT_DIM: 64 102 | RANK: -1 103 | COND: 104 | DIM: 256 105 | DROPOUT: 0.2 106 | IN_DIM: 35 107 | PASSTHROUGH_DIM: 32 108 | USE: true 109 | CONV_HEAD: 110 | DEPTHS: 111 | - 3 112 | - 3 113 | - 3 114 | - 3 115 | KERNEL_SIZES: 116 | - 5 117 | - 5 118 | - 5 119 | - 5 120 | MAX_DIM: 1024 121 | SIMPLE: true 122 | WIDTH: 192 123 | COORDS_MLP: 124 | DEPTH: 3 125 | LOG: true 126 | WIDTH: 128 127 | LAYER_SELECTOR: {} 128 | MAX_TRAIN_VOXELS: 25600 129 | PREV_FEAT: 130 | DIM: 1024 131 | RETINA_MAPPER: 132 | CONSTANT_SIGMA: 0.01 133 | VOXEL_OUTS: 134 | SHARED: 135 | MLP: 136 | DEPTH: 3 137 | WIDTH: 1024 138 | USE: false 139 | MODEL_SOUP: 140 | GREEDY_TARGET: heldout 141 | RECIPE: greedy 142 | USE: true 143 | OPTIMIZER: 144 | LR: 0.0003 145 | NAME: AdamW 146 | SCHEDULER: 147 | CYCLE_DECAY: 0.5 148 | CYCLE_LIMIT: 3 149 | K_DECAY: 1.5 150 | LR_MIN: 0.0003 151 | LR_MIN_WARMUP: 0.000001 152 | T_INITIAL: 1 153 | T_MULT: 1.0 154 | WARMUP_T: 10 155 | WEIGHT_DECAY: 0.0003 156 | POSITION_ENCODING: 157 | FEATURES: 32 158 | IN_DIM: 3 159 | MAX_STEPS: 1000 160 | PERIODS: 10000 161 | REGULARIZER: 162 | LAYER: 3.0e-05 163 | RESULTS_DIR: /nfscc/alg23/ray_results/ 164 | TRAINER: 165 | ACCUMULATE_GRAD_BATCHES: 1 166 | CALLBACKS: 167 | CHECKPOINT: 168 | LOAD_BEST_ON_END: false 169 | LOAD_BEST_ON_VAL: false 170 | REMOVE: true 171 | SAVE_TOP_K: 10 172 | EARLY_STOP: 173 | PATIENCE: 30 174 | DDP: false 175 | GRADIENT_CLIP_VAL: 0.5 176 | LIMIT_TRAIN_BATCHES: 0.1 177 | LIMIT_VAL_BATCHES: 0.5 178 | LOG_TRAIN_N_STEPS: 100 179 | MAX_EPOCHS: 1000 180 | MAX_STEPS: -1 181 | PRECISION: 16 182 | VAL_CHECK_INTERVAL: 1.0 183 | -------------------------------------------------------------------------------- /mem/configs/xvba.yaml: -------------------------------------------------------------------------------- 1 | ANALYSIS: 2 | DRAW_NEURON_LOCATION: false 3 | SAVE_NEURON_LOCATION: false 4 | CHECKPOINT_DIR: /data/ckpt/ 5 | DATAMODULE: 6 | BATCH_SIZE: 32 7 | FEATURE_EXTRACTOR_MODE: false 8 | NUM_WORKERS: 8 9 | PIN_MEMORY: true 10 | DATASET: 11 | CACHE_DIR: /data/cache 12 | DARK_POSTFIX: '' 13 | FILTER_BY_SESSION: 14 | - -1 15 | FMRI_SPACE: fsaverage 16 | IMAGE_RESOLUTION: 17 | - 224 18 | - 224 19 | N_PREV_FRAMES: 32 20 | ROIS: 21 | - all 22 | ROOT: /data/ALG23 23 | SUBJECT_LIST: 24 | - subj01 25 | - subj02 26 | - subj03 27 | - subj04 28 | - subj05 29 | - subj06 30 | - subj07 31 | - subj08 32 | DESCRIPTION: Default config 33 | EXPERIMENTAL: 34 | ANOTHER_SPLIT: false 35 | BEHV_ONLY: false 36 | BEHV_SELECTION: 37 | - -1 38 | BLANK_IMAGE: false 39 | NO_SPLIT: false 40 | SHUFFLE_IMAGES: false 41 | SHUFFLE_VAL: true 42 | STRAIGHT_FORWARD: false 43 | STRAIGHT_FORWARD_BUT_KEEP_BACKBONE_GRAD: false 44 | T_IMAGE: 0 45 | USE_DEV_MODEL: false 46 | USE_PREV_FRAME: true 47 | LOSS: 48 | 
DARK: 49 | ANNEAL: 50 | T: 30 51 | GT_ROIS: 52 | - htroi_1 53 | GT_SCALE_UP_COEF: 1.0 54 | MAX_EPOCH: 100 55 | USE: false 56 | NAME: SmoothL1Loss 57 | SMOOTH_L1_BETA: 0.01 58 | SYNC: 59 | EMA_BETA: 0.9 60 | EMA_BIAS_CORRECTION: false 61 | EMA_KEY: running_grad 62 | EXP_SCALE: 10.0 63 | EXP_SHIFT: 0.0 64 | LOG_SHIFT: 10.0 65 | SKIP_EPOCHS: 20 66 | STAGE: VAL 67 | UPDATE_RULE: raw 68 | USE: true 69 | MODEL: 70 | BACKBONE: 71 | ADAPTIVE_LN: 72 | SCALE: 0.5 73 | CACHE_DIR: /data/cache 74 | CLS_DIMS: 75 | - 1536 76 | - 1536 77 | - 1536 78 | - 768 79 | FEATURE_DIMS: 80 | - 768 81 | - 768 82 | - 768 83 | - 768 84 | LAYERS: 85 | - 2 86 | - 5 87 | - 8 88 | - 11 89 | LORA: 90 | RANK: 4 91 | SCALE: 0.2 92 | NAME: dinov2_vit_b 93 | BACKBONE_SMALL: 94 | ADAPTIVE_LN: 95 | SCALE: 0.5 96 | CLS_DIMS: 97 | - 384 98 | LAYERS: 99 | - 11 100 | LORA: 101 | RANK: 4 102 | SCALE: 0.2 103 | MERGE_WIDTH: 64 104 | NAME: dinov2_vit_s 105 | T_DIM: 64 106 | WIDTH: 64 107 | BOTTLENECK: 108 | OUT_DIM: 64 109 | RANK: -1 110 | COND: 111 | DIM: 256 112 | DROPOUT: 0.2 113 | IN_DIM: 35 114 | PASSTHROUGH_DIM: 32 115 | USE: true 116 | CONV_HEAD: 117 | DEPTHS: 118 | - 3 119 | - 3 120 | - 3 121 | - 3 122 | KERNEL_SIZES: 123 | - 5 124 | - 5 125 | - 5 126 | - 5 127 | MAX_DIM: 1024 128 | SIMPLE: true 129 | WIDTH: 192 130 | COORDS_MLP: 131 | DEPTH: 3 132 | LOG: false 133 | WIDTH: 128 134 | LAYER_SELECTOR: {} 135 | MAX_TRAIN_VOXELS: 25600 136 | PREV_FEAT: 137 | DIM: 1024 138 | RETINA_MAPPER: 139 | CONSTANT_SIGMA: 0.01 140 | VOXEL_OUTS: 141 | SHARED: 142 | MLP: 143 | DEPTH: 3 144 | WIDTH: 1024 145 | USE: false 146 | MODEL_SOUP: 147 | GREEDY_TARGET: heldout 148 | RECIPE: greedy 149 | USE: true 150 | OPTIMIZER: 151 | LR: 0.0003 152 | NAME: AdamW 153 | SCHEDULER: 154 | CYCLE_DECAY: 0.5 155 | CYCLE_LIMIT: 3 156 | K_DECAY: 1.5 157 | LR_MIN: 0.0003 158 | LR_MIN_WARMUP: 0.000001 159 | T_INITIAL: 1 160 | T_MULT: 1.0 161 | WARMUP_T: 20 162 | WEIGHT_DECAY: 0.0003 163 | POSITION_ENCODING: 164 | FEATURES: 32 165 | IN_DIM: 3 166 | MAX_STEPS: 1000 167 | PERIODS: 10000 168 | REGULARIZER: 169 | LAYER: 3.0e-05 170 | RESULTS_DIR: /nfscc/alg23/ray_results/ 171 | TRAINER: 172 | ACCUMULATE_GRAD_BATCHES: 1 173 | CALLBACKS: 174 | CHECKPOINT: 175 | LOAD_BEST_ON_END: false 176 | LOAD_BEST_ON_VAL: false 177 | REMOVE: true 178 | SAVE_TOP_K: 10 179 | EARLY_STOP: 180 | PATIENCE: 120 181 | DDP: false 182 | GRADIENT_CLIP_VAL: 0.5 183 | LIMIT_TRAIN_BATCHES: 0.05 184 | LIMIT_VAL_BATCHES: 0.25 185 | LOG_TRAIN_N_STEPS: 100 186 | MAX_EPOCHS: 500 187 | MAX_STEPS: -1 188 | PRECISION: 16 189 | VAL_CHECK_INTERVAL: 1.0 190 | -------------------------------------------------------------------------------- /mem/configs/xvea.yaml: -------------------------------------------------------------------------------- 1 | ANALYSIS: 2 | DRAW_NEURON_LOCATION: false 3 | SAVE_NEURON_LOCATION: false 4 | CHECKPOINT_DIR: /data/ckpt/ 5 | DATAMODULE: 6 | BATCH_SIZE: 32 7 | FEATURE_EXTRACTOR_MODE: false 8 | NUM_WORKERS: 8 9 | PIN_MEMORY: true 10 | DATASET: 11 | CACHE_DIR: /data/cache 12 | DARK_POSTFIX: xvdb 13 | FILTER_BY_SESSION: 14 | - -1 15 | FMRI_SPACE: fsaverage 16 | IMAGE_RESOLUTION: 17 | - 224 18 | - 224 19 | N_PREV_FRAMES: 32 20 | ROIS: 21 | - all 22 | ROOT: /data/ALG23 23 | SUBJECT_LIST: 24 | - subj01 25 | - subj02 26 | - subj03 27 | - subj04 28 | - subj05 29 | - subj06 30 | - subj07 31 | - subj08 32 | DESCRIPTION: Default config 33 | EXPERIMENTAL: 34 | ANOTHER_SPLIT: false 35 | BEHV_ONLY: false 36 | BEHV_SELECTION: 37 | - -1 38 | BLANK_IMAGE: false 39 | NO_SPLIT: false 40 | 
SHUFFLE_IMAGES: false 41 | SHUFFLE_VAL: true 42 | STRAIGHT_FORWARD: false 43 | STRAIGHT_FORWARD_BUT_KEEP_BACKBONE_GRAD: false 44 | T_IMAGE: 0 45 | USE_DEV_MODEL: false 46 | USE_PREV_FRAME: true 47 | LOSS: 48 | DARK: 49 | MAX_EPOCH: 300 50 | USE: true 51 | NAME: SmoothL1Loss 52 | SMOOTH_L1_BETA: 0.01 53 | SYNC: 54 | EMA_BETA: 0.9 55 | EMA_BIAS_CORRECTION: false 56 | EMA_KEY: running_grad 57 | EXP_SCALE: 10.0 58 | EXP_SHIFT: 0.0 59 | LOG_SHIFT: 10.0 60 | SKIP_EPOCHS: 20 61 | STAGE: VAL 62 | UPDATE_RULE: raw 63 | USE: true 64 | MODEL: 65 | BACKBONE: 66 | ADAPTIVE_LN: 67 | SCALE: 0.5 68 | CACHE_DIR: /data/cache 69 | CLS_DIMS: 70 | - 1536 71 | - 1536 72 | - 1536 73 | - 768 74 | FEATURE_DIMS: 75 | - 768 76 | - 768 77 | - 768 78 | - 768 79 | LAYERS: 80 | - 2 81 | - 5 82 | - 8 83 | - 11 84 | LORA: 85 | RANK: 4 86 | SCALE: 0.2 87 | NAME: dinov2_vit_b 88 | BACKBONE_SMALL: 89 | ADAPTIVE_LN: 90 | SCALE: 0.5 91 | CLS_DIMS: 92 | - 384 93 | LAYERS: 94 | - 11 95 | LORA: 96 | RANK: 4 97 | SCALE: 0.2 98 | MERGE_WIDTH: 64 99 | NAME: dinov2_vit_s 100 | T_DIM: 64 101 | WIDTH: 64 102 | BOTTLENECK: 103 | OUT_DIM: 64 104 | RANK: -1 105 | COND: 106 | DIM: 256 107 | DROPOUT: 0.2 108 | IN_DIM: 35 109 | PASSTHROUGH_DIM: 32 110 | USE: true 111 | CONV_HEAD: 112 | DEPTHS: 113 | - 3 114 | - 3 115 | - 3 116 | - 3 117 | KERNEL_SIZES: 118 | - 5 119 | - 5 120 | - 5 121 | - 5 122 | MAX_DIM: 1024 123 | SIMPLE: true 124 | WIDTH: 192 125 | COORDS_MLP: 126 | DEPTH: 3 127 | LOG: false 128 | WIDTH: 128 129 | LAYER_SELECTOR: {} 130 | MAX_TRAIN_VOXELS: 25600 131 | PREV_FEAT: 132 | DIM: 1024 133 | RETINA_MAPPER: 134 | CONSTANT_SIGMA: 0.01 135 | VOXEL_OUTS: 136 | SHARED: 137 | MLP: 138 | DEPTH: 3 139 | WIDTH: 1024 140 | USE: false 141 | MODEL_SOUP: 142 | GREEDY_TARGET: heldout 143 | RECIPE: greedy 144 | USE: true 145 | OPTIMIZER: 146 | LR: 0.0003 147 | NAME: AdamW 148 | SCHEDULER: 149 | CYCLE_DECAY: 0.5 150 | CYCLE_LIMIT: 3 151 | K_DECAY: 1.5 152 | LR_MIN: 0.0003 153 | LR_MIN_WARMUP: 0.0001 154 | T_INITIAL: 1 155 | T_MULT: 1.0 156 | WARMUP_T: 10 157 | WEIGHT_DECAY: 0.0003 158 | POSITION_ENCODING: 159 | FEATURES: 32 160 | IN_DIM: 3 161 | MAX_STEPS: 1000 162 | PERIODS: 10000 163 | REGULARIZER: 164 | LAYER: 3.0e-05 165 | RESULTS_DIR: /nfscc/alg23/ray_results/ 166 | TRAINER: 167 | ACCUMULATE_GRAD_BATCHES: 1 168 | CALLBACKS: 169 | CHECKPOINT: 170 | LOAD_BEST_ON_END: false 171 | LOAD_BEST_ON_VAL: false 172 | REMOVE: true 173 | SAVE_TOP_K: 10 174 | EARLY_STOP: 175 | PATIENCE: 120 176 | DDP: false 177 | GRADIENT_CLIP_VAL: 0.5 178 | LIMIT_TRAIN_BATCHES: 0.05 179 | LIMIT_VAL_BATCHES: 0.25 180 | LOG_TRAIN_N_STEPS: 100 181 | MAX_EPOCHS: 1000 182 | MAX_STEPS: -1 183 | PRECISION: 16 184 | VAL_CHECK_INTERVAL: 1.0 185 | -------------------------------------------------------------------------------- /mem/configs/xvfe.yaml: -------------------------------------------------------------------------------- 1 | ANALYSIS: 2 | DRAW_NEURON_LOCATION: false 3 | SAVE_NEURON_LOCATION: false 4 | CHECKPOINT_DIR: /data/ckpt/ 5 | DATAMODULE: 6 | BATCH_SIZE: 32 7 | FEATURE_EXTRACTOR_MODE: false 8 | NUM_WORKERS: 8 9 | PIN_MEMORY: true 10 | DATASET: 11 | CACHE_DIR: /data/cache 12 | DARK_POSTFIX: xvdb 13 | FILTER_BY_SESSION: 14 | - -1 15 | FMRI_SPACE: fsaverage 16 | IMAGE_RESOLUTION: 17 | - 224 18 | - 224 19 | N_PREV_FRAMES: 32 20 | ROIS: 21 | - orig 22 | ROOT: /data/ALG23 23 | SUBJECT_LIST: 24 | - subj01 25 | - subj02 26 | - subj03 27 | - subj04 28 | - subj05 29 | - subj06 30 | - subj07 31 | - subj08 32 | DESCRIPTION: Default config 33 | EXPERIMENTAL: 34 | 
ANOTHER_SPLIT: false 35 | BEHV_ONLY: false 36 | BEHV_SELECTION: 37 | - -1 38 | BLANK_IMAGE: false 39 | NO_SPLIT: false 40 | SHUFFLE_IMAGES: false 41 | SHUFFLE_VAL: true 42 | STRAIGHT_FORWARD: false 43 | STRAIGHT_FORWARD_BUT_KEEP_BACKBONE_GRAD: false 44 | T_IMAGE: 0 45 | USE_DEV_MODEL: false 46 | USE_PREV_FRAME: true 47 | LOSS: 48 | DARK: 49 | MAX_EPOCH: 150 50 | USE: true 51 | NAME: SmoothL1Loss 52 | SMOOTH_L1_BETA: 0.01 53 | SYNC: 54 | EMA_BETA: 0.9 55 | EMA_BIAS_CORRECTION: false 56 | EMA_KEY: running_grad 57 | EXP_SCALE: 10.0 58 | EXP_SHIFT: 0.0 59 | LOG_SHIFT: 10.0 60 | SKIP_EPOCHS: 20 61 | STAGE: VAL 62 | UPDATE_RULE: raw 63 | USE: false 64 | MODEL: 65 | BACKBONE: 66 | ADAPTIVE_LN: 67 | SCALE: 0.5 68 | CACHE_DIR: /data/cache 69 | CLS_DIMS: 70 | - 1536 71 | - 1536 72 | - 1536 73 | - 768 74 | FEATURE_DIMS: 75 | - 768 76 | - 768 77 | - 768 78 | - 768 79 | LAYERS: 80 | - 2 81 | - 5 82 | - 8 83 | - 11 84 | LORA: 85 | RANK: 4 86 | SCALE: 0.2 87 | NAME: dinov2_vit_b 88 | BACKBONE_SMALL: 89 | ADAPTIVE_LN: 90 | SCALE: 0.5 91 | CLS_DIMS: 92 | - 384 93 | LAYERS: 94 | - 11 95 | LORA: 96 | RANK: 4 97 | SCALE: 0.2 98 | MERGE_WIDTH: 64 99 | NAME: dinov2_vit_s 100 | T_DIM: 64 101 | WIDTH: 64 102 | BOTTLENECK: 103 | OUT_DIM: 64 104 | RANK: -1 105 | COND: 106 | DIM: 256 107 | DROPOUT: 0.2 108 | IN_DIM: 35 109 | PASSTHROUGH_DIM: 32 110 | USE: true 111 | CONV_HEAD: 112 | DEPTHS: 113 | - 3 114 | - 3 115 | - 3 116 | - 3 117 | KERNEL_SIZES: 118 | - 5 119 | - 5 120 | - 5 121 | - 5 122 | MAX_DIM: 1024 123 | SIMPLE: true 124 | WIDTH: 192 125 | COORDS_MLP: 126 | DEPTH: 3 127 | LOG: false 128 | WIDTH: 128 129 | LAYER_SELECTOR: {} 130 | MAX_TRAIN_VOXELS: 25600 131 | PREV_FEAT: 132 | DIM: 1024 133 | RETINA_MAPPER: 134 | CONSTANT_SIGMA: 0.01 135 | VOXEL_OUTS: 136 | SHARED: 137 | MLP: 138 | DEPTH: 3 139 | WIDTH: 1024 140 | USE: false 141 | MODEL_SOUP: 142 | GREEDY_TARGET: heldout 143 | RECIPE: greedy 144 | USE: true 145 | OPTIMIZER: 146 | LR: 0.0003 147 | NAME: AdamW 148 | SCHEDULER: 149 | CYCLE_DECAY: 0.5 150 | CYCLE_LIMIT: 3 151 | K_DECAY: 1.5 152 | LR_MIN: 0.0003 153 | LR_MIN_WARMUP: 0.0001 154 | T_INITIAL: 1 155 | T_MULT: 1.0 156 | WARMUP_T: 10 157 | WEIGHT_DECAY: 0.0003 158 | POSITION_ENCODING: 159 | FEATURES: 32 160 | IN_DIM: 3 161 | MAX_STEPS: 1000 162 | PERIODS: 10000 163 | REGULARIZER: 164 | LAYER: 3.0e-05 165 | RESULTS_DIR: /nfscc/alg23/ray_results/ 166 | TRAINER: 167 | ACCUMULATE_GRAD_BATCHES: 1 168 | CALLBACKS: 169 | CHECKPOINT: 170 | LOAD_BEST_ON_END: false 171 | LOAD_BEST_ON_VAL: false 172 | REMOVE: true 173 | SAVE_TOP_K: 10 174 | EARLY_STOP: 175 | PATIENCE: 60 176 | DDP: false 177 | GRADIENT_CLIP_VAL: 0.5 178 | LIMIT_TRAIN_BATCHES: 0.1 179 | LIMIT_VAL_BATCHES: 0.5 180 | LOG_TRAIN_N_STEPS: 100 181 | MAX_EPOCHS: 500 182 | MAX_STEPS: -1 183 | PRECISION: 16 184 | VAL_CHECK_INTERVAL: 1.0 185 | -------------------------------------------------------------------------------- /mem/configs/xvff.yaml: -------------------------------------------------------------------------------- 1 | ANALYSIS: 2 | DRAW_NEURON_LOCATION: false 3 | SAVE_NEURON_LOCATION: false 4 | CHECKPOINT_DIR: /data/ckpt/ 5 | DATAMODULE: 6 | BATCH_SIZE: 32 7 | FEATURE_EXTRACTOR_MODE: false 8 | NUM_WORKERS: 8 9 | PIN_MEMORY: true 10 | DATASET: 11 | CACHE_DIR: /data/cache 12 | DARK_POSTFIX: xvdb 13 | FILTER_BY_SESSION: 14 | - -1 15 | FMRI_SPACE: fsaverage 16 | IMAGE_RESOLUTION: 17 | - 224 18 | - 224 19 | N_PREV_FRAMES: 32 20 | ROIS: 21 | - all 22 | ROOT: /data/ALG23 23 | SUBJECT_LIST: 24 | - subj01 25 | - subj02 26 | - subj03 27 | - subj04 28 | - 
subj05 29 | - subj06 30 | - subj07 31 | - subj08 32 | DESCRIPTION: ff stands for handcuff not f-key f-key 33 | EXPERIMENTAL: 34 | ANOTHER_SPLIT: false 35 | BEHV_ONLY: false 36 | BEHV_SELECTION: 37 | - -1 38 | BLANK_IMAGE: false 39 | NO_SPLIT: false 40 | SHUFFLE_IMAGES: false 41 | SHUFFLE_VAL: true 42 | STRAIGHT_FORWARD: false 43 | STRAIGHT_FORWARD_BUT_KEEP_BACKBONE_GRAD: false 44 | T_IMAGE: 0 45 | USE_DEV_MODEL: true 46 | USE_PREV_FRAME: false 47 | USE_RETINA_MAPPER: true 48 | USE_LAYER_SELECTOR: true 49 | USE_BHV: false 50 | USE_BHV_PASSTHROUGH: false 51 | BACKBONE_NOGRAD: false 52 | LOSS: 53 | DARK: 54 | MAX_EPOCH: 300 55 | USE: true 56 | NAME: SmoothL1Loss 57 | SMOOTH_L1_BETA: 0.01 58 | SYNC: 59 | EMA_BETA: 0.9 60 | EMA_BIAS_CORRECTION: false 61 | EMA_KEY: running_grad 62 | EXP_SCALE: 10.0 63 | EXP_SHIFT: 0.0 64 | LOG_SHIFT: 10.0 65 | SKIP_EPOCHS: 20 66 | STAGE: VAL 67 | UPDATE_RULE: raw 68 | USE: true 69 | MODEL: 70 | BACKBONE: 71 | ADAPTIVE_LN: 72 | SCALE: 0.5 73 | CACHE_DIR: /data/cache 74 | CLS_DIMS: 75 | - 1536 76 | - 1536 77 | - 1536 78 | - 768 79 | FEATURE_DIMS: 80 | - 768 81 | - 768 82 | - 768 83 | - 768 84 | LAYERS: 85 | - 2 86 | - 5 87 | - 8 88 | - 11 89 | LORA: 90 | RANK: 4 91 | SCALE: 0.2 92 | NAME: dinov2_vit_b 93 | BACKBONE_SMALL: 94 | ADAPTIVE_LN: 95 | SCALE: 0.5 96 | CLS_DIMS: 97 | - 384 98 | LAYERS: 99 | - 11 100 | LORA: 101 | RANK: 4 102 | SCALE: 0.2 103 | MERGE_WIDTH: 64 104 | NAME: dinov2_vit_s 105 | T_DIM: 64 106 | WIDTH: 64 107 | BOTTLENECK: 108 | OUT_DIM: 64 109 | RANK: -1 110 | COND: 111 | DIM: 256 112 | DROPOUT: 0.2 113 | IN_DIM: 35 114 | PASSTHROUGH_DIM: 32 115 | USE: true 116 | CONV_HEAD: 117 | DEPTHS: 118 | - 3 119 | - 3 120 | - 3 121 | - 3 122 | KERNEL_SIZES: 123 | - 5 124 | - 5 125 | - 5 126 | - 5 127 | MAX_DIM: 1024 128 | SIMPLE: true 129 | WIDTH: 192 130 | COORDS_MLP: 131 | DEPTH: 3 132 | LOG: false 133 | WIDTH: 128 134 | LAYER_SELECTOR: {} 135 | MAX_TRAIN_VOXELS: 25600 136 | PREV_FEAT: 137 | DIM: 1024 138 | RETINA_MAPPER: 139 | CONSTANT_SIGMA: 0.01 140 | VOXEL_OUTS: 141 | SHARED: 142 | MLP: 143 | DEPTH: 3 144 | WIDTH: 1024 145 | USE: false 146 | MODEL_SOUP: 147 | GREEDY_TARGET: heldout 148 | RECIPE: greedy 149 | USE: true 150 | OPTIMIZER: 151 | LR: 0.0003 152 | NAME: AdamW 153 | SCHEDULER: 154 | CYCLE_DECAY: 0.5 155 | CYCLE_LIMIT: 3 156 | K_DECAY: 1.5 157 | LR_MIN: 0.0003 158 | LR_MIN_WARMUP: 0.0001 159 | T_INITIAL: 1 160 | T_MULT: 1.0 161 | WARMUP_T: 10 162 | WEIGHT_DECAY: 0.0003 163 | POSITION_ENCODING: 164 | FEATURES: 32 165 | IN_DIM: 3 166 | MAX_STEPS: 1000 167 | PERIODS: 10000 168 | REGULARIZER: 169 | LAYER: 3.0e-05 170 | RESULTS_DIR: /nfscc/alg23/ray_results/ 171 | TRAINER: 172 | ACCUMULATE_GRAD_BATCHES: 1 173 | CALLBACKS: 174 | CHECKPOINT: 175 | LOAD_BEST_ON_END: false 176 | LOAD_BEST_ON_VAL: false 177 | REMOVE: true 178 | SAVE_TOP_K: 10 179 | EARLY_STOP: 180 | PATIENCE: 120 181 | DDP: false 182 | GRADIENT_CLIP_VAL: 0.5 183 | LIMIT_TRAIN_BATCHES: 0.05 184 | LIMIT_VAL_BATCHES: 0.25 185 | LOG_TRAIN_N_STEPS: 100 186 | MAX_EPOCHS: 1000 187 | MAX_STEPS: -1 188 | PRECISION: 16 189 | VAL_CHECK_INTERVAL: 1.0 190 | -------------------------------------------------------------------------------- /mem/configs/xvga.yaml: -------------------------------------------------------------------------------- 1 | ANALYSIS: 2 | DRAW_NEURON_LOCATION: false 3 | SAVE_NEURON_LOCATION: false 4 | CHECKPOINT_DIR: /data/ckpt/ 5 | DATAMODULE: 6 | BATCH_SIZE: 32 7 | FEATURE_EXTRACTOR_MODE: false 8 | NUM_WORKERS: 8 9 | PIN_MEMORY: true 10 | DATASET: 11 | CACHE_DIR: /data/cache 12 
| DARK_POSTFIX: xvdb 13 | FILTER_BY_SESSION: 14 | - -1 15 | FMRI_SPACE: fsaverage 16 | IMAGE_RESOLUTION: 17 | - 224 18 | - 224 19 | N_PREV_FRAMES: 32 20 | ROIS: 21 | - all 22 | ROOT: /data/ALG23 23 | SUBJECT_LIST: 24 | - subj01 25 | - subj02 26 | - subj03 27 | - subj04 28 | - subj05 29 | - subj06 30 | - subj07 31 | - subj08 32 | DESCRIPTION: Default config 33 | EXPERIMENTAL: 34 | ANOTHER_SPLIT: false 35 | BEHV_ONLY: false 36 | BEHV_SELECTION: 37 | - -1 38 | BLANK_IMAGE: false 39 | NO_SPLIT: false 40 | SHUFFLE_IMAGES: false 41 | SHUFFLE_VAL: true 42 | STRAIGHT_FORWARD: false 43 | STRAIGHT_FORWARD_BUT_KEEP_BACKBONE_GRAD: false 44 | T_IMAGE: 0 45 | USE_DEV_MODEL: true 46 | USE_PREV_FRAME: false 47 | USE_BHV: false 48 | USE_BHV_PASSTHROUGH: false 49 | LOSS: 50 | DARK: 51 | MAX_EPOCH: 100 52 | USE: true 53 | NAME: SmoothL1Loss 54 | SMOOTH_L1_BETA: 0.01 55 | SYNC: 56 | EMA_BETA: 0.9 57 | EMA_BIAS_CORRECTION: false 58 | EMA_KEY: running_grad 59 | EXP_SCALE: 10.0 60 | EXP_SHIFT: 0.0 61 | LOG_SHIFT: 10.0 62 | SKIP_EPOCHS: 20 63 | STAGE: VAL 64 | UPDATE_RULE: raw 65 | USE: true 66 | MODEL: 67 | BACKBONE: 68 | ADAPTIVE_LN: 69 | SCALE: 0.5 70 | CACHE_DIR: /data/cache 71 | CLS_DIMS: 72 | - 1536 73 | - 1536 74 | - 1536 75 | - 768 76 | FEATURE_DIMS: 77 | - 768 78 | - 768 79 | - 768 80 | - 768 81 | LAYERS: 82 | - 2 83 | - 5 84 | - 8 85 | - 11 86 | LORA: 87 | RANK: 4 88 | SCALE: 0.2 89 | NAME: dinov2_vit_b 90 | BACKBONE_SMALL: 91 | ADAPTIVE_LN: 92 | SCALE: 0.5 93 | CLS_DIMS: 94 | - 384 95 | LAYERS: 96 | - 11 97 | LORA: 98 | RANK: 4 99 | SCALE: 0.2 100 | MERGE_WIDTH: 64 101 | NAME: dinov2_vit_s 102 | T_DIM: 64 103 | WIDTH: 64 104 | BOTTLENECK: 105 | OUT_DIM: 64 106 | RANK: -1 107 | COND: 108 | DIM: 256 109 | DROPOUT: 0.2 110 | IN_DIM: 35 111 | PASSTHROUGH_DIM: 32 112 | USE: true 113 | CONV_HEAD: 114 | DEPTHS: 115 | - 3 116 | - 3 117 | - 3 118 | - 3 119 | KERNEL_SIZES: 120 | - 5 121 | - 5 122 | - 5 123 | - 5 124 | MAX_DIM: 1024 125 | SIMPLE: true 126 | WIDTH: 192 127 | COORDS_MLP: 128 | DEPTH: 3 129 | LOG: false 130 | WIDTH: 128 131 | LAYER_SELECTOR: {} 132 | MAX_TRAIN_VOXELS: 25600 133 | PREV_FEAT: 134 | DIM: 1024 135 | RETINA_MAPPER: 136 | CONSTANT_SIGMA: 0.01 137 | VOXEL_OUTS: 138 | SHARED: 139 | MLP: 140 | DEPTH: 3 141 | WIDTH: 1024 142 | USE: false 143 | MODEL_SOUP: 144 | GREEDY_TARGET: heldout 145 | RECIPE: greedy 146 | USE: true 147 | OPTIMIZER: 148 | LR: 0.0003 149 | NAME: AdamW 150 | SCHEDULER: 151 | CYCLE_DECAY: 0.5 152 | CYCLE_LIMIT: 3 153 | K_DECAY: 1.5 154 | LR_MIN: 0.0003 155 | LR_MIN_WARMUP: 0.0001 156 | T_INITIAL: 1 157 | T_MULT: 1.0 158 | WARMUP_T: 10 159 | WEIGHT_DECAY: 0.0003 160 | POSITION_ENCODING: 161 | FEATURES: 32 162 | IN_DIM: 3 163 | MAX_STEPS: 1000 164 | PERIODS: 10000 165 | REGULARIZER: 166 | LAYER: 3.0e-05 167 | RESULTS_DIR: /nfscc/alg23/ray_results/ 168 | TRAINER: 169 | ACCUMULATE_GRAD_BATCHES: 1 170 | CALLBACKS: 171 | CHECKPOINT: 172 | LOAD_BEST_ON_END: false 173 | LOAD_BEST_ON_VAL: false 174 | REMOVE: true 175 | SAVE_TOP_K: 10 176 | EARLY_STOP: 177 | PATIENCE: 50 178 | DDP: false 179 | GRADIENT_CLIP_VAL: 0.5 180 | LIMIT_TRAIN_BATCHES: 0.05 181 | LIMIT_VAL_BATCHES: 0.25 182 | LOG_TRAIN_N_STEPS: 100 183 | MAX_EPOCHS: 300 184 | MAX_STEPS: -1 185 | PRECISION: 16 186 | VAL_CHECK_INTERVAL: 1.0 187 | -------------------------------------------------------------------------------- /mem/dark_onemodel.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import copy 3 | import fnmatch 4 | from functools import partial 5 | import 
glob 6 | import operator 7 | import os 8 | import sys 9 | from typing import Dict 10 | 11 | import numpy as np 12 | import pytorch_lightning as pl 13 | import torch 14 | 15 | import shutil 16 | 17 | from tqdm import tqdm 18 | 19 | from config import AutoConfig 20 | from config_utils import load_from_yaml 21 | from datamodule import NSDDatamodule 22 | from plmodels import PlVEModel 23 | 24 | from read_utils import ( 25 | read_config, 26 | read_short_config, 27 | read_score_df, 28 | list_runs_from_exp_names, 29 | ) 30 | 31 | 32 | def build_dmt(run_dir): 33 | cfg: AutoConfig = read_config(run_dir) 34 | cfg.TRAINER.LIMIT_VAL_BATCHES = 1.0 35 | cfg.EXPERIMENTAL.SHUFFLE_VAL = False 36 | dm = NSDDatamodule(cfg) 37 | dm.setup() 38 | 39 | plmodel = PlVEModel(cfg, dm.roi_dict, dm.neuron_coords_dict) 40 | 41 | trainer = pl.Trainer( 42 | accelerator="cuda", 43 | devices=[0], 44 | precision=16, 45 | enable_progress_bar=False, 46 | ) 47 | 48 | return dm, plmodel, trainer 49 | 50 | 51 | @torch.no_grad() 52 | def get_outs(model, trainer, dataloader): 53 | outs = trainer.predict(model, dataloader) 54 | outs = torch.stack(sum(outs, [])) 55 | # outs = outs.cpu().numpy().astype(np.float16) 56 | outs = outs.cpu().half() 57 | return outs 58 | -------------------------------------------------------------------------------- /mem/exp_utils.py: -------------------------------------------------------------------------------- 1 | # %% 2 | from collections import OrderedDict 3 | import glob 4 | import json 5 | import sys 6 | import traceback 7 | import re 8 | import logging 9 | from time import sleep 10 | from einops import repeat 11 | import numpy as np 12 | 13 | import torch 14 | import os 15 | import pandas as pd 16 | import ray 17 | from ray import tune 18 | 19 | import torch.nn.functional as F 20 | 21 | import matplotlib.pyplot as plt 22 | from tqdm import tqdm 23 | import yaml 24 | from PIL import Image, ImageDraw 25 | 26 | import cortex 27 | from matplotlib.pyplot import cm 28 | from config import AutoConfig 29 | 30 | from config_utils import flatten_dict, load_from_yaml 31 | 32 | from IPython.display import display, HTML, clear_output 33 | 34 | from datamodule import NSDDatamodule, build_dm 35 | 36 | import glob 37 | 38 | plt.style.use("dark_background") 39 | # %% 40 | 41 | 42 | def load_cfg(run): 43 | path = glob.glob(run + "/**/hparams.yaml", recursive=True) 44 | # print(path) 45 | path = path[0] 46 | cfg = load_from_yaml(path) 47 | return cfg 48 | 49 | def load_voxel_metric(run, stage="TEST"): 50 | path = glob.glob(run + f"/**/stage={stage}*.npy", recursive=True) 51 | path = sorted(path) 52 | path = path[-1] 53 | # print(path) 54 | voxel_metric = np.load(path, allow_pickle=True).item() 55 | return voxel_metric 56 | 57 | # %% 58 | def list_runs_from_exp_names(exp_names, exp_dir="/nfscc/afo/ray_results", only_done=True): 59 | runs = [] 60 | for exp_name in exp_names: 61 | i_dir = os.path.join(exp_dir, exp_name) 62 | runs += os.listdir(i_dir) 63 | runs = [r for r in runs if os.path.isdir(os.path.join(i_dir, r))] 64 | runs = [os.path.join(i_dir, r) for r in runs] 65 | if only_done == True: 66 | filterer = lambda x: os.path.exists(os.path.join(x, "done")) 67 | runs = list(filter(filterer, runs)) 68 | runs = sorted(runs) 69 | return runs -------------------------------------------------------------------------------- /mem/loss.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import Tuple 3 | 4 | import numpy as np 5 | import torch 6 | 7 | from 
config import AutoConfig 8 | from registry import Registry 9 | 10 | LOSS = Registry() 11 | 12 | 13 | @LOSS.register("MSELoss") 14 | def _mse(cfg: AutoConfig): 15 | return torch.nn.MSELoss(reduction='none') 16 | 17 | 18 | @LOSS.register("L1Loss") 19 | def _l1(cfg: AutoConfig): 20 | return torch.nn.L1Loss(reduction='none') 21 | 22 | 23 | @LOSS.register("SmoothL1Loss") 24 | def _smooth_l1(cfg: AutoConfig): 25 | return torch.nn.SmoothL1Loss(beta=cfg.LOSS.SMOOTH_L1_BETA, reduction='none') 26 | 27 | 28 | @LOSS.register("PoissonNLLLoss") 29 | def _poisson_nll(cfg: AutoConfig): 30 | return torch.nn.PoissonNLLLoss(reduction='none') 31 | 32 | 33 | def build_loss(cfg: AutoConfig): 34 | return LOSS[cfg.LOSS.NAME](cfg) 35 | -------------------------------------------------------------------------------- /mem/metrics.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import Tensor 3 | 4 | 5 | def vectorized_correlation(x: Tensor, y: Tensor) -> Tensor: 6 | """ 7 | 8 | :param x: Tensor shape [num_samples, num_voxels] 9 | :param y: Tensor shape [num_samples, num_voxels] 10 | :return: shape [num_voxels, ] 11 | """ 12 | 13 | dim = 0 14 | centered_x = x - x.mean(dim, keepdims=True) 15 | centered_y = y - y.mean(dim, keepdims=True) 16 | 17 | covariance = (centered_x * centered_y).sum(dim, keepdims=True) 18 | 19 | bessel_corrected_covariance = covariance / (x.shape[dim] - 1) 20 | 21 | x_std = x.std(dim, keepdims=True) + 1e-8 22 | y_std = y.std(dim, keepdims=True) + 1e-8 23 | 24 | corr = bessel_corrected_covariance / (x_std * y_std) 25 | 26 | return corr.squeeze(0) 27 | 28 | 29 | class EpochMetric: 30 | def __init__( 31 | self, 32 | fn: vectorized_correlation = None, 33 | device=None, 34 | ): 35 | self.reset() 36 | self.fn = fn 37 | self.device = device 38 | 39 | def reset(self): 40 | self._preds = [] 41 | self._targets = [] 42 | 43 | def update(self, pred: Tensor, target: Tensor): 44 | self._preds.append(pred.detach().to(self.device)) 45 | self._targets.append(target.detach().to(self.device)) 46 | 47 | def compute(self): 48 | if self._preds[0].ndim == 1: 49 | preds = torch.stack(self._preds, dim=0) 50 | elif self._preds[0].ndim == 2 and self._preds[0].shape[0] == 1: 51 | preds = torch.cat(self._preds, dim=0) 52 | else: 53 | raise ValueError("preds must be 1D or 2D with shape [1, num_voxels]") 54 | 55 | if self._targets[0].ndim == 1: 56 | targets = torch.stack(self._targets, dim=0) 57 | elif self._targets[0].ndim == 2 and self._targets[0].shape[0] == 1: 58 | targets = torch.cat(self._targets, dim=0) 59 | else: 60 | raise ValueError("targets must be 1D or 2D with shape [1, num_voxels]") 61 | 62 | return self.fn(preds, targets) 63 | 64 | def __call__(self, value): 65 | self.update(value) 66 | return self.compute() 67 | -------------------------------------------------------------------------------- /mem/neck.py: -------------------------------------------------------------------------------- 1 | from einops import rearrange 2 | from typing import Any, Dict, List, Optional, Tuple 3 | from config import AutoConfig 4 | import torch 5 | from torch import Tensor, nn 6 | 7 | from topyneck import TopyNeck 8 | 9 | from registry import Registry 10 | 11 | NECK = Registry() 12 | 13 | NECK.register("TopyNeck", TopyNeck) 14 | 15 | def build_neck( 16 | cfg: AutoConfig, 17 | c_dict: Dict[str, int], 18 | num_voxel_dict: Dict[str, int], 19 | neuron_coords_dict: Dict[str, Tensor], 20 | ): 21 | neck = NECK[cfg.MODEL.NECK.NAME](cfg, c_dict, num_voxel_dict, 
neuron_coords_dict) 22 | 23 | return neck 24 | -------------------------------------------------------------------------------- /mem/optimizers.py: -------------------------------------------------------------------------------- 1 | from torch.optim import Adam, AdamW, SGD 2 | from torch.optim.lr_scheduler import ( 3 | StepLR, 4 | MultiStepLR, 5 | CosineAnnealingLR, 6 | CosineAnnealingWarmRestarts, 7 | ReduceLROnPlateau, 8 | ) 9 | 10 | from registry import Registry 11 | 12 | OPTIMIZER_REGISTRY = Registry() 13 | 14 | 15 | @OPTIMIZER_REGISTRY.register("AdamW") 16 | def _adamw(cfg, optimizer_grouped_parameters): 17 | return AdamW( 18 | optimizer_grouped_parameters, 19 | lr=cfg.OPTIMIZER.LR, 20 | ) 21 | 22 | 23 | @OPTIMIZER_REGISTRY.register("AdaBelief") 24 | def _adamb(cfg, optimizer_grouped_parameters): 25 | from adabelief_pytorch import AdaBelief 26 | 27 | return AdaBelief( 28 | optimizer_grouped_parameters, 29 | lr=cfg.OPTIMIZER.LR, 30 | print_change_log=False, 31 | ) 32 | 33 | 34 | @OPTIMIZER_REGISTRY.register("SGD") 35 | def _sgd(cfg, optimizer_grouped_parameters): 36 | return SGD( 37 | optimizer_grouped_parameters, 38 | momentum=0.9, 39 | lr=cfg.OPTIMIZER.LR, 40 | ) 41 | 42 | 43 | def build_optimizer(cfg, optimizer_grouped_parameters): 44 | p_list = list(optimizer_grouped_parameters) 45 | optimizer = OPTIMIZER_REGISTRY[cfg.OPTIMIZER.NAME](cfg, p_list) 46 | 47 | ### milestone scheduler 48 | # warmup_steps = cfg.OPTIMIZER.WARMUP_STEPS 49 | # milemilestones = cfg.OPTIMIZER.LR_DECAY_STEP 50 | # decay = cfg.OPTIMIZER.LR_DECAY_RATE 51 | # assert len(milemilestones) == len(decay) 52 | 53 | # def warmup(current_step: int): 54 | # if current_step < warmup_steps: # current_step / warmup_steps * base_lr 55 | # return float(current_step / warmup_steps) 56 | # if current_step in milemilestones: 57 | # return decay[milemilestones.index(current_step)] 58 | # return 1.0 59 | 60 | # from torch.optim.lr_scheduler import LambdaLR 61 | 62 | # scheduler = LambdaLR(optimizer, lr_lambda=warmup) 63 | 64 | from timm.scheduler.cosine_lr import CosineLRScheduler 65 | 66 | # scheduler = CosineLRScheduler( 67 | # optimizer, 68 | # t_initial=30, 69 | # lr_min=1e-4, 70 | # cycle_mul=1, 71 | # cycle_decay=0.3, 72 | # cycle_limit=100, 73 | # warmup_t=10, 74 | # warmup_lr_init=1e-3, 75 | # k_decay=1, 76 | # ) 77 | if cfg.OPTIMIZER.SCHEDULER.T_INITIAL == 1: 78 | cfg.OPTIMIZER.SCHEDULER.LR_MIN = cfg.OPTIMIZER.LR 79 | 80 | scheduler = CosineLRScheduler( 81 | optimizer, 82 | t_initial=cfg.OPTIMIZER.SCHEDULER.T_INITIAL, 83 | lr_min=cfg.OPTIMIZER.SCHEDULER.LR_MIN, 84 | cycle_mul=cfg.OPTIMIZER.SCHEDULER.T_MULT, 85 | cycle_decay=cfg.OPTIMIZER.SCHEDULER.CYCLE_DECAY, 86 | cycle_limit=cfg.OPTIMIZER.SCHEDULER.CYCLE_LIMIT, 87 | warmup_t=cfg.OPTIMIZER.SCHEDULER.WARMUP_T, 88 | warmup_lr_init=cfg.OPTIMIZER.SCHEDULER.LR_MIN_WARMUP, 89 | warmup_prefix=False, 90 | k_decay=cfg.OPTIMIZER.SCHEDULER.K_DECAY, 91 | ) 92 | return [optimizer], [{"scheduler": scheduler, "interval": "epoch"}] 93 | -------------------------------------------------------------------------------- /mem/point_pe.py: -------------------------------------------------------------------------------- 1 | # https://gist.github.com/xmodar/ae2d94681a6fda39f3c4f3ac91eef7b7 2 | # %% 3 | import torch 4 | 5 | 6 | def sinusoidal(positions, features=16, periods=10000): 7 | """Encode `positions` using sinusoidal positional encoding 8 | 9 | Args: 10 | positions: tensor of positions 11 | features: half the number of features per position 12 | periods: used frequencies for the sinusoidal 
functions 13 | 14 | Returns: 15 | Positional encoding of shape `(*positions.shape, features, 2)` 16 | """ 17 | dtype = positions.dtype if positions.is_floating_point() else None 18 | kwargs = dict(device=positions.device, dtype=dtype) 19 | omega = torch.logspace(0, 1 / features - 1, features, periods, **kwargs) 20 | fraction = omega * positions.unsqueeze(-1) 21 | return torch.stack((fraction.sin(), fraction.cos()), dim=-1) 22 | 23 | 24 | def point_pe(points, low=0, high=1, steps=100, features=16, periods=10000): 25 | """Encode points in bounded space using sinusoidal positional encoding 26 | 27 | Args: 28 | points: tensor of points; typically of shape (*, C) 29 | low: lower bound of the space; typically of shape (C,) 30 | high: upper bound of the space; typically of shape (C,) 31 | steps: number of cells that split the space; typically of shape (C,) 32 | features: half the number of features per position 33 | periods: used frequencies for the sinusoidal functions 34 | 35 | Returns: 36 | Positional encoded points of the following shape: 37 | `(*points.shape[:-1], points.shape[-1] * features * 2)` 38 | """ 39 | positions = (points - low).mul_(steps / (high - low)) 40 | return sinusoidal(positions, features, periods).flatten(-3) 41 | 42 | 43 | def point_position_encoding(points, max_steps=100, features=16, periods=10000): 44 | low = points.min(0).values 45 | high = points.max(0).values 46 | steps = high - low 47 | steps *= max_steps / steps.max() 48 | pe = point_pe(points, low, high, steps, features, periods) 49 | return pe 50 | 51 | 52 | def test(num_points=1000, max_steps=100, features=32, periods=10000): 53 | """Test point_pe""" 54 | point_cloud = torch.rand(num_points, 3) 55 | low = point_cloud.min(0).values 56 | high = point_cloud.max(0).values 57 | steps = high - low 58 | steps *= max_steps / steps.max() 59 | # print(point_pe(point_cloud, low, high, steps).shape) 60 | pe = point_pe(point_cloud, low, high, steps, features=features, periods=periods) 61 | return pe 62 | 63 | 64 | # %% 65 | if __name__ == "__main__": 66 | pe = test(20, 1000, periods=10000) 67 | 68 | import matplotlib.pyplot as plt 69 | 70 | fig = plt.figure(figsize=(10, 10)) 71 | plt.imshow(pe) 72 | # %% 73 | 74 | 75 | def pe_2d(num_points=14, max_steps=100, features=32, periods=10000): 76 | x = torch.linspace(0, 1, num_points) 77 | y = torch.linspace(0, 1, num_points) 78 | points = torch.stack(torch.meshgrid(x, y), dim=-1).reshape(-1, 2) 79 | # print(points) 80 | # print(points.shape) 81 | low = points.min(0).values 82 | high = points.max(0).values 83 | steps = high - low 84 | steps *= max_steps / steps.max() 85 | # print(point_pe(point_cloud, low, high, steps).shape) 86 | pe = point_pe(points, low, high, steps, features=features, periods=periods) 87 | pe = pe.reshape(num_points, num_points, -1) 88 | pe = pe.permute(2, 0, 1) 89 | return pe 90 | 91 | 92 | # %% 93 | if __name__ == "__main__": 94 | pe = pe_2d(3, max_steps=1000, periods=10000, features=32) 95 | 96 | import matplotlib.pyplot as plt 97 | 98 | fig = plt.figure(figsize=(10, 10)) 99 | plt.imshow(pe[64, :, :]) 100 | # %% 101 | -------------------------------------------------------------------------------- /mem/prepare_cache.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | from tqdm import tqdm 4 | 5 | 6 | model = torch.hub.load("facebookresearch/dinov2", "dinov2_vitl14") 7 | model = model.cuda() 8 | model.eval() 9 | 10 | import os 11 | import torch 12 | from PIL import Image 13 | from 
torch.utils.data import Dataset, DataLoader 14 | from torchvision import transforms 15 | 16 | class ImageDataset(Dataset): 17 | def __init__(self, data_dir, transform=None): 18 | self.data_dir = data_dir 19 | self.transform = transform 20 | self.image_files = sorted(os.listdir(data_dir)) 21 | 22 | def __len__(self): 23 | return len(self.image_files) 24 | 25 | def __getitem__(self, idx): 26 | image_path = os.path.join(self.data_dir, self.image_files[idx]) 27 | image = Image.open(image_path).convert("RGB") 28 | 29 | if self.transform: 30 | image = self.transform(image) 31 | 32 | return image, self.image_files[idx] 33 | 34 | # Specify the directory containing the images 35 | # data_dir = "/data/ALG23/images" 36 | # save_dir = "/data/ALG23/feats" 37 | import argparse 38 | 39 | parser = argparse.ArgumentParser() 40 | parser.add_argument("--data_dir", type=str, default="/data/ALG23/images") 41 | parser.add_argument("--save_dir", type=str, default="/data/ALG23/feats") 42 | args = parser.parse_args() 43 | save_dir = args.save_dir 44 | data_dir = args.data_dir 45 | 46 | os.makedirs(save_dir, exist_ok=True) 47 | # Define the transformations to be applied to the images 48 | transform = transforms.Compose([ 49 | transforms.Resize((224, 224)), 50 | transforms.ToTensor(), 51 | transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) 52 | ]) 53 | 54 | # Create the dataset 55 | dataset = ImageDataset(data_dir, transform=transform) 56 | 57 | # Create the dataloader 58 | dataloader = DataLoader(dataset, batch_size=32, shuffle=False) 59 | 60 | with torch.no_grad(): 61 | for batch in tqdm(dataloader): 62 | image = batch[0].cuda() 63 | feats = model(image) 64 | paths = batch[1] 65 | 66 | for i, path in enumerate(paths): 67 | path = path.split(".")[0] + ".npy" 68 | feat = feats[i].cpu().numpy().astype(np.float16) 69 | np.save(os.path.join(save_dir, path), feat) -------------------------------------------------------------------------------- /mem/read_utils.py: -------------------------------------------------------------------------------- 1 | # %% 2 | import glob 3 | import json 4 | import sys 5 | import traceback 6 | import re 7 | import logging 8 | from time import sleep 9 | import numpy as np 10 | 11 | import torch 12 | import os 13 | import pandas as pd 14 | import ray 15 | from ray import tune 16 | 17 | import matplotlib.pyplot as plt 18 | from tqdm import tqdm 19 | import yaml 20 | from PIL import Image, ImageDraw 21 | 22 | import cortex 23 | from matplotlib.pyplot import cm 24 | 25 | from config_utils import flatten_dict, load_from_yaml 26 | 27 | from IPython.display import display, HTML, clear_output 28 | 29 | plt.style.use("dark_background") 30 | 31 | def set_display(): 32 | pd.options.display.float_format = "{:,.4f}".format 33 | pd.options.display.max_colwidth = 1000 34 | pd.options.display.max_rows = 1000 35 | pd.options.display.max_columns = 1000 36 | 37 | 38 | def pretty_print(df): 39 | df.style.set_properties(**{"white-space": "pre"}) 40 | return display(HTML(df.to_html().replace("\\n", "
"))) 41 | 42 | def read_config(run): 43 | cfg_path = glob.glob(os.path.join(run, "**/hparams.yaml"), recursive=True)[0] 44 | return load_from_yaml(cfg_path) 45 | 46 | def read_short_config(run): 47 | json_path = glob.glob(os.path.join(run, "**/params.json"), recursive=True)[0] 48 | cfg = json.load(open(json_path, "r")) 49 | return cfg 50 | 51 | def read_score_df(run): 52 | try: 53 | csv_path = glob.glob(os.path.join(run, "**/metrics.csv"), recursive=True)[0] 54 | return pd.read_csv(csv_path) 55 | except: 56 | logging.warning(f"Could not find metrics.csv in {run}") 57 | return None 58 | 59 | def read_test_voxel_score(run): 60 | # /nfscc/ray_results/hunt_behavior/full_bhv/t074f6_00000_DATASET.SUBJECT_LIST=subj01MODEL.BACKBONE.ADAPTIVE_LN.SCALE=0.5/lightning_logs/voxel_metric/stage=TEST.step=000000009028.pkl.npy 61 | vs_path = glob.glob(os.path.join(run, "**/voxel_metric/stage=TEST.step=*.pkl.npy"), recursive=True)[0] 62 | return np.load(vs_path, allow_pickle=True).item() 63 | 64 | def read_val_voxel_score(run): 65 | vs_path = glob.glob(os.path.join(run, "**/voxel_metric/stage=VAL.step=*.pkl.npy"), recursive=True) 66 | vs_path = sorted(vs_path)[-1] 67 | return np.load(vs_path, allow_pickle=True).item() 68 | 69 | 70 | def list_runs_from_exp_names(exp_names, exp_dir="/nfscc/ray_results/saved"): 71 | runs = [] 72 | for exp_name in exp_names: 73 | i_dir = os.path.join(exp_dir, exp_name) 74 | runs += os.listdir(i_dir) 75 | runs = [r for r in runs if os.path.isdir(os.path.join(i_dir, r))] 76 | runs = [os.path.join(i_dir, r) for r in runs] 77 | runs = sorted(runs) 78 | return runs 79 | 80 | 81 | def find_runs_from_exp_dir(exp_dir): 82 | exp_names = os.listdir(exp_dir) 83 | runs = list_runs_from_exp_names(exp_names, exp_dir) 84 | runs = sorted(runs) 85 | return runs -------------------------------------------------------------------------------- /mem/registry.py: -------------------------------------------------------------------------------- 1 | def _register_generic(module_dict, module_name, module): 2 | assert module_name not in module_dict 3 | module_dict[module_name] = module 4 | 5 | 6 | class Registry(dict): 7 | """ 8 | A helper class for managing registering modules, it extends a dictionary 9 | and provides a register functions. 10 | Eg. creating a registry: 11 | some_registry = Registry({"default": default_module}) 12 | There're two ways of registering new modules: 13 | 1): normal way is just calling register function: 14 | def foo(): 15 | ... 16 | some_registry.register("foo_module", foo) 17 | 2): used as decorator when declaring the module: 18 | @some_registry.register("foo_module") 19 | @some_registry.register("foo_modeul_nickname") 20 | def foo(): 21 | ... 
22 | Access of module is just like using a dictionary, eg: 23 | f = some_registry["foo_modeul"] 24 | """ 25 | 26 | def __init__(self, *args, **kwargs): 27 | super(Registry, self).__init__(*args, **kwargs) 28 | 29 | def register(self, module_name, module=None): 30 | # used as function call 31 | if module is not None: 32 | _register_generic(self, module_name, module) 33 | return 34 | 35 | # used as decorator 36 | def register_fn(fn): 37 | _register_generic(self, module_name, fn) 38 | return fn 39 | 40 | return register_fn 41 | -------------------------------------------------------------------------------- /mem/save_config.py: -------------------------------------------------------------------------------- 1 | # %% 2 | import os 3 | from config_utils import get_cfg_defaults, save_to_yaml 4 | from config import AutoConfig 5 | # # %% 6 | 7 | 8 | _C = get_cfg_defaults() 9 | 10 | # path = "/workspace/configs/dino_t1.yaml" 11 | # _C.merge_from_file(path) 12 | path = "/workspace/configs/xvaa.yaml" 13 | save_to_yaml(_C, path) -------------------------------------------------------------------------------- /mem/scripts_heavy/do_one_job.sh: -------------------------------------------------------------------------------- 1 | # python file as arg 2 | x=$1 3 | echo "LOG START" > /tmp/log 4 | while true; do 5 | echo "======================" >> /tmp/log 6 | echo $(date) >> /tmp/log 7 | echo "Running ${x}" >> /tmp/log 8 | starttime=$(date +%s) 9 | python -u ${x} >> /tmp/log 2>&1 10 | wait 11 | endtime=$(date +%s) 12 | echo "Finished ${x}" >> /tmp/log 13 | timeelapsed=$((endtime - starttime)) 14 | echo "Time elapsed: ${timeelapsed}" >> /tmp/log 15 | echo "======================" >> /tmp/log 16 | timethreshold=300 17 | if [ $timeelapsed -gt $timethreshold ]; then 18 | echo "Time elapsed ${timeelapsed} is greater than threshold ${timethreshold}" >> /tmp/log 19 | echo "Sleeping for 10 seconds" 20 | sleep 10 21 | else 22 | echo "Time elapsed ${timeelapsed} is less than threshold ${timethreshold}" >> /tmp/log 23 | echo "go to vacation" >> /tmp/log 24 | break 25 | fi 26 | done 27 | echo "LOG END" >> /tmp/log 28 | 29 | sleep infinity -------------------------------------------------------------------------------- /mem/scripts_heavy/do_start_jobs.sh: -------------------------------------------------------------------------------- 1 | # /bin/bash 2 | 3 | if [[ "$#" -lt 4 ]]; then 4 | echo "no argument, using default values..." 
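# expected positional args (inferred from the defaults just below): $1=repeat_check (probes per GPU), $2=sm_th (max GPU utilization %), $3=mem_th (max GPU memory %), $4=python script handed to do_one_job.sh; the defaults are used whenever fewer than 4 args are given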
5 | repeat_check=3 6 | sm_th=10 7 | mem_th=10 8 | x="test.py" 9 | else 10 | repeat_check=$1 11 | sm_th=$2 12 | mem_th=$3 13 | x=$4 14 | fi 15 | 16 | echo "Finding Available GPUs:" 17 | 18 | 19 | NUM_GPUS=`nvidia-smi --list-gpus | wc -l` 20 | echo "Total $NUM_GPUS GPUs found" 21 | 22 | for (( i=$NUM_GPUS-1; i>=0; i-- )); do 23 | total_sm=0 24 | max_sm=0 25 | for (( j=0; j<$repeat_check; j++)); do 26 | sm=`nvidia-smi --query-gpu=utilization.gpu --format=csv,noheader,nounits -i $i` 27 | echo "$(date) GPU $i is $sm% busy" 28 | total_sm=$(expr "$total_sm" + "$sm") 29 | if [[ "$sm" -gt "$max_sm" ]]; then 30 | max_sm=$sm 31 | fi 32 | sleep 1 33 | done 34 | sm=$total_sm 35 | avg_sm=$(expr "$sm" / "$repeat_check") 36 | echo "GPU $i is average $avg_sm% busy, max $max_sm% busy" 37 | 38 | mem_used=`nvidia-smi --query-gpu=memory.used --format=csv,noheader,nounits -i $i` 39 | mem_total=`nvidia-smi --query-gpu=memory.total --format=csv,noheader,nounits -i $i` 40 | mem_percent=$(expr "$mem_used" \* 100 / "$mem_total") 41 | echo "GPU $i has $mem_percent% memory used" 42 | 43 | if [[ "$max_sm" -lt "$sm_th" ]] && [[ "$mem_percent" -lt "$mem_th" ]]; then 44 | echo "available, starting job..." 45 | docker stop sspy_$i 46 | # wait for stop 47 | while [ "$(docker ps -aq -f name=sspy_$i)" != "" ]; do 48 | sleep 1 49 | done 50 | docker run -d --rm \ 51 | --shm-size 64G \ 52 | --gpus device=$i \ 53 | -v /home/huze/nfscc:/nfscc \ 54 | -v /home/huze/workspace:/workspace \ 55 | -v /home/huze/data:/data \ 56 | -v /home/huze/data/.cache:/root/.cache \ 57 | --name sspy_$i \ 58 | --entrypoint /bin/bash huzeeee/afo:latest do_one_job.sh $x 59 | else 60 | echo "busy, skipping..." 61 | fi 62 | done 63 | 64 | echo "Done" -------------------------------------------------------------------------------- /mem/scripts_heavy/do_stop_donejobs.sh: -------------------------------------------------------------------------------- 1 | # /bin/bash 2 | 3 | if [[ "$#" -lt 3 ]]; then 4 | echo "no argument, using default values..." 5 | repeat_check=3 6 | sm_th=10 7 | mem_th=10 8 | else 9 | repeat_check=$1 10 | sm_th=$2 11 | mem_th=$3 12 | fi 13 | 14 | echo "Finding Available GPUs:" 15 | 16 | 17 | NUM_GPUS=`nvidia-smi --list-gpus | wc -l` 18 | echo "Total $NUM_GPUS GPUs found" 19 | 20 | for (( i=$NUM_GPUS-1; i>=0; i-- )); do 21 | total_sm=0 22 | max_sm=0 23 | for (( j=0; j<$repeat_check; j++)); do 24 | sm=`nvidia-smi --query-gpu=utilization.gpu --format=csv,noheader,nounits -i $i` 25 | echo "$(date) GPU $i is $sm% busy" 26 | total_sm=$(expr "$total_sm" + "$sm") 27 | if [[ "$sm" -gt "$max_sm" ]]; then 28 | max_sm=$sm 29 | fi 30 | sleep 1 31 | done 32 | sm=$total_sm 33 | avg_sm=$(expr "$sm" / "$repeat_check") 34 | echo "GPU $i is average $avg_sm% busy, max $max_sm% busy" 35 | 36 | mem_used=`nvidia-smi --query-gpu=memory.used --format=csv,noheader,nounits -i $i` 37 | mem_total=`nvidia-smi --query-gpu=memory.total --format=csv,noheader,nounits -i $i` 38 | mem_percent=$(expr "$mem_used" \* 100 / "$mem_total") 39 | echo "GPU $i has $mem_percent% memory used" 40 | 41 | if [[ "$max_sm" -lt "$sm_th" ]] && [[ "$mem_percent" -lt "$mem_th" ]]; then 42 | echo "available, stopping job..." 43 | docker stop sspy_$i 44 | else 45 | echo "busy, skipping..." 
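# note: only GPUs that look idle (below both the utilization and memory thresholds) have their sspy_<i> container stopped above, on the assumption that an idle GPU means its job has finished; busy GPUs are skipped so running jobs are not interrupted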
46 | fi 47 | done 48 | 49 | echo "Done" -------------------------------------------------------------------------------- /mem/scripts_heavy/do_stop_jobs.sh: -------------------------------------------------------------------------------- 1 | NUM_GPUS=`nvidia-smi --list-gpus | wc -l` 2 | 3 | echo "NUM_GPUS" 4 | echo $NUM_GPUS 5 | 6 | for ((i=0; i<$NUM_GPUS; i++)); do 7 | docker stop sspy_$i 8 | done -------------------------------------------------------------------------------- /mem/scripts_heavy/sync_ckpt.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Prompt the user for the directory path (with a default value of '/data/ckpt') 4 | read -p "Enter the directory path [default: /data/ckpt]: " directory 5 | directory=${directory:-"/data/ckpt"} 6 | 7 | # Check if the directory exists 8 | if [ ! -d "$directory" ]; then 9 | echo "Directory not found!" 10 | exit 1 11 | fi 12 | 13 | # Prompt the user for the remote machine details (with default values) 14 | read -p "Enter the remote machine IP [default: 114.514.1919.810]: " remote_ip 15 | remote_ip=${remote_ip:-"114.514.1919.810"} 16 | 17 | read -p "Enter the remote machine username [default: huze]: " remote_username 18 | remote_username=${remote_username:-"huze"} 19 | 20 | read -p "Enter the remote machine destination directory [default: /data/huze/dckpt]: " remote_directory 21 | remote_directory=${remote_directory:-"/data/huze/dckpt"} 22 | 23 | # Check if the remote machine is accessible 24 | ping -c 1 "$remote_ip" > /dev/null 2>&1 25 | if [ $? -ne 0 ]; then 26 | echo "Remote machine is not accessible!" 27 | exit 1 28 | fi 29 | 30 | # Function to walk through the directory recursively 31 | walk_directory() { 32 | local dir="$1" 33 | 34 | # Loop through all the files and subdirectories in the current directory 35 | for file in "$dir"/*; do 36 | # Check if the item is a directory 37 | if [ -d "$file" ]; then 38 | # Check if the directory contains a 'done' file 39 | if [ -f "$file/done" ]; then 40 | echo "Syncing folder: $file" 41 | # Rsync the folder to the remote machine 42 | rsync -avz --progress "$file" "$remote_username@$remote_ip:$remote_directory" 43 | fi 44 | # Call the function recursively for the subdirectory 45 | walk_directory "$file" 46 | fi 47 | done 48 | } 49 | 50 | # Call the function to walk through the directory 51 | walk_directory "$directory" -------------------------------------------------------------------------------- /mem/scripts_heavy/xvaa_topyneck.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from cluster_utils import my_nfs_cluster_job, trial_dirname_creator 3 | 4 | import argparse 5 | import os 6 | import sys 7 | from random import seed, shuffle 8 | 9 | import numpy as np 10 | import ray 11 | from ray import tune 12 | 13 | from config_utils import dict_to_list, get_cfg_defaults, load_from_yaml 14 | 15 | from train_utils import max_batch_size, simple_train 16 | 17 | 18 | def get_parser(): 19 | parser = argparse.ArgumentParser(description="Ray Tune") 20 | 21 | parser.add_argument( 22 | "-v", "--verbose", action="store_true", help="verbose", default=False 23 | ) 24 | 25 | parser.add_argument( 26 | "-p", "--progress", action="store_true", help="progress", default=False 27 | ) 28 | 29 | parser.add_argument( 30 | "--rm", action="store_true", default=False, help="Remove all previous results" 31 | ) 32 | 33 | parser.add_argument( 34 | "--name", type=str, default="debug", help="Name of the experiment" 35 | ) 
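    # note: the --name flag is parsed but not used by this script; __main__ below hard-codes name = "topyneck"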
36 | 37 | return parser 38 | 39 | 40 | @my_nfs_cluster_job 41 | def job(tune_dict, cfg, progress=False, **kwargs): 42 | if "row" in tune_dict: 43 | global ROW_LIST 44 | row = tune_dict["row"] 45 | tune_dict.pop("row") 46 | print(ROW_LIST[row]) 47 | tune_dict.update(ROW_LIST[row]) 48 | 49 | cfg.merge_from_list(dict_to_list(tune_dict)) 50 | 51 | 52 | cfg = max_batch_size(cfg) 53 | 54 | ret = simple_train( 55 | cfg=cfg, 56 | progress=progress, 57 | rm_soup=False, 58 | **kwargs, 59 | ) 60 | 61 | 62 | def run_ray( 63 | name, cfg, tune_config, rm=False, progress=False, verbose=False, num_samples=1, time_budget_s=None 64 | ): 65 | cfg = copy.deepcopy(cfg) 66 | if rm: 67 | import shutil 68 | 69 | shutil.rmtree(os.path.join(cfg.RESULTS_DIR, name), ignore_errors=True) 70 | 71 | try: 72 | ana = tune.run( 73 | tune.with_parameters(job, cfg=cfg, progress=progress), 74 | local_dir=cfg.RESULTS_DIR, 75 | config=tune_config, 76 | resources_per_trial={"cpu": 1, "gpu": 1}, 77 | num_samples=num_samples, 78 | name=name, 79 | verbose=verbose, 80 | resume="AUTO+ERRORED", 81 | trial_dirname_creator=trial_dirname_creator, 82 | time_budget_s=time_budget_s 83 | ) 84 | except Exception as e: 85 | print(e) 86 | # print traceback 87 | import traceback 88 | 89 | traceback.print_exc() 90 | 91 | 92 | # ROW_LIST = [ 93 | # {"EXPERIMENTAL.USE_PREV_IMAGE": False}, 94 | # {"EXPERIMENTAL.USE_PREV_IMAGE": True}, 95 | # {"EXPERIMENTAL.USE_EVEN_PREV_IMAGE": True}, 96 | # {"EXPERIMENTAL.SHUFFLE_IMAGES": True}, 97 | # ] 98 | # - 99 | if __name__ == "__main__": 100 | parser = get_parser() 101 | args = parser.parse_args() 102 | t = None 103 | 104 | cfg = load_from_yaml("/workspace/configs/xvaa.yaml") 105 | # cfg.OPTIMIZER.SCHEDULER.WARMUP_T = 3 106 | # cfg.OPTIMIZER.LR = 1e-3 107 | # cfg.TRAINER.CALLBACKS.EARLY_STOP.PATIENCE = 20 108 | 109 | cfg.RESULTS_DIR = "/nfscc/alg23/xvaa/" 110 | 111 | tune_config = { 112 | # "DATASET.SUBJECT_LIST": tune.grid_search([['subj05'], ['subj08']]), 113 | "REGULARIZER.LAYER": tune.grid_search([3e-4, 1e-4, 3e-5]), # inspect layer selector histogram and pick the best, manually 114 | "DATASET.SUBJECT_LIST": tune.grid_search([['subj01'], ['subj02'], ['subj03'], ['subj04'], ['subj05'], ['subj06'], ['subj07'], ['subj08']][::-1]), 115 | } 116 | name = f"topyneck" 117 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 1, t) 118 | -------------------------------------------------------------------------------- /mem/scripts_heavy/xvab_gather.py: -------------------------------------------------------------------------------- 1 | # %% 2 | import argparse 3 | import os 4 | from matplotlib import ticker 5 | import numpy as np 6 | from sympy import Line2D 7 | from config import AutoConfig 8 | from config_utils import get_cfg_defaults 9 | import matplotlib.pyplot as plt 10 | import pandas as pd 11 | import torch 12 | 13 | from read_utils import read_config, read_short_config, read_score_df, list_runs_from_exp_names 14 | # %% 15 | def get_parser(): 16 | parser = argparse.ArgumentParser(description="Ray Tune") 17 | parser.add_argument("--save_dir", type=str, default="/nfscc/alg23/xvab/mem", help="save dir") 18 | parser.add_argument("--exp_dir", type=str, default="/nfscc/alg23/xvaa/topyneck", help="exp dir") 19 | return parser 20 | args = get_parser().parse_args() 21 | # %% 22 | save_dir = args.save_dir 23 | 24 | run_dirs = list_runs_from_exp_names( 25 | [""], exp_dir=f"{args.exp_dir}" 26 | ) 27 | print(len(run_dirs)) 28 | 29 | # %% 30 | datas = [] 31 | for run in run_dirs: 32 | cfg: AutoConfig = 
read_config(run) 33 | subject = cfg.DATASET.SUBJECT_LIST[0] 34 | reg = cfg.REGULARIZER.LAYER 35 | val_score = torch.load(os.path.join(run, "soup_val_score.pth")) 36 | test_score = torch.load(os.path.join(run, "soup_test_score.pth")) 37 | datas.append([subject, reg, val_score, test_score, run]) 38 | df = pd.DataFrame(datas, columns=["subject", "reg", "val_score", 'test_score', 'run']) 39 | # add mean_score column 40 | df['mean_score'] = df[['val_score', 'test_score']].mean(axis=1) 41 | # %% 42 | topyneck_dict = {} 43 | for subject in df.subject.unique(): 44 | # find best reg 45 | df_subject = df[df.subject == subject] 46 | best_reg = df_subject[df_subject.mean_score == df_subject.mean_score.max()].reg.values[0] 47 | print(f"subject: {subject}, best reg: {best_reg}, best score: {df_subject.mean_score.max()}") 48 | best_run = df_subject[df_subject.mean_score == df_subject.mean_score.max()].run.values[0] 49 | 50 | sd = torch.load(os.path.join(best_run, "soup.pth"), map_location=torch.device('cpu')) 51 | names = ['retina_mapper', 'layer_selector'] 52 | sd = {k: v for k, v in sd.items() if any(name in k for name in names)} 53 | topyneck_dict.update(sd) 54 | os.makedirs(save_dir, exist_ok=True) 55 | save_path = os.path.join(save_dir, "topyneck.pth") 56 | torch.save(topyneck_dict, save_path) 57 | print(f'saved to {save_path}') -------------------------------------------------------------------------------- /mem/scripts_heavy/xvba_soup.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from cluster_utils import my_nfs_cluster_job, trial_dirname_creator 3 | 4 | import argparse 5 | import os 6 | import sys 7 | from random import seed, shuffle 8 | 9 | import numpy as np 10 | import ray 11 | from ray import tune 12 | 13 | from config_utils import dict_to_list, get_cfg_defaults, load_from_yaml 14 | 15 | from train_utils import max_batch_size, modular_train, simple_train 16 | 17 | 18 | def get_parser(): 19 | parser = argparse.ArgumentParser(description="Ray Tune") 20 | 21 | parser.add_argument( 22 | "-v", "--verbose", action="store_true", help="verbose", default=False 23 | ) 24 | 25 | parser.add_argument( 26 | "-p", "--progress", action="store_true", help="progress", default=False 27 | ) 28 | 29 | parser.add_argument( 30 | "--rm", action="store_true", default=False, help="Remove all previous results" 31 | ) 32 | 33 | parser.add_argument( 34 | "--name", type=str, default="debug", help="Name of the experiment" 35 | ) 36 | parser.add_argument( 37 | "--time", type=int, default=-1, help="Time limit of the experiment" 38 | ) 39 | parser.add_argument( 40 | "--topyneck_path", type=str, default="/nfscc/alg23/xvab/mem/topyneck.pth", help="Path to topyneck" 41 | ) 42 | return parser 43 | 44 | 45 | @my_nfs_cluster_job 46 | def job(tune_dict, cfg, progress=False, **kwargs): 47 | topyneck_path = kwargs.pop('topyneck_path') 48 | 49 | cfg.merge_from_list(dict_to_list(tune_dict)) 50 | 51 | cfg = max_batch_size(cfg) 52 | 53 | ret = simple_train( # todo 54 | cfg=cfg, 55 | progress=progress, 56 | topyneck_path=topyneck_path, 57 | rm_soup=False, 58 | **kwargs, 59 | ) 60 | 61 | 62 | def run_ray( 63 | name, cfg, tune_config, rm=False, progress=False, verbose=False, num_samples=1, time_budget_s=None, topyneck_path=None 64 | ): 65 | cfg = copy.deepcopy(cfg) 66 | if rm: 67 | import shutil 68 | 69 | shutil.rmtree(os.path.join(cfg.RESULTS_DIR, name), ignore_errors=True) 70 | 71 | try: 72 | ana = tune.run( 73 | tune.with_parameters(job, cfg=cfg, progress=progress, 
topyneck_path=topyneck_path), 74 | local_dir=cfg.RESULTS_DIR, 75 | config=tune_config, 76 | resources_per_trial={"cpu": 1, "gpu": 1}, 77 | num_samples=num_samples, 78 | name=name, 79 | verbose=verbose, 80 | resume="AUTO+ERRORED", 81 | trial_dirname_creator=trial_dirname_creator, 82 | time_budget_s=time_budget_s 83 | ) 84 | except Exception as e: 85 | print(e) 86 | import traceback 87 | 88 | traceback.print_exc() 89 | 90 | 91 | if __name__ == "__main__": 92 | parser = get_parser() 93 | args = parser.parse_args() 94 | t = args.time if args.time > 0 else None 95 | 96 | cfg = load_from_yaml("/workspace/configs/xvba.yaml") 97 | cfg.RESULTS_DIR = "/nfscc/alg23/xvba/" 98 | # cfg.DATAMODULE.BATCH_SIZE = 32 99 | # cfg.OPTIMIZER.LR = 1e-3 100 | 101 | tune_config = { 102 | # "DATASET.SUBJECT_LIST": tune.grid_search([["subj01"]]), 103 | # "LOSS.SYNC.USE": tune.grid_search([True, False]), 104 | # ["RSC", "E", "MV", "ML", "MP", "V", "L", "P"] + ['R'] 105 | # "DATASET.ROIS": tune.grid_search([["all"], ["RSC"], ["E"], ["MV"], ["ML"], ["MP"], ["V"], ["L"], ["P"], ["R"]]), 106 | # "DATASET.ROIS": tune.grid_search([["E"], ["ML"], ["MP"], ["V"]]), 107 | # "TRAINER.ACCUMULATE_GRAD_BATCHES": tune.grid_search([1, 2]), 108 | "DATASET.ROIS": tune.grid_search([['all']]), 109 | } 110 | name = f"all" 111 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 1, t, args.topyneck_path) 112 | -------------------------------------------------------------------------------- /mem/scripts_heavy/xvbaa_darkpred.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | from dark_onemodel import build_dmt, get_outs 4 | 5 | import argparse 6 | 7 | from datasets import NSDDataset 8 | 9 | parser = argparse.ArgumentParser() 10 | parser.add_argument("--run_dir", type=str, default="/nfscc/alg23/xvba/roiall_bsz/tb314c_00000_TRAINER.ACCUMULATE_GRAD_BATCHES=1/") 11 | parser.add_argument("--save_name", type=str, default="xvbaa") 12 | parser.add_argument("--stage", type=str, default="predict") 13 | args = parser.parse_args() 14 | 15 | dm, plmodel, trainer = build_dmt(args.run_dir) 16 | soup = torch.load(os.path.join(args.run_dir, "soup.pth"), map_location=torch.device('cpu')) 17 | plmodel.load_state_dict(soup) 18 | plmodel.eval() 19 | 20 | subject_list = dm.cfg.DATASET.SUBJECT_LIST 21 | 22 | for subject in subject_list: 23 | if args.stage == "train": 24 | dataloader = dm.train_dataloader(subject=subject, shuffle=False) 25 | elif args.stage == "val": 26 | dataloader = dm.val_dataloader(subject=subject, shuffle=False) 27 | elif args.stage == "test": 28 | dataloader = dm.test_dataloader(subject=subject, shuffle=False) 29 | elif args.stage == "predict": 30 | dataloader = dm.predict_dataloader(subject=subject, shuffle=False) 31 | else: 32 | raise ValueError(f"stage {args.stage} not supported") 33 | # dataloader = dm.predict_dataloader(subject=subject) 34 | outs = get_outs(plmodel, trainer, dataloader) 35 | 36 | dataset : NSDDataset = dataloader.dataset 37 | dataset.save_dark(outs, args.save_name) -------------------------------------------------------------------------------- /mem/scripts_heavy/xvbab_submission.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | import numpy as np 4 | 5 | from datamodule import NSDDatamodule 6 | from datasets import NSDDataset 7 | 8 | parser = argparse.ArgumentParser(description="") 9 | parser.add_argument("--dark_name", type=str, default="xvbaa") 10 | 
parser.add_argument("--data_dir", type=str, default="/data/ALG23/") 11 | parser.add_argument("--alg_dir", type=str, default="/nfscc/algonauts2023/") 12 | parser.add_argument("--save_dir", type=str, default="/nfscc/alg23/submission/xvba") 13 | args = parser.parse_args() 14 | 15 | from config_utils import get_cfg_defaults 16 | from config import AutoConfig 17 | 18 | SPACE = 'fsaverage' 19 | N = 327684 20 | 21 | for _i_subject in range(1, 9): 22 | subject = f"subj{_i_subject:02d}" 23 | 24 | cfg = get_cfg_defaults() 25 | cfg.DATASET.FMRI_SPACE = SPACE 26 | cfg.DATASET.ROIS = ["all"] 27 | cfg.DATASET.DARK_POSTFIX = args.dark_name 28 | cfg.DATASET.SUBJECT_LIST = [subject] 29 | 30 | dm = NSDDatamodule(cfg) 31 | dm.setup() 32 | 33 | dataloader = dm.predict_dataloader(subject=subject) 34 | dataset: NSDDataset = dataloader.dataset 35 | 36 | # /data/ALG23/subj08/image_ids/challenge_set.txt 37 | # /data/ALG23/subj01/image_ids/predict_set.txt 38 | 39 | predict_images = np.loadtxt(f"{args.data_dir}/{subject}/image_ids/predict_set.txt", dtype=int) 40 | challenge_images = np.loadtxt(f"{args.data_dir}/{subject}/image_ids/challenge_set.txt", dtype=int) 41 | 42 | # /data/ALG23/subj01/data_mask/fsaverage/voxel_indices.npy 43 | voxel_indices = np.load(f"{args.data_dir}/{subject}/data_mask/{SPACE}/voxel_indices.npy") 44 | 45 | # /nfscc/algonauts2023/subj01/roi_masks/lh.all-vertices_fsaverage_space.npy 46 | lh_mask = np.load(f"{args.alg_dir}/{subject}/roi_masks/lh.all-vertices_fsaverage_space.npy") 47 | rh_mask = np.load(f"{args.alg_dir}/{subject}/roi_masks/rh.all-vertices_fsaverage_space.npy") 48 | challenge_mask = np.concatenate([lh_mask, rh_mask], axis=0) 49 | num_lh = lh_mask.sum() 50 | num_rh = rh_mask.sum() 51 | num_challenge = challenge_mask.sum() 52 | 53 | # d = np.zeros(N, dtype=np.float32) 54 | 55 | lh, rh = [], [] 56 | for image_id in challenge_images: 57 | idxs = np.where(predict_images == image_id)[0] 58 | _lh, _rh = [], [] 59 | for _i in idxs: 60 | _data = dataset.load_one_dark(_i, args.dark_name) 61 | _data = _data[:num_challenge] 62 | _lh.append(_data[:num_lh]) 63 | _rh.append(_data[num_lh:]) 64 | _lh = np.stack(_lh, axis=0).mean(axis=0) 65 | _rh = np.stack(_rh, axis=0).mean(axis=0) 66 | lh.append(_lh) 67 | rh.append(_rh) 68 | lh = np.stack(lh, axis=0) 69 | rh = np.stack(rh, axis=0) 70 | 71 | save_dir = f"{args.save_dir}/{subject}" 72 | os.makedirs(save_dir, exist_ok=True) 73 | 74 | # lh_pred_test.npy 75 | np.save(f"{save_dir}/lh_pred_test.npy", lh) 76 | np.save(f"{save_dir}/rh_pred_test.npy", rh) -------------------------------------------------------------------------------- /mem/scripts_heavy/xvbc_roimodel.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from cluster_utils import my_nfs_cluster_job, trial_dirname_creator 3 | 4 | import argparse 5 | import os 6 | import sys 7 | from random import seed, shuffle 8 | 9 | import numpy as np 10 | import ray 11 | from ray import tune 12 | 13 | from config_utils import dict_to_list, get_cfg_defaults, load_from_yaml 14 | 15 | from train_utils import max_batch_size, modular_train, simple_train 16 | 17 | 18 | def get_parser(): 19 | parser = argparse.ArgumentParser(description="Ray Tune") 20 | 21 | parser.add_argument( 22 | "-v", "--verbose", action="store_true", help="verbose", default=False 23 | ) 24 | 25 | parser.add_argument( 26 | "-p", "--progress", action="store_true", help="progress", default=False 27 | ) 28 | 29 | parser.add_argument( 30 | "--rm", action="store_true", default=False, 
help="Remove all previous results" 31 | ) 32 | 33 | parser.add_argument( 34 | "--name", type=str, default="debug", help="Name of the experiment" 35 | ) 36 | parser.add_argument( 37 | "--time", type=int, default=-1, help="Time limit of the experiment" 38 | ) 39 | parser.add_argument( 40 | "--topyneck_path", type=str, default="/nfscc/alg23/xvab/mem/topyneck.pth", help="Path to topyneck" 41 | ) 42 | return parser 43 | 44 | 45 | @my_nfs_cluster_job 46 | def job(tune_dict, cfg, progress=False, **kwargs): 47 | topyneck_path = kwargs.pop('topyneck_path') 48 | 49 | cfg.merge_from_list(dict_to_list(tune_dict)) 50 | 51 | cfg = max_batch_size(cfg) 52 | 53 | ret = modular_train( # todo 54 | cfg=cfg, 55 | progress=progress, 56 | topyneck_path=topyneck_path, 57 | rm_soup=False, 58 | **kwargs, 59 | ) 60 | 61 | 62 | def run_ray( 63 | name, cfg, tune_config, rm=False, progress=False, verbose=False, num_samples=1, time_budget_s=None, topyneck_path=None 64 | ): 65 | cfg = copy.deepcopy(cfg) 66 | if rm: 67 | import shutil 68 | 69 | shutil.rmtree(os.path.join(cfg.RESULTS_DIR, name), ignore_errors=True) 70 | 71 | try: 72 | ana = tune.run( 73 | tune.with_parameters(job, cfg=cfg, progress=progress, topyneck_path=topyneck_path), 74 | local_dir=cfg.RESULTS_DIR, 75 | config=tune_config, 76 | resources_per_trial={"cpu": 1, "gpu": 1}, 77 | num_samples=num_samples, 78 | name=name, 79 | verbose=verbose, 80 | resume="AUTO+ERRORED", 81 | trial_dirname_creator=trial_dirname_creator, 82 | time_budget_s=time_budget_s 83 | ) 84 | except Exception as e: 85 | print(e) 86 | import traceback 87 | 88 | traceback.print_exc() 89 | 90 | 91 | if __name__ == "__main__": 92 | parser = get_parser() 93 | args = parser.parse_args() 94 | t = args.time if args.time > 0 else None 95 | 96 | cfg = load_from_yaml("/workspace/configs/xvba.yaml") 97 | cfg.RESULTS_DIR = "/nfscc/alg23/xvbc/" 98 | # cfg.DATAMODULE.BATCH_SIZE = 32 99 | # cfg.OPTIMIZER.LR = 1e-3 100 | 101 | rois = [["all"], ["RSC"], ["E"], ["MV"], ["ML"], ["MP"], ["V"], ["L"], ["P"], ["R"]] 102 | 103 | tune_config = { 104 | "DATASET.ROIS": tune.grid_search([['all']]), 105 | } 106 | name = f"all" 107 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 3, t, args.topyneck_path) 108 | 109 | tune_config = { 110 | "DATASET.ROIS": tune.grid_search([["RSC"], ["E"], ["MV"], ["ML"], ["MP"], ["V"], ["L"], ["P"], ["R"]]), 111 | } 112 | name = f"A" 113 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 1, t, args.topyneck_path) 114 | 115 | tune_config = { 116 | "DATASET.ROIS": tune.grid_search([[f'w_{i}'] for i in range(1, 10)]), 117 | } 118 | name = f"W" 119 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 1, t, args.topyneck_path) 120 | 121 | for _i_rand in range(1, 11): 122 | tune_config = { 123 | "DATASET.ROIS": tune.grid_search([[f'r_{_i_rand}_{i}'] for i in range(1, 10)]), 124 | } 125 | name = f"R{_i_rand}" 126 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 1, t, args.topyneck_path) 127 | 128 | # tune_config = { 129 | # "DATASET.ROIS": tune.grid_search([[f'r_2_{i}'] for i in range(1, 10)]), 130 | # } 131 | # name = f"R2" 132 | # run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 1, t, args.topyneck_path) 133 | 134 | # tune_config = { 135 | # "DATASET.ROIS": tune.grid_search([[f'r_3_{i}'] for i in range(1, 10)]), 136 | # } 137 | # name = f"R3" 138 | # run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 1, t, args.topyneck_path) 139 | 140 | 
-------------------------------------------------------------------------------- /mem/scripts_heavy/xvda_do_soup.py: -------------------------------------------------------------------------------- 1 | import copy 2 | 3 | import pandas as pd 4 | import torch 5 | from cluster_utils import my_nfs_cluster_job, trial_dirname_creator 6 | 7 | import argparse 8 | import os 9 | import sys 10 | from random import seed, shuffle 11 | 12 | import numpy as np 13 | import ray 14 | from ray import tune 15 | from config import AutoConfig 16 | 17 | from config_utils import dict_to_list, get_cfg_defaults, load_from_yaml 18 | 19 | from read_utils import ( 20 | find_runs_from_exp_dir, 21 | read_config, 22 | read_short_config, 23 | read_score_df, 24 | list_runs_from_exp_names, 25 | ) 26 | from dark_onemodel import build_dmt, get_outs 27 | 28 | 29 | def get_parser(): 30 | parser = argparse.ArgumentParser(description="Ray Tune") 31 | parser.add_argument("--ckpt_dir", type=str, default="/data/dckpt/", help="ckpt dir") 32 | return parser 33 | 34 | 35 | args = get_parser().parse_args() 36 | 37 | runs = os.listdir(args.ckpt_dir) 38 | for run in runs: 39 | _d = os.path.join(args.ckpt_dir, run) 40 | ckpts = os.listdir(_d) 41 | ckpts = [os.path.join(_d, ckpt) for ckpt in ckpts] 42 | ckpts = [ckpt for ckpt in ckpts if ckpt.endswith(".ckpt")] 43 | 44 | soup_state_dict = None 45 | n_ingredients = 0 46 | for ckpt in ckpts: 47 | print(f"loading {ckpt}") 48 | state_dict = torch.load(ckpt, map_location='cpu')["state_dict"] 49 | if soup_state_dict is None: 50 | soup_state_dict = copy.deepcopy(state_dict) 51 | else: 52 | soup_state_dict = {k: v + soup_state_dict[k] for k, v in state_dict.items()} 53 | n_ingredients += 1 54 | soup_state_dict = {k: v / n_ingredients for k, v in soup_state_dict.items()} 55 | 56 | torch.save(soup_state_dict, os.path.join(_d, "soup.pth")) -------------------------------------------------------------------------------- /mem/scripts_heavy/xvfeb_dosoup.py: -------------------------------------------------------------------------------- 1 | import copy 2 | 3 | import pandas as pd 4 | import torch 5 | from cluster_utils import my_nfs_cluster_job, trial_dirname_creator 6 | 7 | import argparse 8 | import os 9 | import sys 10 | from random import seed, shuffle 11 | 12 | import numpy as np 13 | import ray 14 | from ray import tune 15 | from config import AutoConfig 16 | 17 | from config_utils import dict_to_list, get_cfg_defaults, load_from_yaml, save_to_yaml 18 | 19 | from read_utils import ( 20 | find_runs_from_exp_dir, 21 | read_config, 22 | read_short_config, 23 | read_score_df, 24 | list_runs_from_exp_names, 25 | ) 26 | from dark_onemodel import build_dmt, get_outs 27 | 28 | 29 | def get_parser(): 30 | parser = argparse.ArgumentParser(description="Ray Tune") 31 | parser.add_argument("--exp_dir", type=str, default="/nfscc/alg23/xvfe/", help="exp dir") 32 | parser.add_argument("--save_dir", type=str, default="/nfscc/alg23/xvfeb/", help="save dir") 33 | return parser 34 | 35 | 36 | args = get_parser().parse_args() 37 | 38 | save_dir = args.save_dir 39 | os.makedirs(save_dir, exist_ok=True) 40 | 41 | runs = find_runs_from_exp_dir(args.exp_dir) 42 | 43 | print(len(runs)) 44 | 45 | 46 | def do_soup(ckpts): 47 | soup_state_dict = None 48 | n_ingredients = 0 49 | for ckpt in ckpts: 50 | print(f"loading {ckpt}") 51 | state_dict = torch.load(ckpt, map_location='cpu') 52 | if soup_state_dict is None: 53 | soup_state_dict = copy.deepcopy(state_dict) 54 | else: 55 | soup_state_dict = {k: v + soup_state_dict[k] for 
k, v in state_dict.items()} 56 | n_ingredients += 1 57 | soup_state_dict = {k: v / n_ingredients for k, v in soup_state_dict.items()} 58 | 59 | return soup_state_dict 60 | 61 | answer = np.concatenate([np.arange(0, 8), np.arange(21, 35)]).tolist() 62 | memory = np.arange(8, 19).tolist() 63 | time = np.arange(19, 21).tolist() 64 | 65 | full = np.arange(0, 35).tolist() 66 | 67 | no_answer = [i for i in full if i not in answer] 68 | no_memory = [i for i in full if i not in memory] 69 | no_time = [i for i in full if i not in time] 70 | 71 | 72 | datas = [] 73 | for run in runs: 74 | done_file = os.path.join(run, "done") 75 | if not os.path.exists(done_file): 76 | continue 77 | 78 | cfg = read_config(run) 79 | if cfg.EXPERIMENTAL.USE_PREV_FRAME == False and cfg.EXPERIMENTAL.BEHV_SELECTION == [-1]: 80 | row = 8 81 | elif cfg.EXPERIMENTAL.USE_PREV_FRAME == True and cfg.EXPERIMENTAL.BEHV_SELECTION == no_memory: 82 | row = 9 83 | elif cfg.EXPERIMENTAL.USE_PREV_FRAME == False and cfg.EXPERIMENTAL.BEHV_SELECTION == no_memory: 84 | row = 5 85 | elif cfg.EXPERIMENTAL.BEHV_SELECTION == no_time: 86 | row = 6 87 | elif cfg.EXPERIMENTAL.BEHV_SELECTION == no_answer: 88 | row = 7 89 | elif cfg.MODEL.COND.IN_DIM == 13: 90 | row = 3 91 | elif cfg.MODEL.COND.IN_DIM == 6: 92 | row = 4 93 | elif cfg.EXPERIMENTAL.USE_DEV_MODEL == False: 94 | row = 1 95 | else: 96 | row = 2 97 | 98 | soup_file = os.path.join(run, "soup.pth") 99 | lr = cfg.OPTIMIZER.LR 100 | if lr != 0.0003: 101 | continue 102 | 103 | datas.append([row, lr, soup_file, cfg]) 104 | df = pd.DataFrame(datas, columns=['row', 'lr', 'soup_file', 'cfg']) 105 | 106 | # for row in [1, 2, 3, 4, 5, 6, 7, 8, 9]: 107 | for row in [8, 9]: 108 | _row_df = df[df.row == row] 109 | if len(_row_df) != 1: 110 | continue 111 | print("soup on") 112 | print(_row_df[['row', 'lr', 'soup_file']]) 113 | 114 | soup_files = _row_df.soup_file.tolist() 115 | 116 | _save_dir = os.path.join(save_dir, f"row_{row}") 117 | os.makedirs(_save_dir, exist_ok=True) 118 | soup = do_soup(soup_files) 119 | torch.save(soup, os.path.join(_save_dir, f"soup.pth")) 120 | cfg: AutoConfig = _row_df.iloc[0].cfg 121 | save_to_yaml(cfg, os.path.join(_save_dir, f"config.yaml")) -------------------------------------------------------------------------------- /mem/scripts_heavy/xvfec_dark_predict.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import os 3 | from config import AutoConfig 4 | from config_utils import load_from_yaml 5 | from datamodule import NSDDatamodule 6 | from plmodels import PlVEModel 7 | import torch 8 | from dark_onemodel import build_dmt, get_outs 9 | import pytorch_lightning as pl 10 | 11 | import argparse 12 | 13 | from datasets import NSDDataset 14 | 15 | parser = argparse.ArgumentParser() 16 | parser.add_argument("--load_dir", type=str, default="/nfscc/alg23/xvfeb/") 17 | args = parser.parse_args() 18 | 19 | def _build_dmt(soup_dir): 20 | cfg: AutoConfig = load_from_yaml(os.path.join(soup_dir, "config.yaml")) 21 | cfg.EXPERIMENTAL.SHUFFLE_VAL = False 22 | dm = NSDDatamodule(cfg) 23 | dm.setup() 24 | 25 | plmodel = PlVEModel(cfg, dm.roi_dict, dm.neuron_coords_dict) 26 | 27 | trainer = pl.Trainer( 28 | accelerator="cuda", 29 | devices=[0], 30 | precision=16, 31 | enable_progress_bar=False, 32 | ) 33 | 34 | return dm, plmodel, trainer 35 | 36 | run_dirs = os.listdir(args.load_dir) 37 | run_dirs = sorted(run_dirs) 38 | row_names = copy.deepcopy(run_dirs) 39 | run_dirs = [os.path.join(args.load_dir, run_dir) for run_dir in 
run_dirs] 40 | 41 | for run_dir, row in zip(run_dirs, row_names): 42 | dark_name = f"xvfec_{row}" 43 | dm, plmodel, trainer = _build_dmt(run_dir) 44 | soup = torch.load(os.path.join(run_dir, "soup.pth"), map_location=torch.device('cpu')) 45 | plmodel.load_state_dict(soup) 46 | plmodel.eval() 47 | 48 | subject_list = dm.cfg.DATASET.SUBJECT_LIST 49 | 50 | for subject in subject_list: 51 | dataloader = dm.predict_dataloader(subject=subject) 52 | outs = get_outs(plmodel, trainer, dataloader) 53 | 54 | dataset : NSDDataset = dataloader.dataset 55 | dataset.save_dark(outs, dark_name) 56 | 57 | dataloader = dm.test_dataloader(subject=subject) 58 | outs = get_outs(plmodel, trainer, dataloader) 59 | 60 | dataset : NSDDataset = dataloader.dataset 61 | dataset.save_dark(outs, dark_name) -------------------------------------------------------------------------------- /mem/scripts_heavy/xvfed_local_eval.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | import numpy as np 4 | import torch 5 | 6 | import pandas as pd 7 | 8 | from metrics import vectorized_correlation 9 | from datamodule import NSDDatamodule 10 | from datasets import NSDDataset 11 | 12 | from config_utils import get_cfg_defaults 13 | from config import AutoConfig 14 | 15 | SPACE = 'fsaverage' 16 | N = 327684 17 | 18 | def get_parser(): 19 | parser = argparse.ArgumentParser(description="") 20 | parser.add_argument("--dark_names", nargs="+", type=str, default=["xvdb"]) 21 | return parser 22 | 23 | 24 | args = get_parser().parse_args() 25 | 26 | datas = [] 27 | # for _i_row in range(1, 8): 28 | # dark_name = f"xvfec_row_{_i_row}" 29 | # for dark_name in ['xvdb']: 30 | # for dark_name in ['xvfee_mem', 'xvfee_baseline']: 31 | # for dark_name in ['xvfef_fmn', 'xvfe_nm1']: 32 | # for dark_name in ['xvfe_nm2', 'xvfef_bbaseline']: 33 | for dark_name in args.dark_names: 34 | rs = [] 35 | for _i_subject in range(1, 9): 36 | subject = f"subj{_i_subject:02d}" 37 | 38 | cfg = get_cfg_defaults() 39 | cfg.DATASET.FMRI_SPACE = SPACE 40 | cfg.DATASET.ROIS = ["orig"] 41 | cfg.DATASET.DARK_POSTFIX = dark_name 42 | cfg.DATASET.SUBJECT_LIST = [subject] 43 | 44 | dm = NSDDatamodule(cfg) 45 | dm.setup() 46 | 47 | dataloader = dm.test_dataloader(subject=subject) 48 | dataset: NSDDataset = dataloader.dataset 49 | 50 | ys = [] 51 | darks = [] 52 | for batch in dataloader: 53 | ( 54 | img, 55 | prev_img, 56 | prev_feats, 57 | y, 58 | dark, 59 | bhv, 60 | prev_bhvs, 61 | ssid, 62 | subject_name, 63 | data_idx, 64 | ) = batch 65 | 66 | ys += y 67 | darks += dark 68 | 69 | ys = torch.stack(ys) 70 | darks = torch.stack(darks) 71 | 72 | r = vectorized_correlation(ys.cuda(), darks.cuda()) 73 | 74 | rs.append(r) 75 | 76 | print(f"{dark_name} {subject} {r.mean().item()}") 77 | 78 | rs = torch.concatenate(rs) 79 | 80 | mean_r = rs.mean() 81 | mean_r2 = (rs ** 2).mean() 82 | 83 | datas.append([dark_name, mean_r.item(), mean_r2.item()]) 84 | 85 | df = pd.DataFrame(datas, columns=['dark_name', 'mean_r', 'mean_r2']) 86 | 87 | def print_csv(df): 88 | print(df.to_csv(index=False, float_format="%.3f")) 89 | 90 | print_csv(df) -------------------------------------------------------------------------------- /mem/scripts_heavy/xvfee_morebaseline.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from cluster_utils import my_nfs_cluster_job, trial_dirname_creator 3 | 4 | import argparse 5 | import os 6 | import sys 7 | from random import seed, shuffle 8 | 9 | 
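# This script trains the extra "more baseline" rows for the xvfe experiment with
# dark-knowledge distillation switched off (DARK_POSTFIX is cleared and
# LOSS.DARK.USE is set to False below): row 0 is the full model, row 1 a
# stripped-down baseline with behaviour inputs, behaviour pass-through and the
# previous frame all disabled.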
import numpy as np 10 | import ray 11 | from ray import tune 12 | 13 | from config_utils import dict_to_list, get_cfg_defaults, load_from_yaml 14 | 15 | from train_utils import max_batch_size, modular_train, simple_train 16 | 17 | 18 | def get_parser(): 19 | parser = argparse.ArgumentParser(description="Ray Tune") 20 | 21 | parser.add_argument( 22 | "-v", "--verbose", action="store_true", help="verbose", default=False 23 | ) 24 | 25 | parser.add_argument( 26 | "-p", "--progress", action="store_true", help="progress", default=False 27 | ) 28 | 29 | parser.add_argument( 30 | "--rm", action="store_true", default=False, help="Remove all previous results" 31 | ) 32 | 33 | parser.add_argument( 34 | "--time", type=int, default=-1, help="Time limit of the experiment" 35 | ) 36 | parser.add_argument( 37 | "--topyneck_path", 38 | type=str, 39 | default="/nfscc/alg23/xvab/mem/topyneck.pth", 40 | help="Path to topyneck", 41 | ) 42 | return parser 43 | 44 | 45 | @my_nfs_cluster_job 46 | def job(tune_dict, cfg, progress=False, **kwargs): 47 | topyneck_path = kwargs.pop("topyneck_path") 48 | if "row" in tune_dict: 49 | global ROW_LIST 50 | row = tune_dict["row"] 51 | tune_dict.pop("row") 52 | print(ROW_LIST[row]) 53 | tune_dict.update(ROW_LIST[row]) 54 | 55 | cfg.merge_from_list(dict_to_list(tune_dict)) 56 | 57 | cfg = max_batch_size(cfg) 58 | 59 | ret = simple_train( # todo 60 | cfg=cfg, 61 | progress=progress, 62 | topyneck_path=topyneck_path, 63 | rm_soup=False, 64 | **kwargs, 65 | ) 66 | 67 | 68 | def run_ray( 69 | name, 70 | cfg, 71 | tune_config, 72 | rm=False, 73 | progress=False, 74 | verbose=False, 75 | num_samples=1, 76 | time_budget_s=None, 77 | topyneck_path=None, 78 | ): 79 | cfg = copy.deepcopy(cfg) 80 | if rm: 81 | import shutil 82 | 83 | shutil.rmtree(os.path.join(cfg.RESULTS_DIR, name), ignore_errors=True) 84 | 85 | try: 86 | ana = tune.run( 87 | tune.with_parameters( 88 | job, cfg=cfg, progress=progress, topyneck_path=topyneck_path 89 | ), 90 | local_dir=cfg.RESULTS_DIR, 91 | config=tune_config, 92 | resources_per_trial={"cpu": 1, "gpu": 1}, 93 | num_samples=num_samples, 94 | name=name, 95 | verbose=verbose, 96 | resume="AUTO+ERRORED", 97 | trial_dirname_creator=trial_dirname_creator, 98 | time_budget_s=time_budget_s, 99 | ) 100 | except Exception as e: 101 | print(e) 102 | import traceback 103 | 104 | traceback.print_exc() 105 | 106 | 107 | ROW_LIST = [] 108 | 109 | if __name__ == "__main__": 110 | parser = get_parser() 111 | args = parser.parse_args() 112 | t = args.time if args.time > 0 else None 113 | 114 | cfg = load_from_yaml("/workspace/configs/xvfe.yaml") 115 | cfg.RESULTS_DIR = "/nfscc/alg23/xvfee/" 116 | 117 | cfg.DATASET.DARK_POSTFIX = '' 118 | cfg.LOSS.DARK.USE = False 119 | 120 | ROW_LIST = [ 121 | { 122 | "EXPERIMENTAL.USE_DEV_MODEL": False, 123 | }, # full 124 | { 125 | "EXPERIMENTAL.USE_DEV_MODEL": True, 126 | "EXPERIMENTAL.USE_BHV": False, 127 | "EXPERIMENTAL.USE_BHV_PASSTHROUGH": False, 128 | "EXPERIMENTAL.USE_PREV_FRAME": False, 129 | }, # baseline 130 | ] 131 | 132 | tune_config = { 133 | "row": tune.grid_search(list(range(len(ROW_LIST)))), 134 | "OPTIMIZER.LR": tune.grid_search([3e-4]), 135 | } 136 | name = f"baseline" 137 | run_ray( 138 | name, 139 | cfg, 140 | tune_config, 141 | args.rm, 142 | args.progress, 143 | args.verbose, 144 | 1, 145 | t, 146 | args.topyneck_path, 147 | ) 148 | -------------------------------------------------------------------------------- /mem/scripts_heavy/xvfef_moredistill.py: 
-------------------------------------------------------------------------------- 1 | import copy 2 | from cluster_utils import my_nfs_cluster_job, trial_dirname_creator 3 | 4 | import argparse 5 | import os 6 | import sys 7 | from random import seed, shuffle 8 | 9 | import numpy as np 10 | import ray 11 | from ray import tune 12 | 13 | from config_utils import dict_to_list, get_cfg_defaults, load_from_yaml 14 | 15 | from train_utils import max_batch_size, modular_train, simple_train 16 | 17 | 18 | def get_parser(): 19 | parser = argparse.ArgumentParser(description="Ray Tune") 20 | 21 | parser.add_argument( 22 | "-v", "--verbose", action="store_true", help="verbose", default=False 23 | ) 24 | 25 | parser.add_argument( 26 | "-p", "--progress", action="store_true", help="progress", default=False 27 | ) 28 | 29 | parser.add_argument( 30 | "--rm", action="store_true", default=False, help="Remove all previous results" 31 | ) 32 | 33 | parser.add_argument( 34 | "--time", type=int, default=-1, help="Time limit of the experiment" 35 | ) 36 | parser.add_argument( 37 | "--topyneck_path", 38 | type=str, 39 | default="/nfscc/alg23/xvab/mem/topyneck.pth", 40 | help="Path to topyneck", 41 | ) 42 | return parser 43 | 44 | 45 | @my_nfs_cluster_job 46 | def job(tune_dict, cfg, progress=False, **kwargs): 47 | topyneck_path = kwargs.pop("topyneck_path") 48 | if "row" in tune_dict: 49 | global ROW_LIST 50 | row = tune_dict["row"] 51 | tune_dict.pop("row") 52 | print(ROW_LIST[row]) 53 | tune_dict.update(ROW_LIST[row]) 54 | 55 | cfg.merge_from_list(dict_to_list(tune_dict)) 56 | 57 | cfg = max_batch_size(cfg) 58 | 59 | ret = simple_train( # todo 60 | cfg=cfg, 61 | progress=progress, 62 | topyneck_path=topyneck_path, 63 | rm_soup=False, 64 | **kwargs, 65 | ) 66 | 67 | 68 | def run_ray( 69 | name, 70 | cfg, 71 | tune_config, 72 | rm=False, 73 | progress=False, 74 | verbose=False, 75 | num_samples=1, 76 | time_budget_s=None, 77 | topyneck_path=None, 78 | ): 79 | cfg = copy.deepcopy(cfg) 80 | if rm: 81 | import shutil 82 | 83 | shutil.rmtree(os.path.join(cfg.RESULTS_DIR, name), ignore_errors=True) 84 | 85 | try: 86 | ana = tune.run( 87 | tune.with_parameters( 88 | job, cfg=cfg, progress=progress, topyneck_path=topyneck_path 89 | ), 90 | local_dir=cfg.RESULTS_DIR, 91 | config=tune_config, 92 | resources_per_trial={"cpu": 1, "gpu": 1}, 93 | num_samples=num_samples, 94 | name=name, 95 | verbose=verbose, 96 | resume="AUTO+ERRORED", 97 | trial_dirname_creator=trial_dirname_creator, 98 | time_budget_s=time_budget_s, 99 | ) 100 | except Exception as e: 101 | print(e) 102 | import traceback 103 | 104 | traceback.print_exc() 105 | 106 | 107 | ROW_LIST = [] 108 | 109 | if __name__ == "__main__": 110 | parser = get_parser() 111 | args = parser.parse_args() 112 | t = args.time if args.time > 0 else None 113 | 114 | cfg = load_from_yaml("/workspace/configs/xvfe.yaml") 115 | cfg.RESULTS_DIR = "/nfscc/alg23/xvfef/" 116 | cfg.DATASET.DARK_POSTFIX = 'xvfee_mem' 117 | 118 | ROW_LIST = [ 119 | { 120 | "EXPERIMENTAL.USE_DEV_MODEL": True, 121 | "EXPERIMENTAL.USE_BHV": False, 122 | "EXPERIMENTAL.USE_BHV_PASSTHROUGH": False, 123 | "EXPERIMENTAL.USE_PREV_FRAME": False, 124 | }, # baseline 125 | ] 126 | 127 | tune_config = { 128 | "row": tune.grid_search(list(range(len(ROW_LIST)))), 129 | "OPTIMIZER.LR": tune.grid_search([3e-4]), 130 | } 131 | name = f"baseline" 132 | run_ray( 133 | name, 134 | cfg, 135 | tune_config, 136 | args.rm, 137 | args.progress, 138 | args.verbose, 139 | 1, 140 | t, 141 | args.topyneck_path, 142 | ) 143 | 144 | 145 | 
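# Second pass: reuse the same stripped-down baseline row, but distill from the
# 'xvfee_baseline' predictions instead of 'xvfee_mem', and store the run under
# the experiment name "bbaseline".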
cfg.DATASET.DARK_POSTFIX = 'xvfee_baseline' 146 | 147 | ROW_LIST = [ 148 | { 149 | "EXPERIMENTAL.USE_DEV_MODEL": True, 150 | "EXPERIMENTAL.USE_BHV": False, 151 | "EXPERIMENTAL.USE_BHV_PASSTHROUGH": False, 152 | "EXPERIMENTAL.USE_PREV_FRAME": False, 153 | }, # baseline 154 | ] 155 | 156 | tune_config = { 157 | "row": tune.grid_search(list(range(len(ROW_LIST)))), 158 | "OPTIMIZER.LR": tune.grid_search([3e-4]), 159 | } 160 | name = f"bbaseline" 161 | run_ray( 162 | name, 163 | cfg, 164 | tune_config, 165 | args.rm, 166 | args.progress, 167 | args.verbose, 168 | 1, 169 | t, 170 | args.topyneck_path, 171 | ) -------------------------------------------------------------------------------- /mem/scripts_heavy/xvga_distill_roimodel.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from cluster_utils import my_nfs_cluster_job, trial_dirname_creator 3 | 4 | import argparse 5 | import os 6 | import sys 7 | from random import seed, shuffle 8 | 9 | import numpy as np 10 | import ray 11 | from ray import tune 12 | 13 | from config_utils import dict_to_list, get_cfg_defaults, load_from_yaml 14 | 15 | from train_utils import max_batch_size, modular_train, simple_train 16 | 17 | 18 | def get_parser(): 19 | parser = argparse.ArgumentParser(description="Ray Tune") 20 | 21 | parser.add_argument( 22 | "-v", "--verbose", action="store_true", help="verbose", default=False 23 | ) 24 | 25 | parser.add_argument( 26 | "-p", "--progress", action="store_true", help="progress", default=False 27 | ) 28 | 29 | parser.add_argument( 30 | "--rm", action="store_true", default=False, help="Remove all previous results" 31 | ) 32 | 33 | parser.add_argument( 34 | "--name", type=str, default="debug", help="Name of the experiment" 35 | ) 36 | parser.add_argument( 37 | "--time", type=int, default=-1, help="Time limit of the experiment" 38 | ) 39 | parser.add_argument( 40 | "--topyneck_path", type=str, default="/nfscc/alg23/xvab/mem/topyneck.pth", help="Path to topyneck" 41 | ) 42 | return parser 43 | 44 | 45 | @my_nfs_cluster_job 46 | def job(tune_dict, cfg, progress=False, **kwargs): 47 | topyneck_path = kwargs.pop('topyneck_path') 48 | 49 | cfg.merge_from_list(dict_to_list(tune_dict)) 50 | 51 | cfg = max_batch_size(cfg) 52 | 53 | ret = simple_train( # todo 54 | cfg=cfg, 55 | progress=progress, 56 | topyneck_path=topyneck_path, 57 | rm_soup=False, 58 | **kwargs, 59 | ) 60 | 61 | 62 | def run_ray( 63 | name, cfg, tune_config, rm=False, progress=False, verbose=False, num_samples=1, time_budget_s=None, topyneck_path=None 64 | ): 65 | cfg = copy.deepcopy(cfg) 66 | if rm: 67 | import shutil 68 | 69 | shutil.rmtree(os.path.join(cfg.RESULTS_DIR, name), ignore_errors=True) 70 | 71 | try: 72 | ana = tune.run( 73 | tune.with_parameters(job, cfg=cfg, progress=progress, topyneck_path=topyneck_path), 74 | local_dir=cfg.RESULTS_DIR, 75 | config=tune_config, 76 | resources_per_trial={"cpu": 1, "gpu": 1}, 77 | num_samples=num_samples, 78 | name=name, 79 | verbose=verbose, 80 | resume="AUTO+ERRORED", 81 | trial_dirname_creator=trial_dirname_creator, 82 | time_budget_s=time_budget_s 83 | ) 84 | except Exception as e: 85 | print(e) 86 | import traceback 87 | 88 | traceback.print_exc() 89 | 90 | 91 | if __name__ == "__main__": 92 | parser = get_parser() 93 | args = parser.parse_args() 94 | t = args.time if args.time > 0 else None 95 | 96 | cfg = load_from_yaml("/workspace/configs/xvga.yaml") 97 | cfg.RESULTS_DIR = "/nfscc/alg23/xvga/" 98 | 99 | rois = [["all"], ["RSC"], ["E"], ["MV"], ["ML"], 
["MP"], ["V"], ["L"], ["P"], ["R"]] 100 | 101 | tune_config = { 102 | "DATASET.ROIS": tune.grid_search([['all']]), 103 | } 104 | name = f"all" 105 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 3, t, args.topyneck_path) 106 | 107 | tune_config = { 108 | "DATASET.ROIS": tune.grid_search([["RSC"], ["E"], ["MV"], ["ML"], ["MP"], ["V"], ["L"], ["P"], ["R"]]), 109 | } 110 | name = f"A" 111 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 1, t, args.topyneck_path) 112 | 113 | tune_config = { 114 | "DATASET.ROIS": tune.grid_search([[f'w_{i}'] for i in range(1, 10)]), 115 | } 116 | name = f"W" 117 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 1, t, args.topyneck_path) 118 | 119 | for _i_rand in range(1, 11): 120 | tune_config = { 121 | "DATASET.ROIS": tune.grid_search([[f'r_{_i_rand}_{i}'] for i in range(1, 10)]), 122 | } 123 | name = f"R{_i_rand}" 124 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 1, t, args.topyneck_path) 125 | 126 | 127 | # I am missing 0.07 (61.15) score to win the competition, so just add more models 128 | 129 | for _i_roi in range(2, 4): 130 | tune_config = { 131 | "DATASET.ROIS": tune.grid_search([["RSC"], ["E"], ["MV"], ["ML"], ["MP"], ["V"], ["L"], ["P"], ["R"]]), 132 | } 133 | name = f"A{_i_roi}" 134 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 1, t, args.topyneck_path) 135 | 136 | tune_config = { 137 | "DATASET.ROIS": tune.grid_search([[f'w_{i}'] for i in range(1, 10)]), 138 | } 139 | name = f"W{_i_roi}" 140 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 1, t, args.topyneck_path) 141 | -------------------------------------------------------------------------------- /mem/scripts_light/xvba_nerfed_soup.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from cluster_utils import my_nfs_cluster_job, trial_dirname_creator 3 | 4 | import argparse 5 | import os 6 | import sys 7 | from random import seed, shuffle 8 | 9 | import numpy as np 10 | import ray 11 | from ray import tune 12 | 13 | from config_utils import dict_to_list, get_cfg_defaults, load_from_yaml 14 | 15 | from train_utils import max_batch_size, modular_train, simple_train 16 | 17 | 18 | def get_parser(): 19 | parser = argparse.ArgumentParser(description="Ray Tune") 20 | parser.add_argument( 21 | "-v", "--verbose", action="store_true", help="verbose", default=False 22 | ) 23 | parser.add_argument( 24 | "-p", "--progress", action="store_true", help="progress", default=False 25 | ) 26 | parser.add_argument( 27 | "--rm", action="store_true", default=False, help="Remove all previous results" 28 | ) 29 | parser.add_argument( 30 | "--name", type=str, default="all", help="Name of the experiment" 31 | ) 32 | parser.add_argument( 33 | "--topyneck_path", type=str, default="", help="Path to topyneck" 34 | ) 35 | parser.add_argument( 36 | "--cfg_path", 37 | type=str, 38 | default="/workspace/configs/xvba.yaml", 39 | help="Path to config", 40 | ) 41 | parser.add_argument( 42 | "--results_dir", type=str, default="/nfscc/alg23/xvba/", help="Path to results" 43 | ) 44 | parser.add_argument("--batch_size", type=int, default=32, help="Batch size") 45 | parser.add_argument("--distill_name", type=str, default="", help="dark knowledge name") 46 | return parser 47 | 48 | 49 | @my_nfs_cluster_job 50 | def job(tune_dict, cfg, progress=False, **kwargs): 51 | topyneck_path = kwargs.pop("topyneck_path") 52 | 53 | 
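# Ray Tune hands the sampled hyperparameters over as a flat dict; turn it into a
# yacs-style ["KEY", value, ...] list and fold it into the config before training.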
cfg.merge_from_list(dict_to_list(tune_dict)) 54 | 55 | cfg = max_batch_size(cfg) 56 | 57 | ret = simple_train( # todo 58 | cfg=cfg, 59 | progress=progress, 60 | topyneck_path=topyneck_path, 61 | rm_soup=False, 62 | **kwargs, 63 | ) 64 | 65 | 66 | def run_ray( 67 | name, 68 | cfg, 69 | tune_config, 70 | rm=False, 71 | progress=False, 72 | verbose=False, 73 | num_samples=1, 74 | time_budget_s=None, 75 | topyneck_path=None, 76 | ): 77 | cfg = copy.deepcopy(cfg) 78 | if rm: 79 | import shutil 80 | 81 | shutil.rmtree(os.path.join(cfg.RESULTS_DIR, name), ignore_errors=True) 82 | 83 | try: 84 | ana = tune.run( 85 | tune.with_parameters( 86 | job, cfg=cfg, progress=progress, topyneck_path=topyneck_path 87 | ), 88 | local_dir=cfg.RESULTS_DIR, 89 | config=tune_config, 90 | resources_per_trial={"cpu": 1, "gpu": 1}, 91 | num_samples=num_samples, 92 | name=name, 93 | verbose=verbose, 94 | resume="AUTO+ERRORED", 95 | trial_dirname_creator=trial_dirname_creator, 96 | time_budget_s=time_budget_s, 97 | ) 98 | except Exception as e: 99 | print(e) 100 | import traceback 101 | 102 | traceback.print_exc() 103 | 104 | 105 | if __name__ == "__main__": 106 | parser = get_parser() 107 | args = parser.parse_args() 108 | 109 | cfg = load_from_yaml(args.cfg_path) 110 | cfg.RESULTS_DIR = args.results_dir 111 | cfg.DATAMODULE.BATCH_SIZE = args.batch_size 112 | 113 | dark_name = args.distill_name 114 | if dark_name is not None and len(dark_name) > 0: 115 | cfg.LOSS.DARK.USE = True 116 | cfg.DATASET.DARK_POSTFIX = dark_name 117 | 118 | tune_config = { 119 | # "DATASET.SUBJECT_LIST": tune.grid_search([["subj01"]]), 120 | # "DATASET.ROIS": tune.grid_search([["all"], ["RSC"], ["E"], ["MV"], ["ML"], ["MP"], ["V"], ["L"], ["P"], ["R"]]), 121 | # "DATASET.ROIS": tune.grid_search([["E"], ["ML"], ["MP"], ["V"]]), 122 | "DATASET.ROIS": tune.grid_search([["all"]]), 123 | } 124 | name = args.name 125 | run_ray( 126 | name, 127 | cfg, 128 | tune_config, 129 | args.rm, 130 | args.progress, 131 | args.verbose, 132 | 1, 133 | None, 134 | args.topyneck_path, 135 | ) 136 | -------------------------------------------------------------------------------- /mem/scripts_light/xvbaa_darkpred.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | from dark_onemodel import build_dmt, get_outs 4 | 5 | import argparse 6 | 7 | from datasets import NSDDataset 8 | 9 | parser = argparse.ArgumentParser() 10 | parser.add_argument("--run_dir", type=str, default="/nfscc/alg23/xvba/roiall_bsz/tb314c_00000_TRAINER.ACCUMULATE_GRAD_BATCHES=1/") 11 | parser.add_argument("--save_name", type=str, default="xvbaa") 12 | parser.add_argument("--stage", type=str, default="predict") 13 | args = parser.parse_args() 14 | 15 | dm, plmodel, trainer = build_dmt(args.run_dir) 16 | soup = torch.load(os.path.join(args.run_dir, "soup.pth"), map_location=torch.device('cpu')) 17 | plmodel.load_state_dict(soup) 18 | plmodel.eval() 19 | 20 | subject_list = dm.cfg.DATASET.SUBJECT_LIST 21 | 22 | for subject in subject_list: 23 | if args.stage == "train": 24 | dataloader = dm.train_dataloader(subject=subject, shuffle=False) 25 | elif args.stage == "val": 26 | dataloader = dm.val_dataloader(subject=subject, shuffle=False) 27 | elif args.stage == "test": 28 | dataloader = dm.test_dataloader(subject=subject, shuffle=False) 29 | elif args.stage == "predict": 30 | dataloader = dm.predict_dataloader(subject=subject, shuffle=False) 31 | else: 32 | raise ValueError(f"stage {args.stage} not supported") 33 | # dataloader = 
dm.predict_dataloader(subject=subject) 34 | outs = get_outs(plmodel, trainer, dataloader) 35 | 36 | dataset : NSDDataset = dataloader.dataset 37 | dataset.save_dark(outs, args.save_name) -------------------------------------------------------------------------------- /mem/scripts_light/xvbab_submission.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | import numpy as np 4 | 5 | from datamodule import NSDDatamodule 6 | from datasets import NSDDataset 7 | 8 | parser = argparse.ArgumentParser(description="") 9 | parser.add_argument("--dark_name", type=str, default="xvbaa") 10 | parser.add_argument("--data_dir", type=str, default="/data/ALG23/") 11 | parser.add_argument("--alg_dir", type=str, default="/nfscc/algonauts2023/") 12 | parser.add_argument("--save_dir", type=str, default="/nfscc/alg23/submission/xvba") 13 | args = parser.parse_args() 14 | 15 | from config_utils import get_cfg_defaults 16 | from config import AutoConfig 17 | 18 | SPACE = 'fsaverage' 19 | N = 327684 20 | 21 | for _i_subject in range(1, 9): 22 | subject = f"subj{_i_subject:02d}" 23 | 24 | cfg = get_cfg_defaults() 25 | cfg.DATASET.FMRI_SPACE = SPACE 26 | cfg.DATASET.ROIS = ["all"] 27 | cfg.DATASET.DARK_POSTFIX = args.dark_name 28 | cfg.DATASET.SUBJECT_LIST = [subject] 29 | 30 | dm = NSDDatamodule(cfg) 31 | dm.setup() 32 | 33 | dataloader = dm.predict_dataloader(subject=subject) 34 | dataset: NSDDataset = dataloader.dataset 35 | 36 | # /data/ALG23/subj08/image_ids/challenge_set.txt 37 | # /data/ALG23/subj01/image_ids/predict_set.txt 38 | 39 | predict_images = np.loadtxt(f"{args.data_dir}/{subject}/image_ids/predict_set.txt", dtype=int) 40 | challenge_images = np.loadtxt(f"{args.data_dir}/{subject}/image_ids/challenge_set.txt", dtype=int) 41 | 42 | # /data/ALG23/subj01/data_mask/fsaverage/voxel_indices.npy 43 | voxel_indices = np.load(f"{args.data_dir}/{subject}/data_mask/{SPACE}/voxel_indices.npy") 44 | 45 | # /nfscc/algonauts2023/subj01/roi_masks/lh.all-vertices_fsaverage_space.npy 46 | lh_mask = np.load(f"{args.alg_dir}/{subject}/roi_masks/lh.all-vertices_fsaverage_space.npy") 47 | rh_mask = np.load(f"{args.alg_dir}/{subject}/roi_masks/rh.all-vertices_fsaverage_space.npy") 48 | challenge_mask = np.concatenate([lh_mask, rh_mask], axis=0) 49 | num_lh = lh_mask.sum() 50 | num_rh = rh_mask.sum() 51 | num_challenge = challenge_mask.sum() 52 | 53 | # d = np.zeros(N, dtype=np.float32) 54 | 55 | lh, rh = [], [] 56 | for image_id in challenge_images: 57 | idxs = np.where(predict_images == image_id)[0] 58 | _lh, _rh = [], [] 59 | for _i in idxs: 60 | _data = dataset.load_one_dark(_i, args.dark_name) 61 | _data = _data[:num_challenge] 62 | _lh.append(_data[:num_lh]) 63 | _rh.append(_data[num_lh:]) 64 | _lh = np.stack(_lh, axis=0).mean(axis=0) 65 | _rh = np.stack(_rh, axis=0).mean(axis=0) 66 | lh.append(_lh) 67 | rh.append(_rh) 68 | lh = np.stack(lh, axis=0) 69 | rh = np.stack(rh, axis=0) 70 | 71 | save_dir = f"{args.save_dir}/{subject}" 72 | os.makedirs(save_dir, exist_ok=True) 73 | 74 | # lh_pred_test.npy 75 | np.save(f"{save_dir}/lh_pred_test.npy", lh) 76 | np.save(f"{save_dir}/rh_pred_test.npy", rh) -------------------------------------------------------------------------------- /mem/scripts_paper/xbaa_prevframe_b5k.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from cluster_utils import my_nfs_cluster_job, trial_dirname_creator 3 | 4 | import argparse 5 | import os 6 | import sys 7 | from random 
import seed, shuffle 8 | 9 | import numpy as np 10 | import ray 11 | from ray import tune 12 | 13 | from config_utils import dict_to_list, get_cfg_defaults, load_from_yaml 14 | 15 | from train_utils import max_batch_size, simple_train 16 | 17 | 18 | def get_parser(): 19 | parser = argparse.ArgumentParser(description="Ray Tune") 20 | 21 | parser.add_argument( 22 | "-v", "--verbose", action="store_true", help="verbose", default=False 23 | ) 24 | 25 | parser.add_argument( 26 | "-p", "--progress", action="store_true", help="progress", default=False 27 | ) 28 | 29 | parser.add_argument( 30 | "--rm", action="store_true", default=False, help="Remove all previous results" 31 | ) 32 | 33 | parser.add_argument( 34 | "--name", type=str, default="debug", help="Name of the experiment" 35 | ) 36 | parser.add_argument( 37 | "--time", type=int, default=-1, help="Time limit of the experiment" 38 | ) 39 | return parser 40 | 41 | @my_nfs_cluster_job 42 | def job(tune_dict, cfg, progress=False, **kwargs): 43 | if "row" in tune_dict: 44 | global ROW_LIST 45 | row = tune_dict["row"] 46 | tune_dict.pop("row") 47 | print(ROW_LIST[row]) 48 | tune_dict.update(ROW_LIST[row]) 49 | 50 | cfg.merge_from_list(dict_to_list(tune_dict)) 51 | 52 | cfg = max_batch_size(cfg) 53 | 54 | ret = simple_train( 55 | cfg=cfg, 56 | progress=progress, 57 | rm_soup=True, 58 | **kwargs, 59 | ) 60 | 61 | 62 | def run_ray( 63 | name, cfg, tune_config, rm=False, progress=False, verbose=False, num_samples=1, time_budget_s=None 64 | ): 65 | cfg = copy.deepcopy(cfg) 66 | if rm: 67 | import shutil 68 | 69 | shutil.rmtree(os.path.join(cfg.RESULTS_DIR, name), ignore_errors=True) 70 | 71 | try: 72 | ana = tune.run( 73 | tune.with_parameters(job, cfg=cfg, progress=progress), 74 | local_dir=cfg.RESULTS_DIR, 75 | config=tune_config, 76 | resources_per_trial={"cpu": 1, "gpu": 1}, 77 | num_samples=num_samples, 78 | name=name, 79 | verbose=verbose, 80 | resume="AUTO+ERRORED", 81 | trial_dirname_creator=trial_dirname_creator, 82 | time_budget_s=time_budget_s 83 | ) 84 | except Exception as e: 85 | print(e) 86 | # print traceback 87 | import traceback 88 | 89 | traceback.print_exc() 90 | 91 | 92 | # ROW_LIST = [ 93 | # {"EXPERIMENTAL.USE_PREV_IMAGE": False}, 94 | # {"EXPERIMENTAL.USE_PREV_IMAGE": True}, 95 | # {"EXPERIMENTAL.USE_EVEN_PREV_IMAGE": True}, 96 | # {"EXPERIMENTAL.SHUFFLE_IMAGES": True}, 97 | # ] 98 | # - 99 | if __name__ == "__main__": 100 | parser = get_parser() 101 | args = parser.parse_args() 102 | t = args.time if args.time > 0 else None 103 | 104 | cfg = load_from_yaml("/workspace/configs/bold5000.yaml") 105 | 106 | cfg.DATASET.ROIS = ["all"] 107 | cfg.DATASET.FMRI_SPACE = 'visual_B' 108 | 109 | cfg.RESULTS_DIR = "/nfscc/alg23/xbaa/" 110 | 111 | cfg.EXPERIMENTAL.USE_DEV_MODEL = True 112 | 113 | cfg.EXPERIMENTAL.USE_PREV_FRAME = False 114 | cfg.EXPERIMENTAL.BLANK_IMAGE = False 115 | cfg.EXPERIMENTAL.USE_RETINA_MAPPER = False 116 | cfg.EXPERIMENTAL.USE_LAYER_SELECTOR = False 117 | cfg.EXPERIMENTAL.USE_BHV = False 118 | cfg.EXPERIMENTAL.USE_BHV_PASSTHROUGH = False 119 | cfg.EXPERIMENTAL.BACKBONE_NOGRAD = True 120 | cfg.MODEL.BACKBONE.LORA.SCALE = 0.0 121 | cfg.MODEL.BACKBONE.ADAPTIVE_LN.SCALE = 0.0 122 | 123 | # prev + current 124 | cfg.EXPERIMENTAL.USE_PREV_FRAME = True 125 | tune_config = { 126 | "DATASET.FMRI_SPACE": tune.grid_search(['visual_D', 'visual_B']), 127 | "DATASET.SUBJECT_LIST": tune.grid_search([['CSI1'], ['CSI2'], ['CSI3']]), 128 | "EXPERIMENTAL.USE_PREV_FRAME": tune.grid_search([True]), 129 | "EXPERIMENTAL.T_IMAGE": 
tune.grid_search([0]), 130 | } 131 | name = f"prev_current" 132 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 1, t) 133 | 134 | # baseline 135 | tune_config = { 136 | "DATASET.FMRI_SPACE": tune.grid_search(['visual_D', 'visual_B']), 137 | "DATASET.SUBJECT_LIST": tune.grid_search([['CSI1'], ['CSI2'], ['CSI3']]), 138 | "EXPERIMENTAL.SHUFFLE_IMAGES": tune.grid_search([True]), 139 | } 140 | name = f"rand" 141 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 1, t) 142 | 143 | # prev 144 | cfg.EXPERIMENTAL.BLANK_IMAGE = False 145 | tune_config = { 146 | "DATASET.FMRI_SPACE": tune.grid_search(['visual_D', 'visual_B']), 147 | "DATASET.SUBJECT_LIST": tune.grid_search([['CSI1'], ['CSI2'], ['CSI3']]), 148 | "EXPERIMENTAL.T_IMAGE": tune.grid_search(list(range(0, -32, -1))), 149 | } 150 | name = f"prev" 151 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 1, t) 152 | -------------------------------------------------------------------------------- /mem/scripts_paper/xdaa_prevframe.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from cluster_utils import my_nfs_cluster_job, trial_dirname_creator 3 | 4 | import argparse 5 | import os 6 | import sys 7 | from random import seed, shuffle 8 | 9 | import numpy as np 10 | import ray 11 | from ray import tune 12 | 13 | from config_utils import dict_to_list, get_cfg_defaults, load_from_yaml 14 | 15 | from train_utils import max_batch_size, simple_train 16 | 17 | 18 | def get_parser(): 19 | parser = argparse.ArgumentParser(description="Ray Tune") 20 | 21 | parser.add_argument( 22 | "-v", "--verbose", action="store_true", help="verbose", default=False 23 | ) 24 | 25 | parser.add_argument( 26 | "-p", "--progress", action="store_true", help="progress", default=False 27 | ) 28 | 29 | parser.add_argument( 30 | "--rm", action="store_true", default=False, help="Remove all previous results" 31 | ) 32 | 33 | parser.add_argument( 34 | "--name", type=str, default="debug", help="Name of the experiment" 35 | ) 36 | parser.add_argument( 37 | "--time", type=int, default=-1, help="Time limit of the experiment" 38 | ) 39 | return parser 40 | 41 | @my_nfs_cluster_job 42 | def job(tune_dict, cfg, progress=False, **kwargs): 43 | if "row" in tune_dict: 44 | global ROW_LIST 45 | row = tune_dict["row"] 46 | tune_dict.pop("row") 47 | print(ROW_LIST[row]) 48 | tune_dict.update(ROW_LIST[row]) 49 | 50 | cfg.merge_from_list(dict_to_list(tune_dict)) 51 | 52 | cfg = max_batch_size(cfg) 53 | 54 | ret = simple_train( 55 | cfg=cfg, 56 | progress=progress, 57 | rm_soup=True, 58 | **kwargs, 59 | ) 60 | 61 | 62 | def run_ray( 63 | name, cfg, tune_config, rm=False, progress=False, verbose=False, num_samples=1, time_budget_s=None 64 | ): 65 | cfg = copy.deepcopy(cfg) 66 | if rm: 67 | import shutil 68 | 69 | shutil.rmtree(os.path.join(cfg.RESULTS_DIR, name), ignore_errors=True) 70 | 71 | try: 72 | ana = tune.run( 73 | tune.with_parameters(job, cfg=cfg, progress=progress), 74 | local_dir=cfg.RESULTS_DIR, 75 | config=tune_config, 76 | resources_per_trial={"cpu": 1, "gpu": 1}, 77 | num_samples=num_samples, 78 | name=name, 79 | verbose=verbose, 80 | resume="AUTO+ERRORED", 81 | trial_dirname_creator=trial_dirname_creator, 82 | time_budget_s=time_budget_s 83 | ) 84 | except Exception as e: 85 | print(e) 86 | # print traceback 87 | import traceback 88 | 89 | traceback.print_exc() 90 | 91 | 92 | # ROW_LIST = [ 93 | # {"EXPERIMENTAL.USE_PREV_IMAGE": False}, 94 | # 
{"EXPERIMENTAL.USE_PREV_IMAGE": True}, 95 | # {"EXPERIMENTAL.USE_EVEN_PREV_IMAGE": True}, 96 | # {"EXPERIMENTAL.SHUFFLE_IMAGES": True}, 97 | # ] 98 | # - 99 | if __name__ == "__main__": 100 | parser = get_parser() 101 | args = parser.parse_args() 102 | t = args.time if args.time > 0 else None 103 | 104 | cfg = load_from_yaml("/workspace/configs/dev.yaml") 105 | 106 | cfg.DATASET.ROIS = ["all"] 107 | cfg.DATASET.FMRI_SPACE = 'fship' 108 | 109 | cfg.RESULTS_DIR = "/nfscc/alg23/xdaa/b3" 110 | 111 | cfg.EXPERIMENTAL.USE_DEV_MODEL = True 112 | 113 | cfg.EXPERIMENTAL.USE_PREV_FRAME = False 114 | cfg.EXPERIMENTAL.BLANK_IMAGE = False 115 | cfg.EXPERIMENTAL.USE_RETINA_MAPPER = False 116 | cfg.EXPERIMENTAL.USE_LAYER_SELECTOR = False 117 | cfg.EXPERIMENTAL.USE_BHV = False 118 | cfg.EXPERIMENTAL.USE_BHV_PASSTHROUGH = False 119 | cfg.EXPERIMENTAL.BACKBONE_NOGRAD = True 120 | cfg.MODEL.BACKBONE.LORA.SCALE = 0.0 121 | cfg.MODEL.BACKBONE.ADAPTIVE_LN.SCALE = 0.0 122 | 123 | # prev 124 | cfg.EXPERIMENTAL.BLANK_IMAGE = False 125 | tune_config = { 126 | "DATASET.SUBJECT_LIST": tune.grid_search([['subj01'], ['subj02'], ['subj03'], ['subj04'], ['subj05'], ['subj06'], ['subj07'], ['subj08']]), 127 | "EXPERIMENTAL.T_IMAGE": tune.grid_search(list(range(0, -32, -1))), 128 | } 129 | name = f"prev" 130 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 1, t) 131 | 132 | # prev + current 133 | cfg.EXPERIMENTAL.USE_PREV_FRAME = True 134 | tune_config = { 135 | "DATASET.SUBJECT_LIST": tune.grid_search([['subj01'], ['subj02'], ['subj03'], ['subj04'], ['subj05'], ['subj06'], ['subj07'], ['subj08']]), 136 | "EXPERIMENTAL.USE_PREV_FRAME": tune.grid_search([True]), 137 | "EXPERIMENTAL.T_IMAGE": tune.grid_search([0]), 138 | } 139 | name = f"prev_current" 140 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 1, t) 141 | 142 | # baseline 143 | tune_config = { 144 | "DATASET.SUBJECT_LIST": tune.grid_search([['subj01'], ['subj02'], ['subj03'], ['subj04'], ['subj05'], ['subj06'], ['subj07'], ['subj08']]), 145 | "EXPERIMENTAL.SHUFFLE_IMAGES": tune.grid_search([True]), 146 | } 147 | name = f"rand" 148 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 1, t) -------------------------------------------------------------------------------- /mem/scripts_paper/xdad_table_prevframe.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from datamodule import NSDDatamodule 3 | from plmodels import PlVEModel 4 | from read_utils import ( 5 | read_config, 6 | read_short_config, 7 | read_score_df, 8 | list_runs_from_exp_names, 9 | find_runs_from_exp_dir, 10 | read_test_voxel_score, 11 | ) 12 | 13 | import os 14 | import re 15 | import torch 16 | import numpy as np 17 | import pandas as pd 18 | import pytorch_lightning as pl 19 | import cortex 20 | 21 | from PIL import Image 22 | 23 | import copy 24 | 25 | import cortex 26 | from matplotlib import pyplot as plt 27 | 28 | plt.style.use("dark_background") 29 | from config import AutoConfig 30 | 31 | 32 | def get_parser(): 33 | parser = argparse.ArgumentParser(description="Ray Tune") 34 | parser.add_argument( 35 | "--exp_dir", type=str, default="/nfscc/alg23/xdaa/b3", help="exp dir" 36 | ) 37 | parser.add_argument( 38 | "--beta", type=str, default="b3", help="beta" 39 | ) 40 | parser.add_argument( 41 | "--save_dir", type=str, default="/nfscc/fig/alg23/xdad/", help="save dir" 42 | ) 43 | parser.add_argument("--overwrite", action="store_true", help="overwrite") 44 | return parser 45 
| 46 | 47 | args = get_parser().parse_args() 48 | 49 | ROIS = ( 50 | [] 51 | + ["Primary_Visual", "Visual", "Somatomotor", "Auditory", "Posterior", "Anterior"] 52 | + [ 53 | "ErC", 54 | "area35", 55 | "area36", 56 | "PhC", 57 | "Sub", 58 | "CA1", 59 | "CA2", 60 | "CA3", 61 | "DG", 62 | "HT", 63 | ] 64 | + ["all"] 65 | ) 66 | 67 | BIG_ROIS = ["all", "Visual", "Somatomotor", "Auditory", "Posterior", "Anterior"] 68 | H_ROIS = [ 69 | "ErC", 70 | "area35", 71 | "area36", 72 | "PhC", 73 | "Sub", 74 | "CA1", 75 | "CA2", 76 | "CA3", 77 | "DG", 78 | "HT", 79 | ] 80 | 81 | 82 | def job(run): 83 | cfg: AutoConfig = read_config(run) 84 | subject = cfg.DATASET.SUBJECT_LIST[0] 85 | t = cfg.EXPERIMENTAL.T_IMAGE 86 | all_t = cfg.EXPERIMENTAL.USE_PREV_FRAME 87 | rand = cfg.EXPERIMENTAL.SHUFFLE_IMAGES 88 | vs = read_test_voxel_score(run) 89 | vs = vs[subject][f"TEST/PearsonCorrCoef/{subject}/all"] 90 | dm = NSDDatamodule(cfg) 91 | dm.setup() 92 | ds = dm.dss[0][subject] 93 | roi_dict = ds.roi_dict 94 | v_list = [] 95 | for roi in ROIS: 96 | v = vs[roi_dict[roi]] 97 | v_list.append(v) 98 | data = (subject, t, all_t, rand, run, v_list) 99 | return data 100 | # datas.append(data) 101 | 102 | beta = args.beta 103 | df_path = f'/tmp/xdac_{beta}.pkl' 104 | if os.path.exists(df_path): 105 | df = torch.load(df_path) 106 | else: 107 | exp_dir = args.exp_dir.replace('b3', beta) 108 | runs = find_runs_from_exp_dir(exp_dir) 109 | print(len(runs)) 110 | 111 | import multiprocessing as mp 112 | 113 | with mp.Pool(16) as pool: 114 | datas = pool.map(job, runs) 115 | 116 | df = pd.DataFrame( 117 | datas, columns=["subject", "t", "all_t", "rand", "run", "vs"] 118 | ).sort_values(["subject", "t", "all_t", "rand"]) 119 | 120 | torch.save(df, df_path) 121 | 122 | order = [ 123 | # (0, True, False), 124 | ] 125 | order += [(t, False, False) for t in range(0, -32, -1)] 126 | order += [(0, True, True)] 127 | 128 | 129 | row1 = (0, True, False) 130 | row2 = (0, False, False) 131 | row3 = (-6, False, False) 132 | row4 = (-28, False, False) 133 | row5 = (0, True, True) 134 | rows = [row1, row2, row3, row4, row5] 135 | row_names = ["T=-32:0", "T=0", "T=-6", "T=-28", "T=rand"] 136 | 137 | def make_table(rois): 138 | 139 | datas = [] 140 | for row in rows: 141 | t, all_t, rand = row 142 | roi_datas = [] 143 | for roi in rois: 144 | subject_vs = [] 145 | for subject in df.subject.unique(): 146 | df_subj = df[(df.subject == subject) & (df.t == t) & (df.all_t == all_t) & (df.rand == rand)] 147 | v = df_subj.vs.values[0][ROIS.index(roi)] 148 | subject_vs.append(v) 149 | subject_vs = np.concatenate(subject_vs) 150 | v = subject_vs.mean() 151 | roi_datas.append(f"{v:.3f}") 152 | datas.append(roi_datas) 153 | 154 | df_table = pd.DataFrame(datas, columns=rois) 155 | # add row names 156 | df_table.insert(0, "T", row_names) 157 | # add beta name 158 | df_table.insert(0, "beta", beta) 159 | 160 | def print_csv(df): 161 | print(df.to_csv(index=False, float_format="%.3f")) 162 | 163 | print_csv(df_table) 164 | 165 | os.makedirs(args.save_dir, exist_ok=True) 166 | df_table.to_csv(os.path.join(args.save_dir, f"xdad_{beta}.csv"), index=False, float_format="%.3f") 167 | 168 | make_table(BIG_ROIS) 169 | make_table(H_ROIS) -------------------------------------------------------------------------------- /mem/scripts_paper/xdae_diff_prevframe.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from datamodule import NSDDatamodule 3 | from plmodels import PlVEModel 4 | from read_utils import ( 5 | 
read_config, 6 | read_short_config, 7 | read_score_df, 8 | list_runs_from_exp_names, 9 | find_runs_from_exp_dir, 10 | read_test_voxel_score, 11 | ) 12 | 13 | import os 14 | import re 15 | import torch 16 | import numpy as np 17 | import pandas as pd 18 | import pytorch_lightning as pl 19 | import cortex 20 | 21 | from PIL import Image 22 | 23 | import copy 24 | 25 | import cortex 26 | from matplotlib import pyplot as plt 27 | 28 | plt.style.use("dark_background") 29 | from config import AutoConfig 30 | 31 | N_FSAVERAGE = 327684 32 | 33 | 34 | def get_parser(): 35 | parser = argparse.ArgumentParser(description="Ray Tune") 36 | parser.add_argument( 37 | "--exp_dir", type=str, default="/nfscc/alg23/xdaa/", help="exp dir" 38 | ) 39 | 40 | parser.add_argument( 41 | "--save_dir", type=str, default="/nfscc/fig/alg23/xdae/", help="save dir" 42 | ) 43 | 44 | return parser 45 | 46 | 47 | args = get_parser().parse_args() 48 | 49 | def make_df(runs): 50 | datas = [] 51 | for run in runs: 52 | cfg: AutoConfig = read_config(run) 53 | subject = cfg.DATASET.SUBJECT_LIST[0] 54 | t = cfg.EXPERIMENTAL.T_IMAGE 55 | all_t = cfg.EXPERIMENTAL.USE_PREV_FRAME 56 | rand = cfg.EXPERIMENTAL.SHUFFLE_IMAGES 57 | datas.append([subject, t, all_t, rand, run]) 58 | df = pd.DataFrame(datas, columns=["subject", "t", "all_t", "rand", "run"]).sort_values( 59 | ["subject", "t", "all_t", "rand"] 60 | ) 61 | return df 62 | 63 | 64 | def mycolormap(th=0.05, vmax=0.3): 65 | import matplotlib.colors as mcolors 66 | 67 | # Define the colormap 68 | cmap = plt.cm.get_cmap('bwr') 69 | 70 | # Set the minimum and maximum values 71 | vmin = -vmax 72 | 73 | # Create a normalization instance to map the data values to the range [0, 1] 74 | norm = mcolors.Normalize(vmin=vmin, vmax=vmax) 75 | 76 | # # Create a custom colormap with grey color for values between -0.05 and 0.05 77 | # colors = [cmap(norm(vmin))] 78 | # colors.extend([(0.5, 0.5, 0.5, 1), (0.5, 0.5, 0.5, 1)]) 79 | # colors.extend([cmap(norm(vmax))]) 80 | 81 | colors = [] 82 | for i in range(0, 1000): 83 | v = vmin + (vmax - vmin) * i / 1000 84 | if v < th: 85 | colors.append((0.3535, 0.3535, 0.3535, 1)) 86 | else: 87 | colors.append(cmap(norm(v))) 88 | 89 | # Create the custom colormap 90 | custom_cmap = mcolors.LinearSegmentedColormap.from_list('custom_cmap', colors) 91 | 92 | # # Plot a colorbar to show the colormap 93 | # plt.imshow([np.linspace(vmin, vmax, 100)], cmap=custom_cmap, aspect='auto') 94 | # plt.colorbar() 95 | # plt.savefig('/nfscc/fig/tmp_c.png') 96 | # plt.close() 97 | 98 | return custom_cmap 99 | 100 | 101 | def plot_one_run(vs, png_path): 102 | if os.path.exists(png_path): 103 | return 104 | 105 | vmax = 0.5 106 | vmin = -vmax 107 | 108 | cmap = mycolormap(th=-1, vmax=vmax) 109 | vertex_data = cortex.Vertex(vs, "fsaverage", cmap=cmap, vmin=vmin, vmax=vmax) 110 | cortex.quickflat.make_png( 111 | png_path, 112 | vertex_data, 113 | with_curvature=False, 114 | with_rois=False, 115 | with_labels=True, 116 | with_sulci=True, 117 | with_colorbar=False, 118 | ) 119 | plt.close() 120 | 121 | 122 | for beta in ['b2', 'b3']: 123 | 124 | runs = find_runs_from_exp_dir(os.path.join(args.exp_dir, beta)) 125 | df = make_df(runs) 126 | 127 | full = (0, True, False) 128 | t0 = (0, False, False) 129 | subject = 'subj01' 130 | 131 | full_run = df[(df.subject == subject) & (df.t == full[0]) & (df.all_t == full[1]) & (df.rand == full[2])].run.tolist()[0] 132 | full_vs = read_test_voxel_score(full_run)[subject][f"TEST/PearsonCorrCoef/{subject}/all"][:N_FSAVERAGE] 133 | t0_run = 
df[(df.subject == subject) & (df.t == t0[0]) & (df.all_t == t0[1]) & (df.rand == t0[2])].run.tolist()[0] 134 | t0_vs = read_test_voxel_score(t0_run)[subject][f"TEST/PearsonCorrCoef/{subject}/all"][:N_FSAVERAGE] 135 | 136 | os.makedirs('/tmp/xdae', exist_ok=True) 137 | plot_one_run(full_vs, f'/tmp/xdae/{beta}_a.png') 138 | plot_one_run(t0_vs, f'/tmp/xdae/{beta}_b.png') 139 | plot_one_run(t0_vs - full_vs, f'/tmp/xdae/{beta}_c.png') 140 | 141 | 142 | import matplotlib.gridspec as gridspec 143 | 144 | fig = plt.figure(tight_layout=True, figsize=(16, 10)) 145 | gs = gridspec.GridSpec(3, 2) 146 | 147 | ax = fig.add_subplot(gs[0, :1]) 148 | ax.imshow(Image.open(f'/tmp/xdae/{beta}_a.png')) 149 | ax.axis("off") 150 | ax.set_title("A: T=-32:0", fontsize=24) 151 | 152 | ax = fig.add_subplot(gs[0, 1:]) 153 | ax.imshow(Image.open(f'/tmp/xdae/{beta}_b.png')) 154 | ax.axis("off") 155 | ax.set_title("B: T=0", fontsize=24) 156 | 157 | ax = fig.add_subplot(gs[1:, :]) 158 | ax.imshow(Image.open(f'/tmp/xdae/{beta}_c.png')) 159 | ax.axis("off") 160 | ax.set_title("B - A", fontsize=24) 161 | 162 | save_dir = args.save_dir 163 | os.makedirs(save_dir, exist_ok=True) 164 | save_path = os.path.join(save_dir, f'{beta}.png') 165 | plt.savefig(save_path, dpi=144) 166 | plt.close() -------------------------------------------------------------------------------- /mem/scripts_paper/xdcaa_topyneck.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from cluster_utils import my_nfs_cluster_job, trial_dirname_creator 3 | 4 | import argparse 5 | import os 6 | import sys 7 | from random import seed, shuffle 8 | 9 | import numpy as np 10 | import ray 11 | from ray import tune 12 | 13 | from config_utils import dict_to_list, get_cfg_defaults, load_from_yaml 14 | 15 | from train_utils import max_batch_size, simple_train 16 | 17 | 18 | def get_parser(): 19 | parser = argparse.ArgumentParser(description="Ray Tune") 20 | 21 | parser.add_argument( 22 | "-v", "--verbose", action="store_true", help="verbose", default=False 23 | ) 24 | 25 | parser.add_argument( 26 | "-p", "--progress", action="store_true", help="progress", default=False 27 | ) 28 | 29 | parser.add_argument( 30 | "--rm", action="store_true", default=False, help="Remove all previous results" 31 | ) 32 | 33 | parser.add_argument( 34 | "--name", type=str, default="debug", help="Name of the experiment" 35 | ) 36 | 37 | return parser 38 | 39 | 40 | @my_nfs_cluster_job 41 | def job(tune_dict, cfg, progress=False, **kwargs): 42 | if "row" in tune_dict: 43 | global ROW_LIST 44 | row = tune_dict["row"] 45 | tune_dict.pop("row") 46 | print(ROW_LIST[row]) 47 | tune_dict.update(ROW_LIST[row]) 48 | 49 | cfg.merge_from_list(dict_to_list(tune_dict)) 50 | 51 | 52 | cfg = max_batch_size(cfg) 53 | 54 | ret = simple_train( 55 | cfg=cfg, 56 | progress=progress, 57 | rm_soup=False, 58 | **kwargs, 59 | ) 60 | 61 | 62 | def run_ray( 63 | name, cfg, tune_config, rm=False, progress=False, verbose=False, num_samples=1, time_budget_s=None 64 | ): 65 | cfg = copy.deepcopy(cfg) 66 | if rm: 67 | import shutil 68 | 69 | shutil.rmtree(os.path.join(cfg.RESULTS_DIR, name), ignore_errors=True) 70 | 71 | try: 72 | ana = tune.run( 73 | tune.with_parameters(job, cfg=cfg, progress=progress), 74 | local_dir=cfg.RESULTS_DIR, 75 | config=tune_config, 76 | resources_per_trial={"cpu": 1, "gpu": 1}, 77 | num_samples=num_samples, 78 | name=name, 79 | verbose=verbose, 80 | resume="AUTO+ERRORED", 81 | trial_dirname_creator=trial_dirname_creator, 82 | 
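# resume="AUTO+ERRORED" lets Ray Tune pick up an interrupted sweep and also retry
# trials that previously errored, so the experiment can be re-launched safely.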
time_budget_s=time_budget_s 83 | ) 84 | except Exception as e: 85 | print(e) 86 | # print traceback 87 | import traceback 88 | 89 | traceback.print_exc() 90 | 91 | 92 | # ROW_LIST = [ 93 | # {"EXPERIMENTAL.USE_PREV_IMAGE": False}, 94 | # {"EXPERIMENTAL.USE_PREV_IMAGE": True}, 95 | # {"EXPERIMENTAL.USE_EVEN_PREV_IMAGE": True}, 96 | # {"EXPERIMENTAL.SHUFFLE_IMAGES": True}, 97 | # ] 98 | # - 99 | if __name__ == "__main__": 100 | parser = get_parser() 101 | args = parser.parse_args() 102 | t = None 103 | 104 | cfg = load_from_yaml("/workspace/configs/dev.yaml") 105 | cfg.DATASET.SUBJECT_LIST = ['subj01'] 106 | cfg.DATASET.ROIS = ["all"] 107 | cfg.DATASET.FMRI_SPACE = 'fship' 108 | 109 | cfg.RESULTS_DIR = "/nfscc/alg23/xdcaa/" 110 | 111 | tune_config = { 112 | "REGULARIZER.LAYER": tune.grid_search([3e-3, 1e-3, 3e-4, 1e-4, 3e-5]), # inspect layer selector histogram and pick the best, manually 113 | } 114 | name = f"topyneck" 115 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 1, t) 116 | -------------------------------------------------------------------------------- /mem/scripts_paper/xdcab_gather.py: -------------------------------------------------------------------------------- 1 | # %% 2 | import argparse 3 | import os 4 | from matplotlib import ticker 5 | import numpy as np 6 | from sympy import Line2D 7 | from config import AutoConfig 8 | from config_utils import get_cfg_defaults 9 | import matplotlib.pyplot as plt 10 | import pandas as pd 11 | import torch 12 | 13 | from read_utils import read_config, read_short_config, read_score_df, list_runs_from_exp_names 14 | # %% 15 | def get_parser(): 16 | parser = argparse.ArgumentParser(description="Ray Tune") 17 | parser.add_argument("--save_dir", type=str, default="/nfscc/alg23/xdcab/dev", help="save dir") 18 | parser.add_argument("--exp_dir", type=str, default="/nfscc/alg23/xdcaa/topyneck", help="exp dir") 19 | return parser 20 | args = get_parser().parse_args() 21 | # %% 22 | save_dir = args.save_dir 23 | 24 | run_dirs = list_runs_from_exp_names( 25 | [""], exp_dir=f"{args.exp_dir}" 26 | ) 27 | print(len(run_dirs)) 28 | 29 | # %% 30 | datas = [] 31 | for run in run_dirs: 32 | cfg: AutoConfig = read_config(run) 33 | subject = cfg.DATASET.SUBJECT_LIST[0] 34 | reg = cfg.REGULARIZER.LAYER 35 | val_score = torch.load(os.path.join(run, "soup_val_score.pth")) 36 | test_score = torch.load(os.path.join(run, "soup_test_score.pth")) 37 | datas.append([subject, reg, val_score, test_score, run]) 38 | df = pd.DataFrame(datas, columns=["subject", "reg", "val_score", 'test_score', 'run']) 39 | # add mean_score column 40 | df['mean_score'] = df[['val_score', 'test_score']].mean(axis=1) 41 | # %% 42 | topyneck_dict = {} 43 | for subject in df.subject.unique(): 44 | # find best reg 45 | df_subject = df[df.subject == subject] 46 | best_reg = df_subject[df_subject.mean_score == df_subject.mean_score.max()].reg.values[0] 47 | print(f"subject: {subject}, best reg: {best_reg}, best score: {df_subject.mean_score.max()}") 48 | best_run = df_subject[df_subject.mean_score == df_subject.mean_score.max()].run.values[0] 49 | 50 | sd = torch.load(os.path.join(best_run, "soup.pth"), map_location=torch.device('cpu')) 51 | names = ['retina_mapper', 'layer_selector'] 52 | sd = {k: v for k, v in sd.items() if any(name in k for name in names)} 53 | topyneck_dict.update(sd) 54 | os.makedirs(save_dir, exist_ok=True) 55 | save_path = os.path.join(save_dir, "topyneck.pth") 56 | torch.save(topyneck_dict, save_path) 57 | print(f'saved to {save_path}') 
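# The merged retina-mapper / layer-selector weights saved above are consumed by later
# training runs through their topyneck_path argument; e.g. xdea_before_after.py passes
# "/nfscc/alg23/xdcab/dev/topyneck.pth" to simple_train(). A minimal sketch of what that
# loading amounts to (illustrative only; simple_train() presumably does the equivalent
# internally, and strict=False is needed because the file holds only a subset of keys):
#
#   sd = torch.load("/nfscc/alg23/xdcab/dev/topyneck.pth", map_location="cpu")
#   plmodel.load_state_dict(sd, strict=False)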
-------------------------------------------------------------------------------- /mem/scripts_paper/xdcad_ablation_table.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from datamodule import NSDDatamodule 3 | from plmodels import PlVEModel 4 | from read_utils import ( 5 | read_config, 6 | read_short_config, 7 | read_score_df, 8 | list_runs_from_exp_names, 9 | find_runs_from_exp_dir, 10 | read_test_voxel_score, 11 | ) 12 | 13 | import os 14 | import re 15 | import torch 16 | import numpy as np 17 | import pandas as pd 18 | import pytorch_lightning as pl 19 | import cortex 20 | 21 | from PIL import Image 22 | 23 | import copy 24 | 25 | import cortex 26 | from matplotlib import pyplot as plt 27 | 28 | plt.style.use("dark_background") 29 | from config import AutoConfig 30 | 31 | 32 | def get_parser(): 33 | parser = argparse.ArgumentParser(description="Ray Tune") 34 | parser.add_argument( 35 | "--exp_dir", type=str, default="/nfscc/alg23/xdcac/b3", help="exp dir" 36 | ) 37 | parser.add_argument( 38 | "--beta", type=str, default="b3", help="beta" 39 | ) 40 | parser.add_argument( 41 | "--save_dir", type=str, default="/nfscc/fig/alg23/xdcad/", help="save dir" 42 | ) 43 | parser.add_argument("--overwrite", action="store_true", help="overwrite") 44 | return parser 45 | 46 | 47 | args = get_parser().parse_args() 48 | 49 | ROIS = ( 50 | [] 51 | + ["Primary_Visual", "Visual", "Posterior", "Somatomotor", "Auditory", "Anterior"] 52 | + ["all"] 53 | ) 54 | 55 | # BIG_ROIS = ["all", "Visual", "Somatomotor", "Auditory", "Posterior", "Anterior"] 56 | 57 | # VISUAL_ROIS = ["Primary_Visual", "Visual", "Posterior", "Somatomotor", "Auditory", "Anterior"] 58 | 59 | 60 | def job(run): 61 | cfg: AutoConfig = read_config(run) 62 | tune_dict = read_short_config(run) 63 | subject = cfg.DATASET.SUBJECT_LIST[0] 64 | # t = cfg.EXPERIMENTAL.T_IMAGE 65 | # all_t = cfg.EXPERIMENTAL.USE_PREV_FRAME 66 | # rand = cfg.EXPERIMENTAL.SHUFFLE_IMAGES 67 | row = tune_dict["row"] 68 | vs = read_test_voxel_score(run) 69 | vs = vs[subject][f"TEST/PearsonCorrCoef/{subject}/all"] 70 | dm = NSDDatamodule(cfg) 71 | dm.setup() 72 | ds = dm.dss[0][subject] 73 | roi_dict = ds.roi_dict 74 | v_list = [] 75 | for roi in ROIS: 76 | v = vs[roi_dict[roi]].mean() 77 | v_list.append(v) 78 | data = (subject, row, run, *v_list) 79 | return data 80 | # datas.append(data) 81 | 82 | beta = args.beta 83 | df_path = f'/tmp/xdcad_{beta}.pkl' 84 | # if os.path.exists(df_path): 85 | # df = torch.load(df_path) 86 | # else: 87 | exp_dir = args.exp_dir.replace('b3', beta) 88 | runs = find_runs_from_exp_dir(exp_dir) 89 | print(len(runs)) 90 | 91 | import multiprocessing as mp 92 | 93 | with mp.Pool(16) as pool: 94 | datas = pool.map(job, runs) 95 | 96 | df = pd.DataFrame( 97 | datas, columns=["subject", "row", "run", *ROIS] 98 | ).sort_values(["subject", "row"]) 99 | 100 | torch.save(df, df_path) 101 | 102 | 103 | def print_csv(df): 104 | print(df.to_csv(index=False, float_format="%.3f")) 105 | 106 | hide_col = ['subject', 'run'] 107 | df = df.drop(columns=hide_col) 108 | 109 | # mean over same row 110 | mean_df = df.groupby(['row']).mean().reset_index() 111 | std_df = df.groupby(['row']).std().reset_index() 112 | 113 | print_csv(mean_df) 114 | print_csv(std_df) 115 | 116 | -------------------------------------------------------------------------------- /mem/scripts_paper/xdea_before_after.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from 
cluster_utils import my_nfs_cluster_job, trial_dirname_creator 3 | 4 | import argparse 5 | import os 6 | import sys 7 | from random import seed, shuffle 8 | 9 | import numpy as np 10 | import ray 11 | from ray import tune 12 | 13 | from config_utils import dict_to_list, get_cfg_defaults, load_from_yaml 14 | 15 | from train_utils import max_batch_size, simple_train 16 | 17 | 18 | def get_parser(): 19 | parser = argparse.ArgumentParser(description="Ray Tune") 20 | 21 | parser.add_argument( 22 | "-v", "--verbose", action="store_true", help="verbose", default=False 23 | ) 24 | 25 | parser.add_argument( 26 | "-p", "--progress", action="store_true", help="progress", default=False 27 | ) 28 | 29 | parser.add_argument( 30 | "--rm", action="store_true", default=False, help="Remove all previous results" 31 | ) 32 | 33 | parser.add_argument( 34 | "--name", type=str, default="debug", help="Name of the experiment" 35 | ) 36 | parser.add_argument( 37 | "--time", type=int, default=-1, help="Time limit of the experiment" 38 | ) 39 | return parser 40 | 41 | 42 | @my_nfs_cluster_job 43 | def job(tune_dict, cfg, progress=False, **kwargs): 44 | if "row" in tune_dict: 45 | global ROW_LIST 46 | row = tune_dict["row"] 47 | tune_dict.pop("row") 48 | print(ROW_LIST[row]) 49 | tune_dict.update(ROW_LIST[row]) 50 | 51 | cfg.merge_from_list(dict_to_list(tune_dict)) 52 | 53 | cfg = max_batch_size(cfg) 54 | 55 | ret = simple_train( 56 | cfg=cfg, 57 | progress=progress, 58 | rm_soup=True, 59 | topyneck_path="/nfscc/alg23/xdcab/dev/topyneck.pth", 60 | **kwargs, 61 | ) 62 | 63 | 64 | def run_ray( 65 | name, 66 | cfg, 67 | tune_config, 68 | rm=False, 69 | progress=False, 70 | verbose=False, 71 | num_samples=1, 72 | time_budget_s=None, 73 | ): 74 | cfg = copy.deepcopy(cfg) 75 | if rm: 76 | import shutil 77 | 78 | shutil.rmtree(os.path.join(cfg.RESULTS_DIR, name), ignore_errors=True) 79 | 80 | try: 81 | ana = tune.run( 82 | tune.with_parameters(job, cfg=cfg, progress=progress), 83 | local_dir=cfg.RESULTS_DIR, 84 | config=tune_config, 85 | resources_per_trial={"cpu": 1, "gpu": 1}, 86 | num_samples=num_samples, 87 | name=name, 88 | verbose=verbose, 89 | resume="AUTO+ERRORED", 90 | trial_dirname_creator=trial_dirname_creator, 91 | time_budget_s=time_budget_s, 92 | ) 93 | except Exception as e: 94 | print(e) 95 | # print traceback 96 | import traceback 97 | 98 | traceback.print_exc() 99 | 100 | 101 | answer = np.concatenate([np.arange(0, 8), np.arange(21, 35)]).tolist() 102 | memory = np.arange(8, 19).tolist() 103 | time = np.arange(19, 21).tolist() 104 | 105 | full = np.arange(0, 35).tolist() 106 | 107 | no_answer = [i for i in full if i not in answer] 108 | no_memory = [i for i in full if i not in memory] 109 | no_time = [i for i in full if i not in time] 110 | 111 | ROW_LIST = [ 112 | { 113 | "EXPERIMENTAL.USE_PREV_FRAME": True, 114 | "EXPERIMENTAL.USE_BHV": True, 115 | "EXPERIMENTAL.USE_BHV_PASSTHROUGH": True, 116 | }, # after 117 | { 118 | "EXPERIMENTAL.USE_PREV_FRAME": False, 119 | "EXPERIMENTAL.USE_BHV": False, 120 | "EXPERIMENTAL.USE_BHV_PASSTHROUGH": False, 121 | }, # before 122 | ] 123 | # - 124 | if __name__ == "__main__": 125 | parser = get_parser() 126 | args = parser.parse_args() 127 | t = args.time if args.time > 0 else None 128 | 129 | cfg = load_from_yaml("/workspace/configs/dev_B.yaml") 130 | 131 | cfg.DATASET.SUBJECT_LIST = ["subj01"] 132 | cfg.DATASET.ROIS = ["all"] 133 | cfg.DATASET.FMRI_SPACE = "fship_b2" 134 | 135 | cfg.TRAINER.LIMIT_TRAIN_BATCHES = 0.5 136 | cfg.TRAINER.LIMIT_VAL_BATCHES = 0.5 137 | 
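# Only half of the training and validation batches are used per epoch here
# (LIMIT_*_BATCHES = 0.5), presumably to keep the before/after comparison cheap;
# the early-stopping patience below is set to 30.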
cfg.TRAINER.CALLBACKS.EARLY_STOP.PATIENCE = 30 138 | 139 | cfg.RESULTS_DIR = "/nfscc/alg23/xdea/b2" 140 | 141 | cfg.EXPERIMENTAL.USE_DEV_MODEL = True 142 | 143 | tune_config = { 144 | "row": tune.grid_search(list(range(len(ROW_LIST)))), 145 | } 146 | name = f"ba" 147 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 1, t) 148 | -------------------------------------------------------------------------------- /mem/scripts_tune/tune_b2b3.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from cluster_utils import my_nfs_cluster_job, trial_dirname_creator 3 | 4 | import argparse 5 | import os 6 | import sys 7 | from random import seed, shuffle 8 | 9 | import numpy as np 10 | import ray 11 | from ray import tune 12 | 13 | from config_utils import dict_to_list, get_cfg_defaults, load_from_yaml 14 | 15 | from train_utils import max_batch_size, simple_train 16 | 17 | 18 | def get_parser(): 19 | parser = argparse.ArgumentParser(description="Ray Tune") 20 | 21 | parser.add_argument( 22 | "-v", "--verbose", action="store_true", help="verbose", default=False 23 | ) 24 | 25 | parser.add_argument( 26 | "-p", "--progress", action="store_true", help="progress", default=False 27 | ) 28 | 29 | parser.add_argument( 30 | "--rm", action="store_true", default=False, help="Remove all previous results" 31 | ) 32 | 33 | parser.add_argument( 34 | "--time", type=int, default=-1, help="Time limit of the experiment" 35 | ) 36 | return parser 37 | 38 | @my_nfs_cluster_job 39 | def job(tune_dict, cfg, progress=False, **kwargs): 40 | if "row" in tune_dict: 41 | global ROW_LIST 42 | row = tune_dict["row"] 43 | tune_dict.pop("row") 44 | print(ROW_LIST[row]) 45 | tune_dict.update(ROW_LIST[row]) 46 | 47 | cfg.merge_from_list(dict_to_list(tune_dict)) 48 | 49 | cfg = max_batch_size(cfg) 50 | 51 | ret = simple_train( 52 | cfg=cfg, 53 | progress=progress, 54 | rm_soup=True, 55 | **kwargs, 56 | ) 57 | 58 | 59 | def run_ray( 60 | name, cfg, tune_config, rm=False, progress=False, verbose=False, num_samples=1, time_budget_s=None 61 | ): 62 | cfg = copy.deepcopy(cfg) 63 | if rm: 64 | import shutil 65 | 66 | shutil.rmtree(os.path.join(cfg.RESULTS_DIR, name), ignore_errors=True) 67 | 68 | try: 69 | ana = tune.run( 70 | tune.with_parameters(job, cfg=cfg, progress=progress), 71 | local_dir=cfg.RESULTS_DIR, 72 | config=tune_config, 73 | resources_per_trial={"cpu": 1, "gpu": 1}, 74 | num_samples=num_samples, 75 | name=name, 76 | verbose=verbose, 77 | resume="AUTO+ERRORED", 78 | trial_dirname_creator=trial_dirname_creator, 79 | time_budget_s=time_budget_s 80 | ) 81 | except Exception as e: 82 | print(e) 83 | # print traceback 84 | import traceback 85 | 86 | traceback.print_exc() 87 | 88 | 89 | # ROW_LIST = [ 90 | # {"EXPERIMENTAL.USE_PREV_IMAGE": False}, 91 | # {"EXPERIMENTAL.USE_PREV_IMAGE": True}, 92 | # {"EXPERIMENTAL.USE_EVEN_PREV_IMAGE": True}, 93 | # {"EXPERIMENTAL.SHUFFLE_IMAGES": True}, 94 | # ] 95 | # - 96 | if __name__ == "__main__": 97 | parser = get_parser() 98 | args = parser.parse_args() 99 | t = args.time if args.time > 0 else None 100 | 101 | cfg = load_from_yaml("/workspace/configs/dev.yaml") 102 | 103 | cfg = get_cfg_defaults() 104 | cfg.DATASET.ROIS = ["all"] 105 | cfg.DATASET.FMRI_SPACE = 'fship_b2' 106 | cfg.DATASET.SUBJECT_LIST = ['subj01'] 107 | 108 | cfg.RESULTS_DIR = "/nfscc/ray_results/b2b3/" 109 | 110 | cfg.EXPERIMENTAL.USE_DEV_MODEL = True 111 | 112 | cfg.EXPERIMENTAL.USE_PREV_FRAME = True 113 | cfg.EXPERIMENTAL.BLANK_IMAGE = False 114 | 
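# The grid below only varies DATASET.FMRI_SPACE ('fship_b2' vs. 'fship'), i.e. the
# b2 vs. b3 beta preparations (matching the b2/b3 naming used elsewhere), with
# 3 samples per setting on subj01.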
cfg.EXPERIMENTAL.USE_RETINA_MAPPER = False 115 | cfg.EXPERIMENTAL.USE_LAYER_SELECTOR = False 116 | cfg.EXPERIMENTAL.USE_BHV = True 117 | cfg.EXPERIMENTAL.USE_BHV_PASSTHROUGH = True 118 | cfg.EXPERIMENTAL.BACKBONE_NOGRAD = True 119 | 120 | tune_config = { 121 | "DATASET.FMRI_SPACE": tune.grid_search(['fship_b2', 'fship']), 122 | } 123 | name = f"b2b3" 124 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 3, t) -------------------------------------------------------------------------------- /mem/scripts_tune/tune_backbone.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from cluster_utils import my_nfs_cluster_job, trial_dirname_creator 3 | 4 | import argparse 5 | import os 6 | import sys 7 | from random import seed, shuffle 8 | 9 | import numpy as np 10 | import ray 11 | from ray import tune 12 | 13 | from config_utils import dict_to_list, get_cfg_defaults, load_from_yaml 14 | 15 | from train_utils import max_batch_size, simple_train 16 | 17 | 18 | def get_parser(): 19 | parser = argparse.ArgumentParser(description="Ray Tune") 20 | 21 | parser.add_argument( 22 | "-v", "--verbose", action="store_true", help="verbose", default=False 23 | ) 24 | 25 | parser.add_argument( 26 | "-p", "--progress", action="store_true", help="progress", default=False 27 | ) 28 | 29 | parser.add_argument( 30 | "--rm", action="store_true", default=False, help="Remove all previous results" 31 | ) 32 | 33 | parser.add_argument( 34 | "--name", type=str, default="debug", help="Name of the experiment" 35 | ) 36 | return parser 37 | 38 | BACKBONEC = { 39 | 'clip_vit_l': (224, [5, 11, 17, 23], [1024, 1024, 1024, 1024], [2048, 2048, 2048, 1024]), 40 | 'clip_vit_b': (224, [2, 5, 8, 11], [768, 768, 768, 768], [1536, 1536, 1536, 768]), 41 | 'clip_vit_s': (224, [2, 5, 8, 11], [768, 768, 768, 768], [1536, 1536, 1536, 768]), 42 | 'eva_clip_l': (224, [5, 11, 17, 23], [1024, 1024, 1024, 1024], [2048, 2048, 2048, 1024]), 43 | 'eva_clip_b': (224, [2, 5, 8, 11], [768, 768, 768, 768], [1536, 1536, 1536, 768]), 44 | 'dinov2_vit_l': (224, [5, 11, 17, 23], [1024, 1024, 1024, 1024], [2048, 2048, 2048, 1024]), 45 | 'dinov2_vit_b': (224, [2, 5, 8, 11], [768, 768, 768, 768], [1536, 1536, 1536, 768]), 46 | 'dinov2_vit_s': (224, [2, 5, 8, 11], [384, 384, 384, 384], [768, 768, 768, 384]), 47 | } 48 | 49 | @my_nfs_cluster_job 50 | def job(tune_dict, cfg, progress=False, **kwargs): 51 | if "row" in tune_dict: 52 | global ROW_LIST 53 | row = tune_dict["row"] 54 | tune_dict.pop("row") 55 | print(ROW_LIST[row]) 56 | tune_dict.update(ROW_LIST[row]) 57 | 58 | cfg.merge_from_list(dict_to_list(tune_dict)) 59 | 60 | reso, layers, dim, dim2 = BACKBONEC[cfg.MODEL.BACKBONE.NAME] 61 | cfg.DATASET.IMAGE_RESOLUTION = [reso, reso] 62 | cfg.MODEL.BACKBONE.LAYERS = layers 63 | cfg.MODEL.BACKBONE.FEATURE_DIMS = dim 64 | cfg.MODEL.BACKBONE.CLS_DIMS = dim2 65 | 66 | cfg = max_batch_size(cfg) 67 | 68 | ret = simple_train( 69 | cfg=cfg, 70 | progress=progress, 71 | **kwargs, 72 | ) 73 | 74 | 75 | def run_ray( 76 | name, cfg, tune_config, rm=False, progress=False, verbose=False, num_samples=1 77 | ): 78 | cfg = copy.deepcopy(cfg) 79 | if rm: 80 | import shutil 81 | 82 | shutil.rmtree(os.path.join(cfg.RESULTS_DIR, name), ignore_errors=True) 83 | 84 | ana = tune.run( 85 | tune.with_parameters(job, cfg=cfg, progress=progress), 86 | local_dir=cfg.RESULTS_DIR, 87 | config=tune_config, 88 | resources_per_trial={"cpu": 1, "gpu": 1}, 89 | num_samples=num_samples, 90 | name=name, 91 | verbose=verbose, 
92 | resume="AUTO+ERRORED", 93 | trial_dirname_creator=trial_dirname_creator, 94 | ) 95 | 96 | 97 | # ROW_LIST = [ 98 | # {"EXPERIMENTAL.USE_PREV_IMAGE": False}, 99 | # {"EXPERIMENTAL.USE_PREV_IMAGE": True}, 100 | # {"EXPERIMENTAL.USE_EVEN_PREV_IMAGE": True}, 101 | # {"EXPERIMENTAL.SHUFFLE_IMAGES": True}, 102 | # ] 103 | # - 104 | if __name__ == "__main__": 105 | parser = get_parser() 106 | args = parser.parse_args() 107 | 108 | cfg = load_from_yaml("/workspace/configs/xvaa.yaml") 109 | 110 | cfg.DATASET.SUBJECT_LIST = ['subj01'] 111 | 112 | cfg.RESULTS_DIR = "/nfscc/ray_results/backbone" 113 | 114 | tune_config = { 115 | # "DATASET.SUBJECT_LIST": tune.grid_search([["subj01"], ["subj04"], ['subj06'], ['subj08']]), 116 | # "MODEL.BACKBONE.NAME": tune.grid_search(['dinov2_vit_l', 'dinov2_vit_b', 'eva_clip_l', 'eva_clip_b', 'clip_vit_l', 'clip_vit_b']), 117 | "MODEL.BACKBONE.NAME": tune.grid_search(['eva_clip_l', 'eva_clip_b']), 118 | } 119 | name = "eva" 120 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 4) 121 | -------------------------------------------------------------------------------- /mem/scripts_tune/tune_behvmkii.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from cluster_utils import my_nfs_cluster_job, trial_dirname_creator 3 | 4 | import argparse 5 | import os 6 | import sys 7 | from random import seed, shuffle 8 | 9 | import numpy as np 10 | import ray 11 | from ray import tune 12 | 13 | from config_utils import dict_to_list, get_cfg_defaults, load_from_yaml 14 | 15 | from train_utils import max_batch_size, simple_train 16 | 17 | 18 | def get_parser(): 19 | parser = argparse.ArgumentParser(description="Ray Tune") 20 | 21 | parser.add_argument( 22 | "-v", "--verbose", action="store_true", help="verbose", default=False 23 | ) 24 | 25 | parser.add_argument( 26 | "-p", "--progress", action="store_true", help="progress", default=False 27 | ) 28 | 29 | parser.add_argument( 30 | "--rm", action="store_true", default=False, help="Remove all previous results" 31 | ) 32 | 33 | parser.add_argument( 34 | "--name", type=str, default="debug", help="Name of the experiment" 35 | ) 36 | return parser 37 | 38 | BACKBONEC = { 39 | 'clip_vit_l': (224, [5, 11, 17, 23], [1024, 1024, 1024, 1024], [2048, 2048, 2048, 1024]), 40 | 'clip_vit_b': (224, [2, 5, 8, 11], [768, 768, 768, 768], [1536, 1536, 1536, 768]), 41 | 'clip_vit_s': (224, [2, 5, 8, 11], [768, 768, 768, 768], [1536, 1536, 1536, 768]), 42 | 'dinov2_vit_l': (224, [5, 11, 17, 23], [1024, 1024, 1024, 1024], [2048, 2048, 2048, 1024]), 43 | 'dinov2_vit_b': (224, [2, 5, 8, 11], [768, 768, 768, 768], [1536, 1536, 1536, 768]), 44 | 'dinov2_vit_s': (224, [2, 5, 8, 11], [384, 384, 384, 384], [768, 768, 768, 384]), 45 | } 46 | 47 | @my_nfs_cluster_job 48 | def job(tune_dict, cfg, progress=False, **kwargs): 49 | if "row" in tune_dict: 50 | global ROW_LIST 51 | row = tune_dict["row"] 52 | tune_dict.pop("row") 53 | print(ROW_LIST[row]) 54 | tune_dict.update(ROW_LIST[row]) 55 | 56 | cfg.merge_from_list(dict_to_list(tune_dict)) 57 | 58 | reso, layers, dim, dim2 = BACKBONEC[cfg.MODEL.BACKBONE.NAME] 59 | cfg.DATASET.IMAGE_RESOLUTION = [reso, reso] 60 | cfg.MODEL.BACKBONE.LAYERS = layers 61 | cfg.MODEL.BACKBONE.FEATURE_DIMS = dim 62 | cfg.MODEL.BACKBONE.CLS_DIMS = dim2 63 | 64 | cfg = max_batch_size(cfg) 65 | 66 | ret = simple_train( 67 | cfg=cfg, 68 | progress=progress, 69 | **kwargs, 70 | ) 71 | 72 | 73 | def run_ray( 74 | name, cfg, tune_config, rm=False, progress=False, 
verbose=False, num_samples=1 75 | ): 76 | cfg = copy.deepcopy(cfg) 77 | if rm: 78 | import shutil 79 | 80 | shutil.rmtree(os.path.join(cfg.RESULTS_DIR, name), ignore_errors=True) 81 | 82 | ana = tune.run( 83 | tune.with_parameters(job, cfg=cfg, progress=progress), 84 | local_dir=cfg.RESULTS_DIR, 85 | config=tune_config, 86 | resources_per_trial={"cpu": 1, "gpu": 1}, 87 | num_samples=num_samples, 88 | name=name, 89 | verbose=verbose, 90 | resume="AUTO+ERRORED", 91 | trial_dirname_creator=trial_dirname_creator, 92 | ) 93 | 94 | 95 | # ROW_LIST = [ 96 | # {"EXPERIMENTAL.USE_PREV_IMAGE": False}, 97 | # {"EXPERIMENTAL.USE_PREV_IMAGE": True}, 98 | # {"EXPERIMENTAL.USE_EVEN_PREV_IMAGE": True}, 99 | # {"EXPERIMENTAL.SHUFFLE_IMAGES": True}, 100 | # ] 101 | # - 102 | if __name__ == "__main__": 103 | parser = get_parser() 104 | args = parser.parse_args() 105 | 106 | # cfg = load_from_yaml("/workspace/configs/dino_t1.yaml") 107 | 108 | cfg = get_cfg_defaults() 109 | cfg.DATAMODULE.BATCH_SIZE = 16 110 | cfg.TRAINER.ACCUMULATE_GRAD_BATCHES = 1 111 | cfg.OPTIMIZER.LR = 1e-3 112 | cfg.OPTIMIZER.NAME = "AdamW" 113 | cfg.DATASET.ROIS = ["orig"] 114 | cfg.DATASET.FMRI_SPACE = 'fsaverage' 115 | cfg.DATASET.SUBJECT_LIST = ["subj01"] 116 | cfg.MODEL.BACKBONE.NAME = "dinov2_vit_s" 117 | cfg.TRAINER.CALLBACKS.EARLY_STOP.PATIENCE = 20 118 | cfg.MODEL.CONV_HEAD.SIMPLE = True 119 | cfg.MODEL.CONV_HEAD.WIDTH = 64 120 | cfg.MODEL.CONV_HEAD.MAX_DIM = 64 121 | cfg.MODEL.MAX_TRAIN_VOXELS = 1145141919810 122 | 123 | cfg.TRAINER.PRECISION = 16 124 | 125 | cfg.TRAINER.LIMIT_TRAIN_BATCHES = 0.1 126 | cfg.TRAINER.LIMIT_VAL_BATCHES = 0.5 127 | 128 | cfg.RESULTS_DIR = "/nfscc/ray_results/behavior_mkii" 129 | 130 | cfg.EXPERIMENTAL.USE_PREV_FRAME = False 131 | cfg.EXPERIMENTAL.STRAIGHT_FORWARD = True 132 | cfg.EXPERIMENTAL.STRAIGHT_FORWARD_BUT_KEEP_BACKBONE_GRAD = True 133 | cfg.EXPERIMENTAL.ANOTHER_SPLIT = False 134 | cfg.EXPERIMENTAL.USE_DEV_MODEL = True 135 | cfg.EXPERIMENTAL.BLANK_IMAGE = True 136 | cfg.EXPERIMENTAL.BEHV_ONLY = True 137 | 138 | cfg.MODEL.BACKBONE.LORA.SCALE = 0.2 139 | cfg.MODEL.BACKBONE.ADAPTIVE_LN.SCALE = 0.5 140 | tune_config = { 141 | "DATASET.SUBJECT_LIST": tune.grid_search([["subj01"], ['subj01', 'subj02', 'subj03', 'subj04', 'subj05', 'subj06', 'subj07', 'subj08']]), 142 | "DATAMODULE.BATCH_SIZE": tune.sample_from(lambda spec: len(spec.config['DATASET.SUBJECT_LIST']) * 16), 143 | # "EXPERIMENTAL.T_IMAGE": tune.grid_search([0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16]), 144 | # "EXPERIMENTAL.BLANK_IMAGE": tune.grid_search([True, False]), 145 | # "MODEL.BACKBONE.ADAPTIVE_LN.SCALE": tune.grid_search([0.5, 0.0]), 146 | # "EXPERIMENTAL.BEHV_SELECTION": tune.grid_search([p, f, pf, c]), 147 | # "MODEL.COND.IN_DIM": tune.sample_from(lambda spec: int(len(spec.config['EXPERIMENTAL.BEHV_SELECTION'])) if spec.config['EXPERIMENTAL.BEHV_SELECTION'] != [-1] else 108), 148 | # "EXPERIMENTAL.T_IMAGE": tune.grid_search([-1, -2, -3, -4, -5, -6, -7, -8, -9]), 149 | } 150 | name = "subjects" 151 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 1) -------------------------------------------------------------------------------- /mem/scripts_tune/tune_bottleneck.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from cluster_utils import my_nfs_cluster_job, trial_dirname_creator 3 | 4 | import argparse 5 | import os 6 | import sys 7 | from random import seed, shuffle 8 | 9 | import numpy as np 10 | import ray 11 | from 
ray import tune 12 | 13 | from config_utils import dict_to_list, get_cfg_defaults, load_from_yaml 14 | 15 | from train_utils import max_batch_size, simple_train 16 | 17 | 18 | def get_parser(): 19 | parser = argparse.ArgumentParser(description="Ray Tune") 20 | 21 | parser.add_argument( 22 | "-v", "--verbose", action="store_true", help="verbose", default=False 23 | ) 24 | 25 | parser.add_argument( 26 | "-p", "--progress", action="store_true", help="progress", default=False 27 | ) 28 | 29 | parser.add_argument( 30 | "--rm", action="store_true", default=False, help="Remove all previous results" 31 | ) 32 | 33 | parser.add_argument( 34 | "--name", type=str, default="debug", help="Name of the experiment" 35 | ) 36 | return parser 37 | 38 | BACKBONEC = { 39 | 'clip_vit_l': (224, [5, 11, 17, 23], [1024, 1024, 1024, 1024], [2048, 2048, 2048, 1024]), 40 | 'clip_vit_b': (224, [2, 5, 8, 11], [768, 768, 768, 768], [1536, 1536, 1536, 768]), 41 | 'clip_vit_s': (224, [2, 5, 8, 11], [768, 768, 768, 768], [1536, 1536, 1536, 768]), 42 | 'dinov2_vit_l': (224, [5, 11, 17, 23], [1024, 1024, 1024, 1024], [2048, 2048, 2048, 1024]), 43 | 'dinov2_vit_b': (224, [2, 5, 8, 11], [768, 768, 768, 768], [1536, 1536, 1536, 768]), 44 | 'dinov2_vit_s': (224, [2, 5, 8, 11], [384, 384, 384, 384], [768, 768, 768, 384]), 45 | } 46 | 47 | @my_nfs_cluster_job 48 | def job(tune_dict, cfg, progress=False, **kwargs): 49 | if "row" in tune_dict: 50 | global ROW_LIST 51 | row = tune_dict["row"] 52 | tune_dict.pop("row") 53 | print(ROW_LIST[row]) 54 | tune_dict.update(ROW_LIST[row]) 55 | 56 | cfg.merge_from_list(dict_to_list(tune_dict)) 57 | 58 | reso, layers, dim, dim2 = BACKBONEC[cfg.MODEL.BACKBONE.NAME] 59 | cfg.DATASET.IMAGE_RESOLUTION = [reso, reso] 60 | cfg.MODEL.BACKBONE.LAYERS = layers 61 | cfg.MODEL.BACKBONE.FEATURE_DIMS = dim 62 | cfg.MODEL.BACKBONE.CLS_DIMS = dim2 63 | 64 | cfg = max_batch_size(cfg) 65 | 66 | ret = simple_train( 67 | cfg=cfg, 68 | progress=progress, 69 | **kwargs, 70 | ) 71 | 72 | 73 | def run_ray( 74 | name, cfg, tune_config, rm=False, progress=False, verbose=False, num_samples=1 75 | ): 76 | cfg = copy.deepcopy(cfg) 77 | if rm: 78 | import shutil 79 | 80 | shutil.rmtree(os.path.join(cfg.RESULTS_DIR, name), ignore_errors=True) 81 | 82 | ana = tune.run( 83 | tune.with_parameters(job, cfg=cfg, progress=progress), 84 | local_dir=cfg.RESULTS_DIR, 85 | config=tune_config, 86 | resources_per_trial={"cpu": 1, "gpu": 1}, 87 | num_samples=num_samples, 88 | name=name, 89 | verbose=verbose, 90 | resume="AUTO+ERRORED", 91 | trial_dirname_creator=trial_dirname_creator, 92 | ) 93 | 94 | 95 | ROW_LIST = [ 96 | {"MODEL.BOTTLENECK.RANK": 8, 97 | "MODEL.BOTTLENECK.OUT_DIM": 8, 98 | "MODEL.CONV_HEAD.WIDTH": 256,}, 99 | {"MODEL.BOTTLENECK.RANK": 16, 100 | "MODEL.BOTTLENECK.OUT_DIM": 16, 101 | "MODEL.CONV_HEAD.WIDTH": 256,}, 102 | {"MODEL.BOTTLENECK.RANK": 32, 103 | "MODEL.BOTTLENECK.OUT_DIM": 32, 104 | "MODEL.CONV_HEAD.WIDTH": 256,}, 105 | {"MODEL.BOTTLENECK.RANK": -1, 106 | "MODEL.BOTTLENECK.OUT_DIM": 1, 107 | "MODEL.CONV_HEAD.WIDTH": 256,}, 108 | ] 109 | # - 110 | if __name__ == "__main__": 111 | parser = get_parser() 112 | args = parser.parse_args() 113 | 114 | # cfg = load_from_yaml("/workspace/configs/dino_t1.yaml") 115 | 116 | cfg = get_cfg_defaults() 117 | cfg.DATAMODULE.BATCH_SIZE = 16 118 | cfg.TRAINER.ACCUMULATE_GRAD_BATCHES = 2 119 | cfg.OPTIMIZER.LR = 1e-3 120 | cfg.OPTIMIZER.NAME = "AdamW" 121 | cfg.DATASET.ROIS = ["all"] 122 | cfg.DATASET.FMRI_SPACE = 'fsaverage' 123 | cfg.DATASET.SUBJECT_LIST = ["subj01"] 124 | 
125 | cfg.TRAINER.PRECISION = 16 126 | 127 | cfg.TRAINER.LIMIT_TRAIN_BATCHES = 0.1 128 | cfg.TRAINER.LIMIT_VAL_BATCHES = 1.0 129 | 130 | cfg.MODEL.COND.DROPOUT = 0.2 131 | cfg.MODEL.BACKBONE.LORA.SCALE = 0.2 132 | 133 | cfg.RESULTS_DIR = "/nfscc/ray_results/bottleneck" 134 | 135 | 136 | cfg.EXPERIMENTAL.USE_PREV_FRAME = False 137 | 138 | cfg.MODEL.BACKBONE.ADAPTIVE_LN.SCALE = 0.5 139 | 140 | tune_config = { 141 | # "DATASET.SUBJECT_LIST": tune.grid_search([["subj01"], ["subj04"]]), 142 | "row": tune.grid_search(list(range(len(ROW_LIST)))), 143 | } 144 | name = "linear" 145 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 3) 146 | -------------------------------------------------------------------------------- /mem/scripts_tune/tune_fs_1mm.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from cluster_utils import my_nfs_cluster_job, trial_dirname_creator 3 | 4 | import argparse 5 | import os 6 | import sys 7 | from random import seed, shuffle 8 | 9 | import numpy as np 10 | import ray 11 | from ray import tune 12 | 13 | from config_utils import dict_to_list, get_cfg_defaults, load_from_yaml 14 | 15 | from train_utils import max_batch_size, simple_train 16 | 17 | 18 | def get_parser(): 19 | parser = argparse.ArgumentParser(description="Ray Tune") 20 | 21 | parser.add_argument( 22 | "-v", "--verbose", action="store_true", help="verbose", default=False 23 | ) 24 | 25 | parser.add_argument( 26 | "-p", "--progress", action="store_true", help="progress", default=False 27 | ) 28 | 29 | parser.add_argument( 30 | "--rm", action="store_true", default=False, help="Remove all previous results" 31 | ) 32 | 33 | parser.add_argument( 34 | "--name", type=str, default="debug", help="Name of the experiment" 35 | ) 36 | parser.add_argument( 37 | "--time", type=int, default=-1, help="Time limit of the experiment" 38 | ) 39 | return parser 40 | 41 | BACKBONEC = { 42 | 'clip_vit_l': (224, [5, 11, 17, 23], [1024, 1024, 1024, 1024], [2048, 2048, 2048, 1024]), 43 | 'clip_vit_b': (224, [2, 5, 8, 11], [768, 768, 768, 768], [1536, 1536, 1536, 768]), 44 | 'clip_vit_s': (224, [2, 5, 8, 11], [768, 768, 768, 768], [1536, 1536, 1536, 768]), 45 | 'dinov2_vit_l': (224, [5, 11, 17, 23], [1024, 1024, 1024, 1024], [2048, 2048, 2048, 1024]), 46 | 'dinov2_vit_b': (224, [2, 5, 8, 11], [768, 768, 768, 768], [1536, 1536, 1536, 768]), 47 | 'dinov2_vit_s': (224, [2, 5, 8, 11], [384, 384, 384, 384], [768, 768, 768, 384]), 48 | } 49 | 50 | @my_nfs_cluster_job 51 | def job(tune_dict, cfg, progress=False, **kwargs): 52 | if "row" in tune_dict: 53 | global ROW_LIST 54 | row = tune_dict["row"] 55 | tune_dict.pop("row") 56 | print(ROW_LIST[row]) 57 | tune_dict.update(ROW_LIST[row]) 58 | 59 | cfg.merge_from_list(dict_to_list(tune_dict)) 60 | 61 | reso, layers, dim, dim2 = BACKBONEC[cfg.MODEL.BACKBONE.NAME] 62 | cfg.DATASET.IMAGE_RESOLUTION = [reso, reso] 63 | cfg.MODEL.BACKBONE.LAYERS = layers 64 | cfg.MODEL.BACKBONE.FEATURE_DIMS = dim 65 | cfg.MODEL.BACKBONE.CLS_DIMS = dim2 66 | 67 | cfg = max_batch_size(cfg) 68 | 69 | ret = simple_train( 70 | cfg=cfg, 71 | progress=progress, 72 | **kwargs, 73 | ) 74 | 75 | 76 | def run_ray( 77 | name, cfg, tune_config, rm=False, progress=False, verbose=False, num_samples=1, time_budget_s=None 78 | ): 79 | cfg = copy.deepcopy(cfg) 80 | if rm: 81 | import shutil 82 | 83 | shutil.rmtree(os.path.join(cfg.RESULTS_DIR, name), ignore_errors=True) 84 | 85 | try: 86 | ana = tune.run( 87 | tune.with_parameters(job, cfg=cfg, 
progress=progress), 88 | local_dir=cfg.RESULTS_DIR, 89 | config=tune_config, 90 | resources_per_trial={"cpu": 1, "gpu": 1}, 91 | num_samples=num_samples, 92 | name=name, 93 | verbose=verbose, 94 | resume="AUTO+ERRORED", 95 | trial_dirname_creator=trial_dirname_creator, 96 | time_budget_s=time_budget_s 97 | ) 98 | except Exception as e: 99 | print(e) 100 | # print traceback 101 | import traceback 102 | 103 | traceback.print_exc() 104 | 105 | 106 | # ROW_LIST = [ 107 | # {"EXPERIMENTAL.USE_PREV_IMAGE": False}, 108 | # {"EXPERIMENTAL.USE_PREV_IMAGE": True}, 109 | # {"EXPERIMENTAL.USE_EVEN_PREV_IMAGE": True}, 110 | # {"EXPERIMENTAL.SHUFFLE_IMAGES": True}, 111 | # ] 112 | # - 113 | if __name__ == "__main__": 114 | parser = get_parser() 115 | args = parser.parse_args() 116 | t = args.time if args.time > 0 else None 117 | 118 | cfg = get_cfg_defaults() 119 | cfg.DATAMODULE.BATCH_SIZE = 16 120 | cfg.DATAMODULE.NUM_WORKERS = 8 121 | cfg.TRAINER.ACCUMULATE_GRAD_BATCHES = 1 122 | cfg.OPTIMIZER.LR = 1e-3 123 | cfg.OPTIMIZER.NAME = "AdamW" 124 | cfg.DATASET.ROIS = ["orig"] 125 | cfg.DATASET.FMRI_SPACE = 'fship' 126 | cfg.DATASET.SUBJECT_LIST = ["subj01"] 127 | cfg.MODEL.BACKBONE.NAME = "dinov2_vit_b" 128 | cfg.TRAINER.CALLBACKS.EARLY_STOP.PATIENCE = 10 129 | cfg.MODEL.CONV_HEAD.SIMPLE = True 130 | cfg.MODEL.CONV_HEAD.WIDTH = 256 131 | cfg.MODEL.CONV_HEAD.MAX_DIM = 768 132 | cfg.MODEL.MAX_TRAIN_VOXELS = 1145141919810 133 | cfg.TRAINER.PRECISION = 16 134 | cfg.TRAINER.LIMIT_TRAIN_BATCHES = 0.3 135 | cfg.TRAINER.LIMIT_VAL_BATCHES = 1.0 136 | 137 | cfg.RESULTS_DIR = "/nfscc/ray_results/fs_1mm/" 138 | 139 | 140 | cfg.EXPERIMENTAL.USE_PREV_FRAME = False 141 | cfg.EXPERIMENTAL.STRAIGHT_FORWARD = False 142 | cfg.EXPERIMENTAL.STRAIGHT_FORWARD_BUT_KEEP_BACKBONE_GRAD = False 143 | cfg.EXPERIMENTAL.BLANK_IMAGE = False 144 | cfg.EXPERIMENTAL.ANOTHER_SPLIT = False 145 | cfg.EXPERIMENTAL.SHUFFLE_VAL = False 146 | 147 | cfg.MODEL.BACKBONE.LORA.SCALE = 0.2 148 | cfg.MODEL.BACKBONE.ADAPTIVE_LN.SCALE = 0.5 149 | 150 | tune_config = { 151 | "DATASET.FMRI_SPACE": tune.grid_search(['fsaverage', 'func1mm']), 152 | "DATASET.ROIS": tune.sample_from(lambda spec: ['E', 'ML', 'MV', 'MP', 'L', 'V', 'P'] if spec.config['DATASET.FMRI_SPACE'] == 'fsaverage' else ['nsdgeneral']), 153 | } 154 | name = f"is_1mm_good" 155 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 1, t) 156 | -------------------------------------------------------------------------------- /mem/scripts_tune/tune_longtraining.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from cluster_utils import my_nfs_cluster_job, trial_dirname_creator 3 | 4 | import argparse 5 | import os 6 | import sys 7 | from random import seed, shuffle 8 | 9 | import numpy as np 10 | import ray 11 | from ray import tune 12 | 13 | from config_utils import dict_to_list, get_cfg_defaults, load_from_yaml 14 | 15 | from train_utils import max_batch_size, modular_train, simple_train 16 | 17 | 18 | def get_parser(): 19 | parser = argparse.ArgumentParser(description="Ray Tune") 20 | 21 | parser.add_argument( 22 | "-v", "--verbose", action="store_true", help="verbose", default=False 23 | ) 24 | 25 | parser.add_argument( 26 | "-p", "--progress", action="store_true", help="progress", default=False 27 | ) 28 | 29 | parser.add_argument( 30 | "--rm", action="store_true", default=False, help="Remove all previous results" 31 | ) 32 | 33 | parser.add_argument( 34 | "--name", type=str, default="debug", help="Name of the 
experiment" 35 | ) 36 | parser.add_argument( 37 | "--time", type=int, default=-1, help="Time limit of the experiment" 38 | ) 39 | parser.add_argument( 40 | "--topyneck_path", type=str, default="/nfscc/alg23/xvab/mem/topyneck.pth", help="Path to topyneck" 41 | ) 42 | return parser 43 | 44 | 45 | @my_nfs_cluster_job 46 | def job(tune_dict, cfg, progress=False, **kwargs): 47 | topyneck_path = kwargs.pop('topyneck_path') 48 | 49 | cfg.merge_from_list(dict_to_list(tune_dict)) 50 | 51 | cfg = max_batch_size(cfg) 52 | 53 | ret = simple_train( 54 | cfg=cfg, 55 | progress=progress, 56 | topyneck_path=topyneck_path, 57 | rm_soup=True, 58 | **kwargs, 59 | ) 60 | 61 | 62 | def run_ray( 63 | name, cfg, tune_config, rm=False, progress=False, verbose=False, num_samples=1, time_budget_s=None, topyneck_path=None 64 | ): 65 | cfg = copy.deepcopy(cfg) 66 | if rm: 67 | import shutil 68 | 69 | shutil.rmtree(os.path.join(cfg.RESULTS_DIR, name), ignore_errors=True) 70 | 71 | try: 72 | ana = tune.run( 73 | tune.with_parameters(job, cfg=cfg, progress=progress, topyneck_path=topyneck_path), 74 | local_dir=cfg.RESULTS_DIR, 75 | config=tune_config, 76 | resources_per_trial={"cpu": 1, "gpu": 1}, 77 | num_samples=num_samples, 78 | name=name, 79 | verbose=verbose, 80 | resume="AUTO+ERRORED", 81 | trial_dirname_creator=trial_dirname_creator, 82 | time_budget_s=time_budget_s 83 | ) 84 | except Exception as e: 85 | print(e) 86 | import traceback 87 | 88 | traceback.print_exc() 89 | 90 | 91 | if __name__ == "__main__": 92 | parser = get_parser() 93 | args = parser.parse_args() 94 | t = args.time if args.time > 0 else None 95 | 96 | cfg = load_from_yaml("/workspace/configs/xvba.yaml") 97 | cfg.RESULTS_DIR = "/nfscc/ray_results/longtrain/" 98 | cfg.DATASET.SUBJECT_LIST = ['subj01'] 99 | cfg.TRAINER.MAX_EPOCHS = 1000 100 | cfg.TRAINER.CALLBACKS.EARLY_STOP.PATIENCE = 114514 101 | # cfg.DATAMODULE.BATCH_SIZE = 32 102 | # cfg.OPTIMIZER.LR = 1e-3 103 | 104 | tune_config = { 105 | "DATASET.SUBJECT_LIST": tune.grid_search([['subj01'], ['subj04']]), 106 | "DATASET.ROIS": tune.grid_search([['E'], ['added']]), 107 | } 108 | name = f"onesub" 109 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 1, t, args.topyneck_path) 110 | -------------------------------------------------------------------------------- /mem/scripts_tune/tune_lrbsz.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from cluster_utils import my_nfs_cluster_job, trial_dirname_creator 3 | 4 | import argparse 5 | import os 6 | import sys 7 | from random import seed, shuffle 8 | 9 | import numpy as np 10 | import ray 11 | from ray import tune 12 | 13 | from config_utils import dict_to_list, get_cfg_defaults, load_from_yaml 14 | 15 | from train_utils import max_batch_size, simple_train 16 | 17 | 18 | def get_parser(): 19 | parser = argparse.ArgumentParser(description="Ray Tune") 20 | 21 | parser.add_argument( 22 | "-v", "--verbose", action="store_true", help="verbose", default=False 23 | ) 24 | 25 | parser.add_argument( 26 | "-p", "--progress", action="store_true", help="progress", default=False 27 | ) 28 | 29 | parser.add_argument( 30 | "--rm", action="store_true", default=False, help="Remove all previous results" 31 | ) 32 | 33 | parser.add_argument( 34 | "--name", type=str, default="debug", help="Name of the experiment" 35 | ) 36 | parser.add_argument( 37 | "--time", type=int, default=-1, help="Time limit of the experiment" 38 | ) 39 | return parser 40 | 41 | BACKBONEC = { 42 | 'clip_vit_l': 
(224, [5, 11, 17, 23], [1024, 1024, 1024, 1024], [2048, 2048, 2048, 1024]), 43 | 'clip_vit_b': (224, [2, 5, 8, 11], [768, 768, 768, 768], [1536, 1536, 1536, 768]), 44 | 'clip_vit_s': (224, [2, 5, 8, 11], [768, 768, 768, 768], [1536, 1536, 1536, 768]), 45 | 'dinov2_vit_l': (224, [5, 11, 17, 23], [1024, 1024, 1024, 1024], [2048, 2048, 2048, 1024]), 46 | 'dinov2_vit_b': (224, [2, 5, 8, 11], [768, 768, 768, 768], [1536, 1536, 1536, 768]), 47 | 'dinov2_vit_s': (224, [2, 5, 8, 11], [384, 384, 384, 384], [768, 768, 768, 384]), 48 | } 49 | 50 | @my_nfs_cluster_job 51 | def job(tune_dict, cfg, progress=False, **kwargs): 52 | if "row" in tune_dict: 53 | global ROW_LIST 54 | row = tune_dict["row"] 55 | tune_dict.pop("row") 56 | print(ROW_LIST[row]) 57 | tune_dict.update(ROW_LIST[row]) 58 | 59 | cfg.merge_from_list(dict_to_list(tune_dict)) 60 | 61 | reso, layers, dim, dim2 = BACKBONEC[cfg.MODEL.BACKBONE.NAME] 62 | cfg.DATASET.IMAGE_RESOLUTION = [reso, reso] 63 | cfg.MODEL.BACKBONE.LAYERS = layers 64 | cfg.MODEL.BACKBONE.FEATURE_DIMS = dim 65 | cfg.MODEL.BACKBONE.CLS_DIMS = dim2 66 | 67 | cfg = max_batch_size(cfg) 68 | 69 | ret = simple_train( 70 | cfg=cfg, 71 | progress=progress, 72 | rm_soup=True, 73 | **kwargs, 74 | ) 75 | 76 | 77 | def run_ray( 78 | name, cfg, tune_config, rm=False, progress=False, verbose=False, num_samples=1, time_budget_s=None 79 | ): 80 | cfg = copy.deepcopy(cfg) 81 | if rm: 82 | import shutil 83 | 84 | shutil.rmtree(os.path.join(cfg.RESULTS_DIR, name), ignore_errors=True) 85 | 86 | try: 87 | ana = tune.run( 88 | tune.with_parameters(job, cfg=cfg, progress=progress), 89 | local_dir=cfg.RESULTS_DIR, 90 | config=tune_config, 91 | resources_per_trial={"cpu": 1, "gpu": 1}, 92 | num_samples=num_samples, 93 | name=name, 94 | verbose=verbose, 95 | resume="AUTO+ERRORED", 96 | trial_dirname_creator=trial_dirname_creator, 97 | time_budget_s=time_budget_s 98 | ) 99 | except Exception as e: 100 | print(e) 101 | # print traceback 102 | import traceback 103 | 104 | traceback.print_exc() 105 | 106 | 107 | # ROW_LIST = [ 108 | # {"EXPERIMENTAL.USE_PREV_IMAGE": False}, 109 | # {"EXPERIMENTAL.USE_PREV_IMAGE": True}, 110 | # {"EXPERIMENTAL.USE_EVEN_PREV_IMAGE": True}, 111 | # {"EXPERIMENTAL.SHUFFLE_IMAGES": True}, 112 | # ] 113 | # - 114 | if __name__ == "__main__": 115 | parser = get_parser() 116 | args = parser.parse_args() 117 | t = args.time if args.time > 0 else None 118 | 119 | cfg = load_from_yaml("/workspace/configs/debug.yaml") 120 | 121 | cfg.RESULTS_DIR = "/nfscc/ray_results/lrbsz/" 122 | 123 | tune_config = { 124 | "DATASET.SUBJECT_LIST": tune.grid_search([['subj01']]), 125 | # "DATAMODULE.BATCH_SIZE": tune.grid_search([8, 16, 32]), 126 | # "DATASET.ROIS": tune.grid_search([['all'], ['orig'], ['E'], ['P']]) 127 | "OPTIMIZER.LR": tune.grid_search([1e-3, 3e-4, 1e-4]), 128 | } 129 | name = f"lr" 130 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 3, t) -------------------------------------------------------------------------------- /mem/scripts_tune/tune_modelsize.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from cluster_utils import my_nfs_cluster_job, trial_dirname_creator 3 | 4 | import argparse 5 | import os 6 | import sys 7 | from random import seed, shuffle 8 | 9 | import numpy as np 10 | import ray 11 | from ray import tune 12 | 13 | from config_utils import dict_to_list, get_cfg_defaults, load_from_yaml 14 | 15 | from train_utils import max_batch_size, simple_train 16 | 17 | 18 | def get_parser(): 
19 | parser = argparse.ArgumentParser(description="Ray Tune") 20 | 21 | parser.add_argument( 22 | "-v", "--verbose", action="store_true", help="verbose", default=False 23 | ) 24 | 25 | parser.add_argument( 26 | "-p", "--progress", action="store_true", help="progress", default=False 27 | ) 28 | 29 | parser.add_argument( 30 | "--rm", action="store_true", default=False, help="Remove all previous results" 31 | ) 32 | 33 | parser.add_argument( 34 | "--name", type=str, default="debug", help="Name of the experiment" 35 | ) 36 | parser.add_argument( 37 | "--time", type=int, default=-1, help="Time limit of the experiment" 38 | ) 39 | parser.add_argument( 40 | "--topyneck_path", type=str, default="/nfscc/alg23/xvab/mem/topyneck.pth", help="Path to topyneck" 41 | ) 42 | return parser 43 | 44 | 45 | @my_nfs_cluster_job 46 | def job(tune_dict, cfg, progress=False, **kwargs): 47 | topyneck_path = kwargs.pop('topyneck_path') 48 | 49 | cfg.merge_from_list(dict_to_list(tune_dict)) 50 | 51 | cfg = max_batch_size(cfg) 52 | 53 | ret = simple_train( 54 | cfg=cfg, 55 | progress=progress, 56 | rm_soup=True, 57 | topyneck_path=topyneck_path, 58 | **kwargs, 59 | ) 60 | 61 | 62 | def run_ray( 63 | name, cfg, tune_config, rm=False, progress=False, verbose=False, num_samples=1, time_budget_s=None, topyneck_path=None 64 | ): 65 | cfg = copy.deepcopy(cfg) 66 | if rm: 67 | import shutil 68 | 69 | shutil.rmtree(os.path.join(cfg.RESULTS_DIR, name), ignore_errors=True) 70 | 71 | try: 72 | ana = tune.run( 73 | tune.with_parameters(job, cfg=cfg, progress=progress, topyneck_path=topyneck_path), 74 | local_dir=cfg.RESULTS_DIR, 75 | config=tune_config, 76 | resources_per_trial={"cpu": 1, "gpu": 1}, 77 | num_samples=num_samples, 78 | name=name, 79 | verbose=verbose, 80 | resume="AUTO+ERRORED", 81 | trial_dirname_creator=trial_dirname_creator, 82 | time_budget_s=time_budget_s 83 | ) 84 | except Exception as e: 85 | print(e) 86 | # print traceback 87 | import traceback 88 | 89 | traceback.print_exc() 90 | 91 | 92 | if __name__ == "__main__": 93 | parser = get_parser() 94 | args = parser.parse_args() 95 | t = args.time if args.time > 0 else None 96 | 97 | cfg = load_from_yaml("/workspace/configs/xvaa.yaml") 98 | cfg.RESULTS_DIR = "/nfscc/alg23/width/" 99 | cfg.DATASET.ROIS = ['orig'] 100 | # cfg.DATAMODULE.BATCH_SIZE = 32 101 | # cfg.OPTIMIZER.LR = 1e-3 102 | 103 | tune_config = { 104 | "DATASET.SUBJECT_LIST": tune.grid_search([["subj01"]]), 105 | "MODEL.WIDTH_RATIO": tune.grid_search([1.0, 1.25, 1.5, 2.0]), 106 | } 107 | name = f"ratio" 108 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 5, t, args.topyneck_path) 109 | -------------------------------------------------------------------------------- /mem/scripts_tune/tune_morevoxel.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from cluster_utils import my_nfs_cluster_job, trial_dirname_creator 3 | 4 | import argparse 5 | import os 6 | import sys 7 | from random import seed, shuffle 8 | 9 | import numpy as np 10 | import ray 11 | from ray import tune 12 | 13 | from config_utils import dict_to_list, get_cfg_defaults, load_from_yaml 14 | 15 | from train_utils import max_batch_size, simple_train 16 | 17 | 18 | def get_parser(): 19 | parser = argparse.ArgumentParser(description="Ray Tune") 20 | 21 | parser.add_argument( 22 | "-v", "--verbose", action="store_true", help="verbose", default=False 23 | ) 24 | 25 | parser.add_argument( 26 | "-p", "--progress", action="store_true", help="progress", 
default=False 27 | ) 28 | 29 | parser.add_argument( 30 | "--rm", action="store_true", default=False, help="Remove all previous results" 31 | ) 32 | 33 | parser.add_argument( 34 | "--name", type=str, default="debug", help="Name of the experiment" 35 | ) 36 | parser.add_argument( 37 | "--time", type=int, default=-1, help="Time limit of the experiment" 38 | ) 39 | return parser 40 | 41 | BACKBONEC = { 42 | 'clip_vit_l': (224, [5, 11, 17, 23], [1024, 1024, 1024, 1024], [2048, 2048, 2048, 1024]), 43 | 'clip_vit_b': (224, [2, 5, 8, 11], [768, 768, 768, 768], [1536, 1536, 1536, 768]), 44 | 'clip_vit_s': (224, [2, 5, 8, 11], [768, 768, 768, 768], [1536, 1536, 1536, 768]), 45 | 'dinov2_vit_l': (224, [5, 11, 17, 23], [1024, 1024, 1024, 1024], [2048, 2048, 2048, 1024]), 46 | 'dinov2_vit_b': (224, [2, 5, 8, 11], [768, 768, 768, 768], [1536, 1536, 1536, 768]), 47 | 'dinov2_vit_s': (224, [2, 5, 8, 11], [384, 384, 384, 384], [768, 768, 768, 384]), 48 | } 49 | 50 | @my_nfs_cluster_job 51 | def job(tune_dict, cfg, progress=False, **kwargs): 52 | if "row" in tune_dict: 53 | global ROW_LIST 54 | row = tune_dict["row"] 55 | tune_dict.pop("row") 56 | print(ROW_LIST[row]) 57 | tune_dict.update(ROW_LIST[row]) 58 | 59 | cfg.merge_from_list(dict_to_list(tune_dict)) 60 | 61 | reso, layers, dim, dim2 = BACKBONEC[cfg.MODEL.BACKBONE.NAME] 62 | cfg.DATASET.IMAGE_RESOLUTION = [reso, reso] 63 | cfg.MODEL.BACKBONE.LAYERS = layers 64 | cfg.MODEL.BACKBONE.FEATURE_DIMS = dim 65 | cfg.MODEL.BACKBONE.CLS_DIMS = dim2 66 | 67 | cfg = max_batch_size(cfg) 68 | 69 | ret = simple_train( 70 | cfg=cfg, 71 | progress=progress, 72 | rm_soup=True, 73 | **kwargs, 74 | ) 75 | 76 | 77 | def run_ray( 78 | name, cfg, tune_config, rm=False, progress=False, verbose=False, num_samples=1, time_budget_s=None 79 | ): 80 | cfg = copy.deepcopy(cfg) 81 | if rm: 82 | import shutil 83 | 84 | shutil.rmtree(os.path.join(cfg.RESULTS_DIR, name), ignore_errors=True) 85 | 86 | try: 87 | ana = tune.run( 88 | tune.with_parameters(job, cfg=cfg, progress=progress), 89 | local_dir=cfg.RESULTS_DIR, 90 | config=tune_config, 91 | resources_per_trial={"cpu": 1, "gpu": 1}, 92 | num_samples=num_samples, 93 | name=name, 94 | verbose=verbose, 95 | resume="AUTO+ERRORED", 96 | trial_dirname_creator=trial_dirname_creator, 97 | time_budget_s=time_budget_s 98 | ) 99 | except Exception as e: 100 | print(e) 101 | # print traceback 102 | import traceback 103 | 104 | traceback.print_exc() 105 | 106 | 107 | # ROW_LIST = [ 108 | # {"EXPERIMENTAL.USE_PREV_IMAGE": False}, 109 | # {"EXPERIMENTAL.USE_PREV_IMAGE": True}, 110 | # {"EXPERIMENTAL.USE_EVEN_PREV_IMAGE": True}, 111 | # {"EXPERIMENTAL.SHUFFLE_IMAGES": True}, 112 | # ] 113 | # - 114 | if __name__ == "__main__": 115 | parser = get_parser() 116 | args = parser.parse_args() 117 | t = args.time if args.time > 0 else None 118 | 119 | cfg = load_from_yaml("/workspace/configs/debug.yaml") 120 | 121 | cfg.RESULTS_DIR = "/nfscc/ray_results/morevoxel/" 122 | 123 | tune_config = { 124 | "DATASET.SUBJECT_LIST": tune.grid_search([['subj01']]), 125 | # "DATAMODULE.BATCH_SIZE": tune.grid_search([8, 16, 32]), 126 | "DATASET.ROIS": tune.grid_search([['all'], ['orig'], ['E'], ['P']]) 127 | } 128 | name = f"break_ls_or_not" 129 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 1, t) -------------------------------------------------------------------------------- /mem/scripts_tune/tune_prevframe.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from cluster_utils import 
my_nfs_cluster_job, trial_dirname_creator 3 | 4 | import argparse 5 | import os 6 | import sys 7 | from random import seed, shuffle 8 | 9 | import numpy as np 10 | import ray 11 | from ray import tune 12 | 13 | from config_utils import dict_to_list, get_cfg_defaults, load_from_yaml 14 | 15 | from train_utils import max_batch_size, simple_train 16 | 17 | 18 | def get_parser(): 19 | parser = argparse.ArgumentParser(description="Ray Tune") 20 | 21 | parser.add_argument( 22 | "-v", "--verbose", action="store_true", help="verbose", default=False 23 | ) 24 | 25 | parser.add_argument( 26 | "-p", "--progress", action="store_true", help="progress", default=False 27 | ) 28 | 29 | parser.add_argument( 30 | "--rm", action="store_true", default=False, help="Remove all previous results" 31 | ) 32 | 33 | parser.add_argument( 34 | "--name", type=str, default="debug", help="Name of the experiment" 35 | ) 36 | return parser 37 | 38 | BACKBONEC = { 39 | 'clip_vit_l': (224, [5, 11, 17, 23], [1024, 1024, 1024, 1024], [2048, 2048, 2048, 1024]), 40 | 'clip_vit_b': (224, [2, 5, 8, 11], [768, 768, 768, 768], [1536, 1536, 1536, 768]), 41 | 'clip_vit_s': (224, [2, 5, 8, 11], [768, 768, 768, 768], [1536, 1536, 1536, 768]), 42 | 'dinov2_vit_l': (224, [5, 11, 17, 23], [1024, 1024, 1024, 1024], [2048, 2048, 2048, 1024]), 43 | 'dinov2_vit_b': (224, [2, 5, 8, 11], [768, 768, 768, 768], [1536, 1536, 1536, 768]), 44 | 'dinov2_vit_s': (224, [2, 5, 8, 11], [384, 384, 384, 384], [768, 768, 768, 384]), 45 | } 46 | 47 | @my_nfs_cluster_job 48 | def job(tune_dict, cfg, progress=False, **kwargs): 49 | if "row" in tune_dict: 50 | global ROW_LIST 51 | row = tune_dict["row"] 52 | tune_dict.pop("row") 53 | print(ROW_LIST[row]) 54 | tune_dict.update(ROW_LIST[row]) 55 | 56 | cfg.merge_from_list(dict_to_list(tune_dict)) 57 | 58 | reso, layers, dim, dim2 = BACKBONEC[cfg.MODEL.BACKBONE.NAME] 59 | cfg.DATASET.IMAGE_RESOLUTION = [reso, reso] 60 | cfg.MODEL.BACKBONE.LAYERS = layers 61 | cfg.MODEL.BACKBONE.FEATURE_DIMS = dim 62 | cfg.MODEL.BACKBONE.CLS_DIMS = dim2 63 | 64 | cfg = max_batch_size(cfg) 65 | 66 | ret = simple_train( 67 | cfg=cfg, 68 | progress=progress, 69 | **kwargs, 70 | ) 71 | 72 | 73 | def run_ray( 74 | name, cfg, tune_config, rm=False, progress=False, verbose=False, num_samples=1 75 | ): 76 | cfg = copy.deepcopy(cfg) 77 | if rm: 78 | import shutil 79 | 80 | shutil.rmtree(os.path.join(cfg.RESULTS_DIR, name), ignore_errors=True) 81 | 82 | ana = tune.run( 83 | tune.with_parameters(job, cfg=cfg, progress=progress), 84 | local_dir=cfg.RESULTS_DIR, 85 | config=tune_config, 86 | resources_per_trial={"cpu": 1, "gpu": 1}, 87 | num_samples=num_samples, 88 | name=name, 89 | verbose=verbose, 90 | resume="AUTO+ERRORED", 91 | trial_dirname_creator=trial_dirname_creator, 92 | ) 93 | 94 | 95 | # ROW_LIST = [ 96 | # {"EXPERIMENTAL.USE_PREV_IMAGE": False}, 97 | # {"EXPERIMENTAL.USE_PREV_IMAGE": True}, 98 | # {"EXPERIMENTAL.USE_EVEN_PREV_IMAGE": True}, 99 | # {"EXPERIMENTAL.SHUFFLE_IMAGES": True}, 100 | # ] 101 | # - 102 | if __name__ == "__main__": 103 | parser = get_parser() 104 | args = parser.parse_args() 105 | 106 | # cfg = load_from_yaml("/workspace/configs/dino_t1.yaml") 107 | 108 | cfg = get_cfg_defaults() 109 | cfg.DATAMODULE.BATCH_SIZE = 32 110 | cfg.TRAINER.ACCUMULATE_GRAD_BATCHES = 2 111 | cfg.OPTIMIZER.LR = 3e-3 112 | cfg.OPTIMIZER.NAME = "AdamW" 113 | cfg.DATASET.ROIS = ["orig"] 114 | cfg.DATASET.FMRI_SPACE = 'fsaverage' 115 | cfg.DATASET.SUBJECT_LIST = ["subj01"] 116 | cfg.MODEL.BACKBONE.NAME = "dinov2_vit_s" 117 | 
cfg.TRAINER.CALLBACKS.EARLY_STOP.PATIENCE = 20 118 | cfg.MODEL.CONV_HEAD.SIMPLE = True 119 | 120 | cfg.TRAINER.PRECISION = 16 121 | 122 | cfg.TRAINER.LIMIT_TRAIN_BATCHES = 0.1 123 | cfg.TRAINER.LIMIT_VAL_BATCHES = 1.0 124 | 125 | cfg.RESULTS_DIR = "/nfscc/ray_results/prev_image" 126 | 127 | cfg.EXPERIMENTAL.USE_PREV_FRAME = False 128 | cfg.EXPERIMENTAL.STRAIGHT_FORWARD = True 129 | 130 | cfg.MODEL.BACKBONE.LORA.SCALE = 0.0 131 | cfg.MODEL.BACKBONE.ADAPTIVE_LN.SCALE = 0.0 132 | tune_config = { 133 | "DATASET.SUBJECT_LIST": tune.grid_search([["subj01"], ["subj02"], ["subj03"], ["subj04"], ["subj05"], ["subj06"], ["subj07"], ["subj08"]]), 134 | "EXPERIMENTAL.T_IMAGE": tune.grid_search([-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -16, -20, -24, -28]), 135 | } 136 | name = "image_t_full" 137 | run_ray(name, cfg, tune_config, args.rm, args.progress, args.verbose, 1) --------------------------------------------------------------------------------
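Note on the tuning scripts above: they all share one pattern — clone a yacs config, merge a dict of dotted config keys supplied by Ray Tune into it with cfg.merge_from_list(dict_to_list(...)), and launch a grid sweep via tune.run; when several keys must change together (e.g. the bottleneck rank/out-dim pairs), a single "row" axis indexes into a module-level ROW_LIST. The snippet below is a minimal, self-contained sketch of that row-indexing idea only, assuming ray[tune] is installed and using the same tune.run / tune.grid_search API the scripts rely on. toy_rows and trainable are illustrative names, not part of this repository; the real scripts would merge the overrides into the yacs cfg and call simple_train where the comment indicates.

from ray import tune

# Hypothetical stand-in for the repo's ROW_LIST: each "row" groups
# settings that must vary together in one trial.
toy_rows = [
    {"MODEL.BOTTLENECK.RANK": 8,  "MODEL.BOTTLENECK.OUT_DIM": 8},
    {"MODEL.BOTTLENECK.RANK": 16, "MODEL.BOTTLENECK.OUT_DIM": 16},
]

def trainable(tune_dict):
    # Mirror of the job() functions above: pop the row index and expand
    # it into concrete key/value overrides before merging into the config.
    overrides = dict(tune_dict)
    row = overrides.pop("row")
    overrides.update(toy_rows[row])
    # In the repository this is where cfg.merge_from_list(dict_to_list(overrides))
    # and simple_train(cfg=cfg, ...) would run; here we just report a dummy metric.
    tune.report(score=sum(v for v in overrides.values() if isinstance(v, (int, float))))

if __name__ == "__main__":
    tune.run(
        trainable,
        config={"row": tune.grid_search(list(range(len(toy_rows))))},
        num_samples=1,
    )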