├── .DS_Store ├── LICENSE ├── README.md ├── eval ├── .DS_Store ├── ImageNet_probing │ ├── README.md │ ├── dinov2 │ │ ├── __init__.py │ │ ├── configs │ │ │ ├── __init__.py │ │ │ ├── eval │ │ │ │ ├── vitb14_pretrain.yaml │ │ │ │ ├── vitb14_reg4_pretrain.yaml │ │ │ │ ├── vitg14_pretrain.yaml │ │ │ │ ├── vitg14_reg4_pretrain.yaml │ │ │ │ ├── vitl14_pretrain.yaml │ │ │ │ ├── vitl14_reg4_pretrain.yaml │ │ │ │ ├── vits14_pretrain.yaml │ │ │ │ └── vits14_reg4_pretrain.yaml │ │ │ ├── ssl_default_config.yaml │ │ │ └── train │ │ │ │ ├── vitg14.yaml │ │ │ │ ├── vitl14.yaml │ │ │ │ └── vitl16_short.yaml │ │ ├── data │ │ │ ├── __init__.py │ │ │ ├── adapters.py │ │ │ ├── augmentations.py │ │ │ ├── collate.py │ │ │ ├── datasets │ │ │ │ ├── __init__.py │ │ │ │ ├── decoders.py │ │ │ │ ├── extended.py │ │ │ │ ├── image_net.py │ │ │ │ └── image_net_22k.py │ │ │ ├── loaders.py │ │ │ ├── masking.py │ │ │ ├── samplers.py │ │ │ └── transforms.py │ │ ├── distributed │ │ │ └── __init__.py │ │ ├── eval │ │ │ ├── __init__.py │ │ │ ├── depth │ │ │ │ ├── __init__.py │ │ │ │ ├── models │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── backbones │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ └── vision_transformer.py │ │ │ │ │ ├── builder.py │ │ │ │ │ ├── decode_heads │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── decode_head.py │ │ │ │ │ │ ├── dpt_head.py │ │ │ │ │ │ └── linear_head.py │ │ │ │ │ ├── depther │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── base.py │ │ │ │ │ │ └── encoder_decoder.py │ │ │ │ │ └── losses │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── gradientloss.py │ │ │ │ │ │ └── sigloss.py │ │ │ │ └── ops │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── wrappers.py │ │ │ ├── knn.py │ │ │ ├── linear.py │ │ │ ├── linear_proteus.py │ │ │ ├── log_regression.py │ │ │ ├── metrics.py │ │ │ ├── segmentation │ │ │ │ ├── __init__.py │ │ │ │ ├── hooks │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── optimizer.py │ │ │ │ ├── models │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── backbones │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ └── vision_transformer.py │ │ │ │ │ └── decode_heads │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ └── linear_head.py │ │ │ │ └── utils │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── colormaps.py │ │ │ ├── segmentation_m2f │ │ │ │ ├── __init__.py │ │ │ │ ├── core │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── anchor │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── builder.py │ │ │ │ │ │ └── point_generator.py │ │ │ │ │ ├── box │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── builder.py │ │ │ │ │ │ └── samplers │ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ │ ├── base_sampler.py │ │ │ │ │ │ │ ├── mask_pseudo_sampler.py │ │ │ │ │ │ │ ├── mask_sampling_result.py │ │ │ │ │ │ │ └── sampling_result.py │ │ │ │ │ └── utils │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── dist_utils.py │ │ │ │ │ │ └── misc.py │ │ │ │ ├── models │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── backbones │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── adapter_modules.py │ │ │ │ │ │ ├── drop_path.py │ │ │ │ │ │ ├── vit.py │ │ │ │ │ │ └── vit_adapter.py │ │ │ │ │ ├── builder.py │ │ │ │ │ ├── decode_heads │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ └── mask2former_head.py │ │ │ │ │ ├── losses │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── cross_entropy_loss.py │ │ │ │ │ │ ├── dice_loss.py │ │ │ │ │ │ └── match_costs.py │ │ │ │ │ ├── plugins │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ └── msdeformattn_pixel_decoder.py │ │ │ │ │ ├── segmentors │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ └── encoder_decoder_mask2former.py │ │ │ │ │ └── utils │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── assigner.py │ │ │ │ │ │ ├── point_sample.py │ │ │ │ │ │ ├── 
positional_encoding.py │ │ │ │ │ │ └── transformer.py │ │ │ │ └── ops │ │ │ │ │ └── modules │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── ms_deform_attn.py │ │ │ ├── setup.py │ │ │ └── utils.py │ │ ├── fsdp │ │ │ └── __init__.py │ │ ├── hub │ │ │ ├── __init__.py │ │ │ ├── backbones.py │ │ │ ├── classifiers.py │ │ │ ├── depth │ │ │ │ ├── __init__.py │ │ │ │ ├── decode_heads.py │ │ │ │ ├── encoder_decoder.py │ │ │ │ └── ops.py │ │ │ ├── depthers.py │ │ │ └── utils.py │ │ ├── layers │ │ │ ├── __init__.py │ │ │ ├── attention.py │ │ │ ├── block.py │ │ │ ├── dino_head.py │ │ │ ├── drop_path.py │ │ │ ├── layer_scale.py │ │ │ ├── mlp.py │ │ │ ├── patch_embed.py │ │ │ └── swiglu_ffn.py │ │ ├── logging │ │ │ ├── __init__.py │ │ │ └── helpers.py │ │ ├── loss │ │ │ ├── __init__.py │ │ │ ├── dino_clstoken_loss.py │ │ │ ├── ibot_patch_loss.py │ │ │ └── koleo_loss.py │ │ ├── models │ │ │ ├── __init__.py │ │ │ └── vision_transformer.py │ │ ├── run │ │ │ ├── __init__.py │ │ │ ├── eval │ │ │ │ ├── knn.py │ │ │ │ ├── linear.py │ │ │ │ └── log_regression.py │ │ │ ├── submit.py │ │ │ └── train │ │ │ │ └── train.py │ │ ├── train │ │ │ ├── __init__.py │ │ │ ├── ssl_meta_arch.py │ │ │ └── train.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── cluster.py │ │ │ ├── config.py │ │ │ ├── dtype.py │ │ │ ├── param_groups.py │ │ │ └── utils.py │ └── run_probing.sh ├── dense_prediction │ ├── .DS_Store │ ├── README.md │ ├── configs │ │ ├── .DS_Store │ │ ├── _base_ │ │ │ ├── datasets │ │ │ │ ├── ade20k.py │ │ │ │ ├── ade20k_518.py │ │ │ │ ├── ade20k_640x640.py │ │ │ │ ├── nyu.py │ │ │ │ ├── nyu_512x512.py │ │ │ │ └── nyu_518x518.py │ │ │ ├── default_runtime.py │ │ │ ├── models │ │ │ │ ├── dpt_mae_depth.py │ │ │ │ ├── linear_mae_probing.py │ │ │ │ └── upernet_mae.py │ │ │ └── schedules │ │ │ │ ├── schedule_160k.py │ │ │ │ ├── schedule_20k.py │ │ │ │ ├── schedule_240k.py │ │ │ │ ├── schedule_25k.py │ │ │ │ ├── schedule_320k.py │ │ │ │ ├── schedule_40k.py │ │ │ │ ├── schedule_60k.py │ │ │ │ └── schedule_80k.py │ │ ├── dpt │ │ │ ├── .DS_Store │ │ │ ├── dpt_mae-b14_8xb2-160k_nyu-518x518_proteus.py │ │ │ ├── dpt_mae-l14_8xb2-160k_nyu-518x518_proteus.py │ │ │ ├── dpt_mae-s14_8xb2-160k_nyu-518x518_proteus.py │ │ │ ├── dpt_mae-s14_8xb2-160k_nyu-518x518_proteus_probing.py │ │ │ └── metafile.yaml │ │ └── mae │ │ │ ├── .DS_Store │ │ │ ├── mae-base_upernet_8xb2-amp-60k_ade20k-518x518_proteus.py │ │ │ ├── mae-large_upernet_8xb2-amp-160k_ade20k-518x518_proteus.py │ │ │ ├── mae-small_linear_8xb2-amp-60k_ade20k-518x518_proteus_probing.py │ │ │ ├── mae-small_upernet_8xb2-amp-60k_ade20k-518x518_proteus.py │ │ │ └── metafile.yaml │ ├── mmseg │ │ ├── .mim │ │ │ ├── configs │ │ │ │ ├── _base_ │ │ │ │ │ ├── datasets │ │ │ │ │ │ ├── ade20k.py │ │ │ │ │ │ ├── ade20k_518.py │ │ │ │ │ │ ├── ade20k_640x640.py │ │ │ │ │ │ ├── nyu.py │ │ │ │ │ │ ├── nyu_512x512.py │ │ │ │ │ │ └── nyu_518x518.py │ │ │ │ │ ├── default_runtime.py │ │ │ │ │ ├── models │ │ │ │ │ │ ├── dpt_mae_depth.py │ │ │ │ │ │ ├── linear_mae_probing.py │ │ │ │ │ │ └── upernet_mae.py │ │ │ │ │ └── schedules │ │ │ │ │ │ ├── schedule_160k.py │ │ │ │ │ │ ├── schedule_20k.py │ │ │ │ │ │ ├── schedule_240k.py │ │ │ │ │ │ ├── schedule_25k.py │ │ │ │ │ │ ├── schedule_320k.py │ │ │ │ │ │ ├── schedule_40k.py │ │ │ │ │ │ ├── schedule_60k.py │ │ │ │ │ │ └── schedule_80k.py │ │ │ │ ├── dpt │ │ │ │ │ ├── README.md │ │ │ │ │ ├── dpt_mae-b14_8xb2-160k_nyu-518x518_proteus.py │ │ │ │ │ ├── dpt_mae-l14_8xb2-160k_nyu-518x518_proteus.py │ │ │ │ │ ├── dpt_mae-s14_8xb2-160k_nyu-518x518_proteus.py │ │ │ │ │ ├── 
dpt_mae-s14_8xb2-160k_nyu-518x518_proteus_probing.py │ │ │ │ │ └── metafile.yaml │ │ │ │ └── mae │ │ │ │ │ ├── README.md │ │ │ │ │ ├── mae-base_upernet_8xb2-amp-60k_ade20k-518x518_proteus.py │ │ │ │ │ ├── mae-large_upernet_8xb2-amp-160k_ade20k-518x518_proteus.py │ │ │ │ │ ├── mae-small_linear_8xb2-amp-60k_ade20k-518x518_proteus_probing.py │ │ │ │ │ ├── mae-small_upernet_8xb2-amp-60k_ade20k-518x518_proteus.py │ │ │ │ │ └── metafile.yaml │ │ │ └── tools │ │ │ │ ├── analysis_tools │ │ │ │ ├── analyze_logs.py │ │ │ │ ├── benchmark.py │ │ │ │ ├── browse_dataset.py │ │ │ │ ├── confusion_matrix.py │ │ │ │ ├── get_flops.py │ │ │ │ └── visualization_cam.py │ │ │ │ ├── dataset_converters │ │ │ │ ├── chase_db1.py │ │ │ │ ├── cityscapes.py │ │ │ │ ├── coco_stuff10k.py │ │ │ │ ├── coco_stuff164k.py │ │ │ │ ├── drive.py │ │ │ │ ├── hrf.py │ │ │ │ ├── isaid.py │ │ │ │ ├── levircd.py │ │ │ │ ├── loveda.py │ │ │ │ ├── nyu.py │ │ │ │ ├── pascal_context.py │ │ │ │ ├── potsdam.py │ │ │ │ ├── refuge.py │ │ │ │ ├── stare.py │ │ │ │ ├── synapse.py │ │ │ │ ├── vaihingen.py │ │ │ │ └── voc_aug.py │ │ │ │ ├── deployment │ │ │ │ └── pytorch2torchscript.py │ │ │ │ ├── dist_test.sh │ │ │ │ ├── dist_train.sh │ │ │ │ ├── misc │ │ │ │ ├── browse_dataset.py │ │ │ │ ├── print_config.py │ │ │ │ └── publish_model.py │ │ │ │ ├── model_converters │ │ │ │ ├── beit2mmseg.py │ │ │ │ ├── mit2mmseg.py │ │ │ │ ├── proteus2mmseg.py │ │ │ │ ├── san2mmseg.py │ │ │ │ ├── stdc2mmseg.py │ │ │ │ ├── swin2mmseg.py │ │ │ │ ├── twins2mmseg.py │ │ │ │ ├── vit2mmseg.py │ │ │ │ └── vitjax2mmseg.py │ │ │ │ ├── slurm_test.sh │ │ │ │ ├── slurm_train.sh │ │ │ │ ├── test.py │ │ │ │ ├── torchserve │ │ │ │ ├── mmseg2torchserve.py │ │ │ │ ├── mmseg_handler.py │ │ │ │ └── test_torchserve.py │ │ │ │ ├── train.py │ │ │ │ └── train_fix.py │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-38.pyc │ │ │ ├── checkpoint.cpython-38.pyc │ │ │ └── version.cpython-38.pyc │ │ ├── apis │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-38.pyc │ │ │ │ ├── inference.cpython-38.pyc │ │ │ │ ├── mmseg_inferencer.cpython-38.pyc │ │ │ │ ├── remote_sense_inferencer.cpython-38.pyc │ │ │ │ └── utils.cpython-38.pyc │ │ │ ├── inference.py │ │ │ ├── mmseg_inferencer.py │ │ │ ├── remote_sense_inferencer.py │ │ │ └── utils.py │ │ ├── configs │ │ │ └── _base_ │ │ │ │ ├── datasets │ │ │ │ ├── loveda.py │ │ │ │ └── potsdam.py │ │ │ │ ├── default_runtime.py │ │ │ │ └── schedules │ │ │ │ ├── schedule_160k.py │ │ │ │ ├── schedule_20k.py │ │ │ │ ├── schedule_240k.py │ │ │ │ ├── schedule_25k.py │ │ │ │ ├── schedule_320k.py │ │ │ │ ├── schedule_40k.py │ │ │ │ └── schedule_80k.py │ │ ├── datasets │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-38.pyc │ │ │ │ ├── ade.cpython-38.pyc │ │ │ │ ├── basesegdataset.cpython-38.pyc │ │ │ │ ├── bdd100k.cpython-38.pyc │ │ │ │ ├── chase_db1.cpython-38.pyc │ │ │ │ ├── cityscapes.cpython-38.pyc │ │ │ │ ├── coco_stuff.cpython-38.pyc │ │ │ │ ├── dark_zurich.cpython-38.pyc │ │ │ │ ├── dataset_wrappers.cpython-38.pyc │ │ │ │ ├── decathlon.cpython-38.pyc │ │ │ │ ├── drive.cpython-38.pyc │ │ │ │ ├── dsdl.cpython-38.pyc │ │ │ │ ├── hrf.cpython-38.pyc │ │ │ │ ├── hsi_drive.cpython-38.pyc │ │ │ │ ├── isaid.cpython-38.pyc │ │ │ │ ├── isprs.cpython-38.pyc │ │ │ │ ├── levir.cpython-38.pyc │ │ │ │ ├── lip.cpython-38.pyc │ │ │ │ ├── loveda.cpython-38.pyc │ │ │ │ ├── mapillary.cpython-38.pyc │ │ │ │ ├── night_driving.cpython-38.pyc │ │ │ │ ├── nyu.cpython-38.pyc │ │ │ │ ├── pascal_context.cpython-38.pyc │ │ │ 
│ ├── potsdam.cpython-38.pyc │ │ │ │ ├── refuge.cpython-38.pyc │ │ │ │ ├── stare.cpython-38.pyc │ │ │ │ ├── synapse.cpython-38.pyc │ │ │ │ └── voc.cpython-38.pyc │ │ │ ├── ade.py │ │ │ ├── basesegdataset.py │ │ │ ├── nyu.py │ │ │ └── transforms │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-38.pyc │ │ │ │ ├── formatting.cpython-38.pyc │ │ │ │ ├── loading.cpython-38.pyc │ │ │ │ └── transforms.cpython-38.pyc │ │ │ │ ├── formatting.py │ │ │ │ ├── loading.py │ │ │ │ └── transforms.py │ │ ├── engine │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ └── __init__.cpython-38.pyc │ │ │ ├── hooks │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-38.pyc │ │ │ │ │ └── visualization_hook.cpython-38.pyc │ │ │ │ └── visualization_hook.py │ │ │ ├── optimizers │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-38.pyc │ │ │ │ │ ├── force_default_constructor.cpython-38.pyc │ │ │ │ │ └── layer_decay_optimizer_constructor.cpython-38.pyc │ │ │ │ ├── force_default_constructor.py │ │ │ │ └── layer_decay_optimizer_constructor.py │ │ │ └── schedulers │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-38.pyc │ │ │ │ └── poly_ratio_scheduler.cpython-38.pyc │ │ │ │ └── poly_ratio_scheduler.py │ │ ├── evaluation │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ └── __init__.cpython-38.pyc │ │ │ └── metrics │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-38.pyc │ │ │ │ ├── citys_metric.cpython-38.pyc │ │ │ │ ├── depth_metric.cpython-38.pyc │ │ │ │ └── iou_metric.cpython-38.pyc │ │ │ │ ├── citys_metric.py │ │ │ │ ├── depth_metric.py │ │ │ │ └── iou_metric.py │ │ ├── models │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-38.pyc │ │ │ │ ├── builder.cpython-38.pyc │ │ │ │ └── data_preprocessor.cpython-38.pyc │ │ │ ├── assigners │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-38.pyc │ │ │ │ │ ├── base_assigner.cpython-38.pyc │ │ │ │ │ ├── hungarian_assigner.cpython-38.pyc │ │ │ │ │ └── match_cost.cpython-38.pyc │ │ │ │ ├── base_assigner.py │ │ │ │ ├── hungarian_assigner.py │ │ │ │ └── match_cost.py │ │ │ ├── backbones │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-38.pyc │ │ │ │ │ ├── beit.cpython-38.pyc │ │ │ │ │ ├── bisenetv1.cpython-38.pyc │ │ │ │ │ ├── bisenetv2.cpython-38.pyc │ │ │ │ │ ├── cgnet.cpython-38.pyc │ │ │ │ │ ├── ddrnet.cpython-38.pyc │ │ │ │ │ ├── deit.cpython-38.pyc │ │ │ │ │ ├── erfnet.cpython-38.pyc │ │ │ │ │ ├── fast_scnn.cpython-38.pyc │ │ │ │ │ ├── hrnet.cpython-38.pyc │ │ │ │ │ ├── icnet.cpython-38.pyc │ │ │ │ │ ├── mae.cpython-38.pyc │ │ │ │ │ ├── mae_clip.cpython-38.pyc │ │ │ │ │ ├── mae_clipv2.cpython-38.pyc │ │ │ │ │ ├── mae_clipv2_fix.cpython-38.pyc │ │ │ │ │ ├── mae_clipv3.cpython-38.pyc │ │ │ │ │ ├── mae_clipv3_fix.cpython-38.pyc │ │ │ │ │ ├── mae_distill.cpython-38.pyc │ │ │ │ │ ├── mae_fix.cpython-38.pyc │ │ │ │ │ ├── mit.cpython-38.pyc │ │ │ │ │ ├── mobilenet_v2.cpython-38.pyc │ │ │ │ │ ├── mobilenet_v3.cpython-38.pyc │ │ │ │ │ ├── mscan.cpython-38.pyc │ │ │ │ │ ├── pidnet.cpython-38.pyc │ │ │ │ │ ├── resnest.cpython-38.pyc │ │ │ │ │ ├── resnet.cpython-38.pyc │ │ │ │ │ ├── resnext.cpython-38.pyc │ │ │ │ │ ├── stdc.cpython-38.pyc │ │ │ │ │ ├── swin.cpython-38.pyc │ │ │ │ │ ├── timm_backbone.cpython-38.pyc │ │ │ │ │ ├── twins.cpython-38.pyc │ │ │ │ │ ├── unet.cpython-38.pyc │ │ │ │ │ ├── uniformer.cpython-38.pyc │ │ │ │ │ ├── uniformer_cross_fix.cpython-38.pyc │ │ 
│ │ │ ├── uniformer_official.cpython-38.pyc │ │ │ │ │ ├── vit.cpython-38.pyc │ │ │ │ │ └── vpd.cpython-38.pyc │ │ │ │ ├── beit.py │ │ │ │ ├── mae.py │ │ │ │ ├── mae_fix.py │ │ │ │ └── vit.py │ │ │ ├── builder.py │ │ │ ├── data_preprocessor.py │ │ │ ├── decode_heads │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-38.pyc │ │ │ │ │ ├── ann_head.cpython-38.pyc │ │ │ │ │ ├── apc_head.cpython-38.pyc │ │ │ │ │ ├── aspp_head.cpython-38.pyc │ │ │ │ │ ├── cascade_decode_head.cpython-38.pyc │ │ │ │ │ ├── cc_head.cpython-38.pyc │ │ │ │ │ ├── da_head.cpython-38.pyc │ │ │ │ │ ├── ddr_head.cpython-38.pyc │ │ │ │ │ ├── decode_head.cpython-38.pyc │ │ │ │ │ ├── decode_head_depth.cpython-38.pyc │ │ │ │ │ ├── decode_head_wo_class.cpython-38.pyc │ │ │ │ │ ├── dm_head.cpython-38.pyc │ │ │ │ │ ├── dnl_head.cpython-38.pyc │ │ │ │ │ ├── dpt_head.cpython-38.pyc │ │ │ │ │ ├── dpt_head_depth.cpython-38.pyc │ │ │ │ │ ├── ema_head.cpython-38.pyc │ │ │ │ │ ├── enc_head.cpython-38.pyc │ │ │ │ │ ├── fcn_head.cpython-38.pyc │ │ │ │ │ ├── fpn_head.cpython-38.pyc │ │ │ │ │ ├── gc_head.cpython-38.pyc │ │ │ │ │ ├── ham_head.cpython-38.pyc │ │ │ │ │ ├── isa_head.cpython-38.pyc │ │ │ │ │ ├── knet_head.cpython-38.pyc │ │ │ │ │ ├── lraspp_head.cpython-38.pyc │ │ │ │ │ ├── mask2former_head.cpython-38.pyc │ │ │ │ │ ├── maskformer_head.cpython-38.pyc │ │ │ │ │ ├── nl_head.cpython-38.pyc │ │ │ │ │ ├── ocr_head.cpython-38.pyc │ │ │ │ │ ├── pid_head.cpython-38.pyc │ │ │ │ │ ├── point_head.cpython-38.pyc │ │ │ │ │ ├── psa_head.cpython-38.pyc │ │ │ │ │ ├── psp_head.cpython-38.pyc │ │ │ │ │ ├── san_head.cpython-38.pyc │ │ │ │ │ ├── segformer_head.cpython-38.pyc │ │ │ │ │ ├── segmenter_mask_head.cpython-38.pyc │ │ │ │ │ ├── sep_aspp_head.cpython-38.pyc │ │ │ │ │ ├── sep_fcn_head.cpython-38.pyc │ │ │ │ │ ├── setr_mla_head.cpython-38.pyc │ │ │ │ │ ├── setr_up_head.cpython-38.pyc │ │ │ │ │ ├── stdc_head.cpython-38.pyc │ │ │ │ │ ├── uper_head.cpython-38.pyc │ │ │ │ │ └── vpd_depth_head.cpython-38.pyc │ │ │ │ ├── decode_head.py │ │ │ │ ├── dpt_head.py │ │ │ │ ├── dpt_head_depth.py │ │ │ │ ├── fcn_head.py │ │ │ │ ├── psp_head.py │ │ │ │ └── uper_head.py │ │ │ ├── losses │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-38.pyc │ │ │ │ │ ├── accuracy.cpython-38.pyc │ │ │ │ │ ├── boundary_loss.cpython-38.pyc │ │ │ │ │ ├── cross_entropy_loss.cpython-38.pyc │ │ │ │ │ ├── dice_loss.cpython-38.pyc │ │ │ │ │ ├── focal_loss.cpython-38.pyc │ │ │ │ │ ├── huasdorff_distance_loss.cpython-38.pyc │ │ │ │ │ ├── lovasz_loss.cpython-38.pyc │ │ │ │ │ ├── ohem_cross_entropy_loss.cpython-38.pyc │ │ │ │ │ ├── sigloss.cpython-38.pyc │ │ │ │ │ ├── silog_loss.cpython-38.pyc │ │ │ │ │ ├── tversky_loss.cpython-38.pyc │ │ │ │ │ └── utils.cpython-38.pyc │ │ │ │ ├── accuracy.py │ │ │ │ ├── boundary_loss.py │ │ │ │ ├── cross_entropy_loss.py │ │ │ │ ├── dice_loss.py │ │ │ │ ├── focal_loss.py │ │ │ │ ├── huasdorff_distance_loss.py │ │ │ │ ├── kldiv_loss.py │ │ │ │ ├── lovasz_loss.py │ │ │ │ ├── ohem_cross_entropy_loss.py │ │ │ │ ├── sigloss.py │ │ │ │ ├── silog_loss.py │ │ │ │ ├── tversky_loss.py │ │ │ │ └── utils.py │ │ │ ├── necks │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-38.pyc │ │ │ │ │ ├── featurepyramid.cpython-38.pyc │ │ │ │ │ ├── fpn.cpython-38.pyc │ │ │ │ │ ├── ic_neck.cpython-38.pyc │ │ │ │ │ ├── jpu.cpython-38.pyc │ │ │ │ │ ├── mla_neck.cpython-38.pyc │ │ │ │ │ └── multilevel_neck.cpython-38.pyc │ │ │ │ └── featurepyramid.py │ │ │ ├── segmentors │ │ │ │ ├── __init__.py │ │ 
│ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-38.pyc │ │ │ │ │ ├── base.cpython-38.pyc │ │ │ │ │ ├── cascade_encoder_decoder.cpython-38.pyc │ │ │ │ │ ├── depth_estimator.cpython-38.pyc │ │ │ │ │ ├── encoder_decoder.cpython-38.pyc │ │ │ │ │ ├── encoder_decoder_wo_class.cpython-38.pyc │ │ │ │ │ ├── multimodal_encoder_decoder.cpython-38.pyc │ │ │ │ │ └── seg_tta.cpython-38.pyc │ │ │ │ ├── base.py │ │ │ │ ├── cascade_encoder_decoder.py │ │ │ │ ├── depth_estimator.py │ │ │ │ ├── encoder_decoder.py │ │ │ │ ├── multimodal_encoder_decoder.py │ │ │ │ └── seg_tta.py │ │ │ ├── text_encoder │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-38.pyc │ │ │ │ │ └── clip_text_encoder.cpython-38.pyc │ │ │ │ └── clip_text_encoder.py │ │ │ └── utils │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-38.pyc │ │ │ │ ├── basic_block.cpython-38.pyc │ │ │ │ ├── embed.cpython-38.pyc │ │ │ │ ├── encoding.cpython-38.pyc │ │ │ │ ├── inverted_residual.cpython-38.pyc │ │ │ │ ├── make_divisible.cpython-38.pyc │ │ │ │ ├── point_sample.cpython-38.pyc │ │ │ │ ├── ppm.cpython-38.pyc │ │ │ │ ├── res_layer.cpython-38.pyc │ │ │ │ ├── san_layers.cpython-38.pyc │ │ │ │ ├── se_layer.cpython-38.pyc │ │ │ │ ├── self_attention_block.cpython-38.pyc │ │ │ │ ├── shape_convert.cpython-38.pyc │ │ │ │ ├── up_conv_block.cpython-38.pyc │ │ │ │ └── wrappers.cpython-38.pyc │ │ │ │ ├── basic_block.py │ │ │ │ ├── embed.py │ │ │ │ ├── encoding.py │ │ │ │ ├── inverted_residual.py │ │ │ │ ├── make_divisible.py │ │ │ │ ├── point_sample.py │ │ │ │ ├── ppm.py │ │ │ │ ├── res_layer.py │ │ │ │ ├── san_layers.py │ │ │ │ ├── se_layer.py │ │ │ │ ├── self_attention_block.py │ │ │ │ ├── shape_convert.py │ │ │ │ ├── up_conv_block.py │ │ │ │ └── wrappers.py │ │ ├── registry │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-38.pyc │ │ │ │ └── registry.cpython-38.pyc │ │ │ └── registry.py │ │ ├── structures │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-38.pyc │ │ │ │ └── seg_data_sample.cpython-38.pyc │ │ │ ├── sampler │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-38.pyc │ │ │ │ │ ├── base_pixel_sampler.cpython-38.pyc │ │ │ │ │ ├── builder.cpython-38.pyc │ │ │ │ │ └── ohem_pixel_sampler.cpython-38.pyc │ │ │ │ ├── base_pixel_sampler.py │ │ │ │ ├── builder.py │ │ │ │ └── ohem_pixel_sampler.py │ │ │ └── seg_data_sample.py │ │ ├── utils │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-38.pyc │ │ │ │ ├── class_names.cpython-38.pyc │ │ │ │ ├── collect_env.cpython-38.pyc │ │ │ │ ├── get_templates.cpython-38.pyc │ │ │ │ ├── io.cpython-38.pyc │ │ │ │ ├── mask_classification.cpython-38.pyc │ │ │ │ ├── misc.cpython-38.pyc │ │ │ │ ├── set_env.cpython-38.pyc │ │ │ │ ├── tokenizer.cpython-38.pyc │ │ │ │ └── typing_utils.cpython-38.pyc │ │ │ ├── bpe_simple_vocab_16e6.txt.gz │ │ │ ├── class_names.py │ │ │ ├── collect_env.py │ │ │ ├── get_templates.py │ │ │ ├── io.py │ │ │ ├── mask_classification.py │ │ │ ├── misc.py │ │ │ ├── set_env.py │ │ │ ├── tokenizer.py │ │ │ └── typing_utils.py │ │ ├── version.py │ │ └── visualization │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-38.pyc │ │ │ └── local_visualizer.cpython-38.pyc │ │ │ └── local_visualizer.py │ ├── prepare_ckpt.sh │ ├── requirements.txt │ ├── requirements │ │ ├── albu.txt │ │ ├── docs.txt │ │ ├── mminstall.txt │ │ ├── multimodal.txt │ │ ├── optional.txt │ │ ├── readthedocs.txt │ │ ├── runtime.txt │ │ └── tests.txt │ ├── run.sh │ ├── 
setup.cfg │ ├── setup.py │ ├── tests │ │ ├── __init__.py │ │ ├── test_apis │ │ │ ├── test_inferencer.py │ │ │ ├── test_rs_inferencer.py │ │ │ └── utils.py │ │ ├── test_config.py │ │ ├── test_datasets │ │ │ ├── test_dataset.py │ │ │ ├── test_dataset_builder.py │ │ │ ├── test_formatting.py │ │ │ ├── test_loading.py │ │ │ ├── test_transform.py │ │ │ └── test_tta.py │ │ ├── test_digit_version.py │ │ ├── test_engine │ │ │ ├── test_layer_decay_optimizer_constructor.py │ │ │ ├── test_optimizer.py │ │ │ └── test_visualization_hook.py │ │ ├── test_evaluation │ │ │ └── test_metrics │ │ │ │ ├── test_citys_metric.py │ │ │ │ ├── test_depth_metric.py │ │ │ │ └── test_iou_metric.py │ │ ├── test_models │ │ │ ├── __init__.py │ │ │ ├── test_assigners │ │ │ │ └── test_hungarian_assigner.py │ │ │ ├── test_backbones │ │ │ │ ├── __init__.py │ │ │ │ ├── test_beit.py │ │ │ │ ├── test_bisenetv1.py │ │ │ │ ├── test_bisenetv2.py │ │ │ │ ├── test_blocks.py │ │ │ │ ├── test_cgnet.py │ │ │ │ ├── test_clip_text_encoder.py │ │ │ │ ├── test_erfnet.py │ │ │ │ ├── test_fast_scnn.py │ │ │ │ ├── test_hrnet.py │ │ │ │ ├── test_icnet.py │ │ │ │ ├── test_mae.py │ │ │ │ ├── test_mit.py │ │ │ │ ├── test_mobilenet_v3.py │ │ │ │ ├── test_mscan.py │ │ │ │ ├── test_pidnet.py │ │ │ │ ├── test_resnest.py │ │ │ │ ├── test_resnet.py │ │ │ │ ├── test_resnext.py │ │ │ │ ├── test_stdc.py │ │ │ │ ├── test_swin.py │ │ │ │ ├── test_timm_backbone.py │ │ │ │ ├── test_twins.py │ │ │ │ ├── test_unet.py │ │ │ │ ├── test_vit.py │ │ │ │ ├── test_vpd.py │ │ │ │ └── utils.py │ │ │ ├── test_data_preprocessor.py │ │ │ ├── test_forward.py │ │ │ ├── test_heads │ │ │ │ ├── __init__.py │ │ │ │ ├── test_ann_head.py │ │ │ │ ├── test_apc_head.py │ │ │ │ ├── test_aspp_head.py │ │ │ │ ├── test_cc_head.py │ │ │ │ ├── test_decode_head.py │ │ │ │ ├── test_dm_head.py │ │ │ │ ├── test_dnl_head.py │ │ │ │ ├── test_dpt_head.py │ │ │ │ ├── test_ema_head.py │ │ │ │ ├── test_fcn_head.py │ │ │ │ ├── test_gc_head.py │ │ │ │ ├── test_ham_head.py │ │ │ │ ├── test_isa_head.py │ │ │ │ ├── test_lraspp_head.py │ │ │ │ ├── test_mask2former_head.py │ │ │ │ ├── test_maskformer_head.py │ │ │ │ ├── test_nl_head.py │ │ │ │ ├── test_ocr_head.py │ │ │ │ ├── test_pidnet_head.py │ │ │ │ ├── test_psa_head.py │ │ │ │ ├── test_psp_head.py │ │ │ │ ├── test_san_head.py │ │ │ │ ├── test_segformer_head.py │ │ │ │ ├── test_segmenter_mask_head.py │ │ │ │ ├── test_setr_mla_head.py │ │ │ │ ├── test_setr_up_head.py │ │ │ │ ├── test_uper_head.py │ │ │ │ ├── test_vpd_depth_head.py │ │ │ │ └── utils.py │ │ │ ├── test_losses │ │ │ │ ├── test_cross_entropy_loss.py │ │ │ │ ├── test_dice_loss.py │ │ │ │ ├── test_huasdorff_distance_loss.py │ │ │ │ ├── test_kldiv_loss.py │ │ │ │ ├── test_silog_loss.py │ │ │ │ └── test_tversky_loss.py │ │ │ ├── test_necks │ │ │ │ ├── __init__.py │ │ │ │ ├── test_feature2pyramid.py │ │ │ │ ├── test_fpn.py │ │ │ │ ├── test_ic_neck.py │ │ │ │ ├── test_jpu.py │ │ │ │ ├── test_mla_neck.py │ │ │ │ └── test_multilevel_neck.py │ │ │ ├── test_segmentors │ │ │ │ ├── __init__.py │ │ │ │ ├── test_cascade_encoder_decoder.py │ │ │ │ ├── test_depth_estimator.py │ │ │ │ ├── test_encoder_decoder.py │ │ │ │ ├── test_multimodal_encoder_decoder.py │ │ │ │ ├── test_seg_tta_model.py │ │ │ │ └── utils.py │ │ │ └── test_utils │ │ │ │ ├── __init__.py │ │ │ │ ├── test_embed.py │ │ │ │ └── test_shape_convert.py │ │ ├── test_sampler.py │ │ ├── test_structures │ │ │ └── test_seg_data_sample.py │ │ ├── test_utils │ │ │ ├── test_io.py │ │ │ └── test_set_env.py │ │ └── test_visualization │ │ │ └── 
test_local_visualizer.py │ └── tools │ │ ├── analysis_tools │ │ ├── analyze_logs.py │ │ ├── benchmark.py │ │ ├── browse_dataset.py │ │ ├── confusion_matrix.py │ │ ├── get_flops.py │ │ └── visualization_cam.py │ │ ├── dataset_converters │ │ ├── chase_db1.py │ │ ├── cityscapes.py │ │ ├── coco_stuff10k.py │ │ ├── coco_stuff164k.py │ │ ├── drive.py │ │ ├── hrf.py │ │ ├── isaid.py │ │ ├── levircd.py │ │ ├── loveda.py │ │ ├── nyu.py │ │ ├── pascal_context.py │ │ ├── potsdam.py │ │ ├── refuge.py │ │ ├── stare.py │ │ ├── synapse.py │ │ ├── vaihingen.py │ │ └── voc_aug.py │ │ ├── deployment │ │ └── pytorch2torchscript.py │ │ ├── dist_test.sh │ │ ├── dist_train.sh │ │ ├── misc │ │ ├── browse_dataset.py │ │ ├── print_config.py │ │ └── publish_model.py │ │ ├── model_converters │ │ ├── beit2mmseg.py │ │ ├── mit2mmseg.py │ │ ├── proteus2mmseg.py │ │ ├── san2mmseg.py │ │ ├── stdc2mmseg.py │ │ ├── swin2mmseg.py │ │ ├── twins2mmseg.py │ │ ├── vit2mmseg.py │ │ └── vitjax2mmseg.py │ │ ├── slurm_test.sh │ │ ├── slurm_train.sh │ │ ├── test.py │ │ ├── torchserve │ │ ├── mmseg2torchserve.py │ │ ├── mmseg_handler.py │ │ └── test_torchserve.py │ │ ├── train.py │ │ └── train_fix.py └── fine_grained │ ├── README.md │ ├── downstreams │ ├── caltech101.py │ ├── cub.py │ ├── eurosat.py │ ├── sun397.py │ └── voc2007.py │ ├── main_downstream_linear_dinov2.py │ ├── models_dinov2.py │ └── run_all_datasets.sh ├── fig ├── .DS_Store ├── deit.png ├── dino_bl.png ├── dino_s.png ├── general.png ├── less.png ├── proxy_dataset.png ├── resized_logo.png ├── teaser.png └── visual.png └── pretrain ├── README.md ├── __pycache__ ├── augmentations.cpython-39.pyc ├── datasets.cpython-39.pyc ├── losses_hint.cpython-39.pyc ├── models_clip.cpython-39.pyc ├── models_dinov2.cpython-39.pyc ├── models_proteus_clip.cpython-39.pyc ├── models_proteus_dinov2.cpython-39.pyc ├── models_proteus_synclr.cpython-39.pyc ├── models_synclr.cpython-39.pyc ├── samplers.cpython-39.pyc └── utils.cpython-39.pyc ├── augmentations.py ├── datasets.py ├── losses_hint.py ├── main.py ├── models_clip.py ├── models_dinov2.py ├── models_proteus_clip.py ├── models_proteus_dinov2.py ├── models_proteus_synclr.py ├── models_synclr.py ├── run_pretrain.sh ├── samplers.py └── utils.py /.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/.DS_Store -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 BeSpontaneous 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /eval/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/.DS_Store -------------------------------------------------------------------------------- /eval/ImageNet_probing/README.md: -------------------------------------------------------------------------------- 1 | # Linear Evaluation on ImageNet-1K 2 | 3 | 4 | ## Installation 5 | Please follow the installation instructions in `pretrain`. 6 | 7 | ## Dataset 8 | We prepare ImageNet-1K following the instructions in [DINOv2](https://github.com/facebookresearch/dinov2/tree/main?tab=readme-ov-file#data-preparation) (there are additional files needed compared to the pre-training stage). 9 | 10 | ## Training 11 | 1. Specify the config file with `config-file` in the training script `run_probing.sh` (see the config-loading sketch below). 12 | 2. Use the `pretrained-weights` parameter to provide the path to your pre-trained model. 13 | 3. Replace `imagenet_path` in the `train-dataset` and `val-dataset` parameters with the directory where your datasets are located. 14 | 4. Run the training script as follows: 15 | 16 | ``` 17 | bash run_probing.sh 18 | ``` 19 | 20 | ## Acknowledgment 21 | 22 | This part is heavily built upon [DINOv2](https://github.com/facebookresearch/dinov2). We gratefully thank the authors for their wonderful work. -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | __version__ = "0.0.1" 7 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/configs/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree.
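For orientation, the `config-file` values referenced in the README above name YAML files under `dinov2/configs` (they appear in the following files). Below is a minimal sketch of how such a config can be merged onto the package defaults with the `load_and_merge_config` helper defined in `dinov2/configs/__init__.py`; whether `run_probing.sh` passes a bare config name through this helper or a full YAML path to the entry point is an assumption here, not something the README specifies.

```python
# Minimal sketch (not part of the repo): resolving an eval config against the
# defaults in ssl_default_config.yaml via the helper from dinov2/configs.
from dinov2.configs import load_and_merge_config

cfg = load_and_merge_config("eval/vits14_pretrain")
print(cfg.student.arch)             # "vit_small", taken from the eval YAML
print(cfg.crops.global_crops_size)  # 224, used to set up the position embeddings
```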
5 | 6 | import pathlib 7 | 8 | from omegaconf import OmegaConf 9 | 10 | 11 | def load_config(config_name: str): 12 | config_filename = config_name + ".yaml" 13 | return OmegaConf.load(pathlib.Path(__file__).parent.resolve() / config_filename) 14 | 15 | 16 | dinov2_default_config = load_config("ssl_default_config") 17 | 18 | 19 | def load_and_merge_config(config_name: str): 20 | default_config = OmegaConf.create(dinov2_default_config) 21 | loaded_config = load_config(config_name) 22 | return OmegaConf.merge(default_config, loaded_config) 23 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/configs/eval/vitb14_pretrain.yaml: -------------------------------------------------------------------------------- 1 | student: 2 | arch: vit_base 3 | patch_size: 14 4 | crops: 5 | global_crops_size: 224 # this is to set up the position embeddings properly 6 | local_crops_size: 98 -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/configs/eval/vitb14_reg4_pretrain.yaml: -------------------------------------------------------------------------------- 1 | student: 2 | arch: vit_base 3 | patch_size: 14 4 | num_register_tokens: 4 5 | interpolate_antialias: true 6 | interpolate_offset: 0.0 7 | crops: 8 | global_crops_size: 518 # this is to set up the position embeddings properly 9 | local_crops_size: 98 -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/configs/eval/vitg14_pretrain.yaml: -------------------------------------------------------------------------------- 1 | student: 2 | arch: vit_giant2 3 | patch_size: 14 4 | ffn_layer: swiglufused 5 | crops: 6 | global_crops_size: 224 # this is to set up the position embeddings properly 7 | local_crops_size: 98 -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/configs/eval/vitg14_reg4_pretrain.yaml: -------------------------------------------------------------------------------- 1 | student: 2 | arch: vit_giant2 3 | patch_size: 14 4 | ffn_layer: swiglufused 5 | num_register_tokens: 4 6 | interpolate_antialias: true 7 | interpolate_offset: 0.0 8 | crops: 9 | global_crops_size: 518 # this is to set up the position embeddings properly 10 | local_crops_size: 98 -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/configs/eval/vitl14_pretrain.yaml: -------------------------------------------------------------------------------- 1 | student: 2 | arch: vit_large 3 | patch_size: 14 4 | crops: 5 | global_crops_size: 224 # this is to set up the position embeddings properly 6 | local_crops_size: 98 -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/configs/eval/vitl14_reg4_pretrain.yaml: -------------------------------------------------------------------------------- 1 | student: 2 | arch: vit_large 3 | patch_size: 14 4 | num_register_tokens: 4 5 | interpolate_antialias: true 6 | interpolate_offset: 0.0 7 | crops: 8 | global_crops_size: 518 # this is to set up the position embeddings properly 9 | local_crops_size: 98 -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/configs/eval/vits14_pretrain.yaml: -------------------------------------------------------------------------------- 1 | student: 2 | arch: vit_small 3 | patch_size: 14 4 | crops: 5 | 
global_crops_size: 224 # this is to set up the position embeddings properly 6 | local_crops_size: 98 -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/configs/eval/vits14_reg4_pretrain.yaml: -------------------------------------------------------------------------------- 1 | student: 2 | arch: vit_small 3 | patch_size: 14 4 | num_register_tokens: 4 5 | interpolate_antialias: true 6 | interpolate_offset: 0.0 7 | crops: 8 | global_crops_size: 518 # this is to set up the position embeddings properly 9 | local_crops_size: 98 -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/configs/train/vitg14.yaml: -------------------------------------------------------------------------------- 1 | dino: 2 | head_n_prototypes: 131072 3 | head_bottleneck_dim: 384 4 | ibot: 5 | separate_head: true 6 | head_n_prototypes: 131072 7 | train: 8 | batch_size_per_gpu: 12 9 | dataset_path: ImageNet22k 10 | centering: sinkhorn_knopp 11 | student: 12 | arch: vit_giant2 13 | patch_size: 14 14 | drop_path_rate: 0.4 15 | ffn_layer: swiglufused 16 | block_chunks: 4 17 | teacher: 18 | momentum_teacher: 0.994 19 | optim: 20 | epochs: 500 21 | weight_decay_end: 0.2 22 | base_lr: 2.0e-04 # learning rate for a batch size of 1024 23 | warmup_epochs: 80 24 | layerwise_decay: 1.0 25 | crops: 26 | local_crops_size: 98 -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/configs/train/vitl14.yaml: -------------------------------------------------------------------------------- 1 | dino: 2 | head_n_prototypes: 131072 3 | head_bottleneck_dim: 384 4 | ibot: 5 | separate_head: true 6 | head_n_prototypes: 131072 7 | train: 8 | batch_size_per_gpu: 32 9 | dataset_path: ImageNet22k 10 | centering: sinkhorn_knopp 11 | student: 12 | arch: vit_large 13 | patch_size: 14 14 | drop_path_rate: 0.4 15 | ffn_layer: swiglufused 16 | block_chunks: 4 17 | teacher: 18 | momentum_teacher: 0.994 19 | optim: 20 | epochs: 500 21 | weight_decay_end: 0.2 22 | base_lr: 2.0e-04 # learning rate for a batch size of 1024 23 | warmup_epochs: 80 24 | layerwise_decay: 1.0 25 | crops: 26 | local_crops_size: 98 -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/configs/train/vitl16_short.yaml: -------------------------------------------------------------------------------- 1 | # this corresponds to the default config 2 | train: 3 | dataset_path: ImageNet:split=TRAIN 4 | batch_size_per_gpu: 64 5 | student: 6 | block_chunks: 4 7 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/data/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 
5 | 6 | from .adapters import DatasetWithEnumeratedTargets 7 | from .loaders import make_data_loader, make_dataset, SamplerType 8 | from .collate import collate_data_and_cast 9 | from .masking import MaskingGenerator 10 | from .augmentations import DataAugmentationDINO 11 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/data/adapters.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from typing import Any, Tuple 7 | 8 | from torch.utils.data import Dataset 9 | 10 | 11 | class DatasetWithEnumeratedTargets(Dataset): 12 | def __init__(self, dataset): 13 | self._dataset = dataset 14 | 15 | def get_image_data(self, index: int) -> bytes: 16 | return self._dataset.get_image_data(index) 17 | 18 | def get_target(self, index: int) -> Tuple[Any, int]: 19 | target = self._dataset.get_target(index) 20 | return (index, target) 21 | 22 | def __getitem__(self, index: int) -> Tuple[Any, Tuple[Any, int]]: 23 | image, target = self._dataset[index] 24 | target = index if target is None else target 25 | return image, (index, target) 26 | 27 | def __len__(self) -> int: 28 | return len(self._dataset) 29 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/data/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from .image_net import ImageNet 7 | from .image_net_22k import ImageNet22k 8 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/data/datasets/decoders.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from io import BytesIO 7 | from typing import Any 8 | 9 | from PIL import Image 10 | 11 | 12 | class Decoder: 13 | def decode(self) -> Any: 14 | raise NotImplementedError 15 | 16 | 17 | class ImageDataDecoder(Decoder): 18 | def __init__(self, image_data: bytes) -> None: 19 | self._image_data = image_data 20 | 21 | def decode(self) -> Image: 22 | f = BytesIO(self._image_data) 23 | return Image.open(f).convert(mode="RGB") 24 | 25 | 26 | class TargetDecoder(Decoder): 27 | def __init__(self, target: Any): 28 | self._target = target 29 | 30 | def decode(self) -> Any: 31 | return self._target 32 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/data/datasets/extended.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 
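As a quick illustration of the adapter above: `DatasetWithEnumeratedTargets` leaves the wrapped samples untouched and only pairs each target with its sample index, which lets downstream evaluation code map extracted features back to their samples. A minimal sketch with a hypothetical toy dataset (all names below are invented for illustration):

```python
# Illustrative sketch (not from the repo): the wrapper returns (image, (index, target)).
from torch.utils.data import Dataset

from dinov2.data import DatasetWithEnumeratedTargets


class ToyDataset(Dataset):
    """Hypothetical two-sample dataset used only for this illustration."""

    def __init__(self):
        self.samples = [("img_a", 3), ("img_b", 7)]

    def __getitem__(self, index):
        return self.samples[index]

    def __len__(self):
        return len(self.samples)


wrapped = DatasetWithEnumeratedTargets(ToyDataset())
print(wrapped[1])    # ('img_b', (1, 7)) -- the original target paired with its index
print(len(wrapped))  # 2
```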
5 | 6 | from typing import Any, Tuple 7 | 8 | from torchvision.datasets import VisionDataset 9 | 10 | from .decoders import TargetDecoder, ImageDataDecoder 11 | 12 | 13 | class ExtendedVisionDataset(VisionDataset): 14 | def __init__(self, *args, **kwargs) -> None: 15 | super().__init__(*args, **kwargs) # type: ignore 16 | 17 | def get_image_data(self, index: int) -> bytes: 18 | raise NotImplementedError 19 | 20 | def get_target(self, index: int) -> Any: 21 | raise NotImplementedError 22 | 23 | def __getitem__(self, index: int) -> Tuple[Any, Any]: 24 | try: 25 | image_data = self.get_image_data(index) 26 | image = ImageDataDecoder(image_data).decode() 27 | except Exception as e: 28 | raise RuntimeError(f"can not read image for sample {index}") from e 29 | target = self.get_target(index) 30 | target = TargetDecoder(target).decode() 31 | 32 | if self.transforms is not None: 33 | image, target = self.transforms(image, target) 34 | 35 | return image, target 36 | 37 | def __len__(self) -> int: 38 | raise NotImplementedError 39 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/depth/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/depth/models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from .backbones import * # noqa: F403 7 | from .builder import BACKBONES, DEPTHER, HEADS, LOSSES, build_backbone, build_depther, build_head, build_loss 8 | from .decode_heads import * # noqa: F403 9 | from .depther import * # noqa: F403 10 | from .losses import * # noqa: F403 11 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/depth/models/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from .vision_transformer import DinoVisionTransformer 7 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/depth/models/backbones/vision_transformer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 
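To make the contract of `ExtendedVisionDataset` concrete: a subclass only has to supply raw image bytes, a target, and a length; decoding to RGB and applying the `(image, target)` transforms happen in the base class's `__getitem__`. A minimal, hypothetical sketch (the `FolderOfJpegs` class is not part of the repo):

```python
# Hypothetical subclass (not from the repo): supplies bytes, target and length;
# ExtendedVisionDataset.__getitem__ handles decoding and transforms.
from pathlib import Path

from dinov2.data.datasets.extended import ExtendedVisionDataset


class FolderOfJpegs(ExtendedVisionDataset):
    def __init__(self, root: str, transforms=None) -> None:
        super().__init__(root, transforms=transforms)
        self.paths = sorted(Path(root).glob("*.jpg"))

    def get_image_data(self, index: int) -> bytes:
        return self.paths[index].read_bytes()  # raw bytes; decoding happens in the base class

    def get_target(self, index: int) -> int:
        return 0  # a single dummy class, enough for the illustration

    def __len__(self) -> int:
        return len(self.paths)
```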
2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from mmcv.runner import BaseModule 7 | 8 | from ..builder import BACKBONES 9 | 10 | 11 | @BACKBONES.register_module() 12 | class DinoVisionTransformer(BaseModule): 13 | """Vision Transformer.""" 14 | 15 | def __init__(self, *args, **kwargs): 16 | super().__init__() 17 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/depth/models/decode_heads/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from .dpt_head import DPTHead 7 | from .linear_head import BNHead 8 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/depth/models/depther/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from .base import BaseDepther 7 | from .encoder_decoder import DepthEncoderDecoder 8 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/depth/models/losses/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from .gradientloss import GradientLoss 7 | from .sigloss import SigLoss 8 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/depth/ops/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from .wrappers import resize 7 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/depth/ops/wrappers.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 
5 | 6 | import warnings 7 | 8 | import torch.nn.functional as F 9 | 10 | 11 | def resize(input, size=None, scale_factor=None, mode="nearest", align_corners=None, warning=False): 12 | if warning: 13 | if size is not None and align_corners: 14 | input_h, input_w = tuple(int(x) for x in input.shape[2:]) 15 | output_h, output_w = tuple(int(x) for x in size) 16 | if output_h > input_h or output_w > input_w: # warn only when upsampling 17 | if ( 18 | (output_h > 1 and output_w > 1 and input_h > 1 and input_w > 1) 19 | and (output_h - 1) % (input_h - 1) 20 | and (output_w - 1) % (input_w - 1) 21 | ): 22 | warnings.warn( 23 | f"When align_corners={align_corners}, " 24 | "the output would be more aligned if " 25 | f"input size {(input_h, input_w)} is `x+1` and " 26 | f"out size {(output_h, output_w)} is `nx+1`" 27 | ) 28 | return F.interpolate(input, size, scale_factor, mode, align_corners) 29 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/segmentation/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/segmentation/hooks/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from .optimizer import DistOptimizerHook 7 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/segmentation/models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from .backbones import * # noqa: F403 7 | from .decode_heads import * # noqa: F403 8 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/segmentation/models/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from .vision_transformer import DinoVisionTransformer 7 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/segmentation/models/backbones/vision_transformer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree.
5 | 6 | from mmcv.runner import BaseModule 7 | from mmseg.models.builder import BACKBONES 8 | 9 | 10 | @BACKBONES.register_module() 11 | class DinoVisionTransformer(BaseModule): 12 | """Vision Transformer.""" 13 | 14 | def __init__( 15 | self, 16 | *args, 17 | **kwargs, 18 | ): 19 | super().__init__() 20 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/segmentation/models/decode_heads/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from .linear_head import BNHead 7 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/segmentation/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/segmentation_m2f/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from .core import * # noqa: F403 7 | from .models import * # noqa: F403 8 | from .ops import * # noqa: F403 9 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/segmentation_m2f/core/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from mmseg.core.evaluation import * # noqa: F403 7 | from mmseg.core.seg import * # noqa: F403 8 | 9 | from .anchor import * # noqa: F403 10 | from .box import * # noqa: F403 11 | from .utils import * # noqa: F403 12 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/segmentation_m2f/core/anchor/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from .point_generator import MlvlPointGenerator # noqa: F403 7 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/segmentation_m2f/core/anchor/builder.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 
5 | 6 | import warnings 7 | 8 | from mmcv.utils import Registry, build_from_cfg 9 | 10 | PRIOR_GENERATORS = Registry("Generator for anchors and points") 11 | 12 | ANCHOR_GENERATORS = PRIOR_GENERATORS 13 | 14 | 15 | def build_prior_generator(cfg, default_args=None): 16 | return build_from_cfg(cfg, PRIOR_GENERATORS, default_args) 17 | 18 | 19 | def build_anchor_generator(cfg, default_args=None): 20 | warnings.warn("``build_anchor_generator`` would be deprecated soon, please use " "``build_prior_generator`` ") 21 | return build_prior_generator(cfg, default_args=default_args) 22 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/segmentation_m2f/core/box/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from .builder import * # noqa: F403 7 | from .samplers import MaskPseudoSampler # noqa: F403 8 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/segmentation_m2f/core/box/builder.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from mmcv.utils import Registry, build_from_cfg 7 | 8 | BBOX_SAMPLERS = Registry("bbox_sampler") 9 | BBOX_CODERS = Registry("bbox_coder") 10 | 11 | 12 | def build_sampler(cfg, **default_args): 13 | """Builder of box sampler.""" 14 | return build_from_cfg(cfg, BBOX_SAMPLERS, default_args) 15 | 16 | 17 | def build_bbox_coder(cfg, **default_args): 18 | """Builder of box coder.""" 19 | return build_from_cfg(cfg, BBOX_CODERS, default_args) 20 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/segmentation_m2f/core/box/samplers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from .mask_pseudo_sampler import MaskPseudoSampler # noqa: F403 7 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/segmentation_m2f/core/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from .dist_utils import reduce_mean 7 | from .misc import add_prefix, multi_apply 8 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/segmentation_m2f/core/utils/dist_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 
2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | import torch.distributed as dist 7 | 8 | 9 | def reduce_mean(tensor): 10 | """Obtain the mean of tensor on different GPUs.""" 11 | if not (dist.is_available() and dist.is_initialized()): 12 | return tensor 13 | tensor = tensor.clone() 14 | dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM) 15 | return tensor 16 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/segmentation_m2f/models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from .backbones import * # noqa: F403 7 | from .builder import MASK_ASSIGNERS, MATCH_COST, TRANSFORMER, build_assigner, build_match_cost 8 | from .decode_heads import * # noqa: F403 9 | from .losses import * # noqa: F403 10 | from .plugins import * # noqa: F403 11 | from .segmentors import * # noqa: F403 12 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/segmentation_m2f/models/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from .vit_adapter import ViTAdapter 7 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/segmentation_m2f/models/backbones/drop_path.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | # References: 7 | # https://github.com/facebookresearch/dino/blob/master/vision_transformer.py 8 | # https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/drop.py 9 | 10 | from torch import nn 11 | 12 | 13 | def drop_path(x, drop_prob: float = 0.0, training: bool = False): 14 | if drop_prob == 0.0 or not training: 15 | return x 16 | keep_prob = 1 - drop_prob 17 | shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets 18 | random_tensor = x.new_empty(shape).bernoulli_(keep_prob) 19 | if keep_prob > 0.0: 20 | random_tensor.div_(keep_prob) 21 | return x * random_tensor 22 | 23 | 24 | class DropPath(nn.Module): 25 | """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" 26 | 27 | def __init__(self, drop_prob: float = 0.0): 28 | super(DropPath, self).__init__() 29 | self.drop_prob = drop_prob 30 | 31 | def forward(self, x): 32 | return drop_path(x, self.drop_prob, self.training) 33 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/segmentation_m2f/models/builder.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from mmcv.utils import Registry 7 | 8 | TRANSFORMER = Registry("Transformer") 9 | MASK_ASSIGNERS = Registry("mask_assigner") 10 | MATCH_COST = Registry("match_cost") 11 | 12 | 13 | def build_match_cost(cfg): 14 | """Build Match Cost.""" 15 | return MATCH_COST.build(cfg) 16 | 17 | 18 | def build_assigner(cfg): 19 | """Build Assigner.""" 20 | return MASK_ASSIGNERS.build(cfg) 21 | 22 | 23 | def build_transformer(cfg): 24 | """Build Transformer.""" 25 | return TRANSFORMER.build(cfg) 26 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/segmentation_m2f/models/decode_heads/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from .mask2former_head import Mask2FormerHead 7 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/segmentation_m2f/models/losses/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from .cross_entropy_loss import CrossEntropyLoss, binary_cross_entropy, cross_entropy, mask_cross_entropy 7 | from .dice_loss import DiceLoss 8 | from .match_costs import ClassificationCost, CrossEntropyLossCost, DiceCost 9 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/segmentation_m2f/models/plugins/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder 7 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/segmentation_m2f/models/segmentors/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from .encoder_decoder_mask2former import EncoderDecoderMask2Former 7 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/segmentation_m2f/models/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 
5 | 6 | from .assigner import MaskHungarianAssigner 7 | from .point_sample import get_uncertain_point_coords_with_randomness 8 | from .positional_encoding import LearnedPositionalEncoding, SinePositionalEncoding 9 | from .transformer import DetrTransformerDecoder, DetrTransformerDecoderLayer, DynamicConv, Transformer 10 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/eval/segmentation_m2f/ops/modules/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | # References: 7 | # https://github.com/fundamentalvision/Deformable-DETR/tree/main/models/ops/modules 8 | # https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 9 | 10 | from .ms_deform_attn import MSDeformAttn 11 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/hub/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/hub/depth/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from .decode_heads import BNHead, DPTHead 7 | from .encoder_decoder import DepthEncoderDecoder 8 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/hub/depth/ops.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | import warnings 7 | 8 | import torch.nn.functional as F 9 | 10 | 11 | def resize(input, size=None, scale_factor=None, mode="nearest", align_corners=None, warning=False): 12 | if warning: 13 | if size is not None and align_corners: 14 | input_h, input_w = tuple(int(x) for x in input.shape[2:]) 15 | output_h, output_w = tuple(int(x) for x in size) 16 | if output_h > input_h or output_w > input_w: 17 | if ( 18 | (output_h > 1 and output_w > 1 and input_h > 1 and input_w > 1) 19 | and (output_h - 1) % (input_h - 1) 20 | and (output_w - 1) % (input_w - 1) 21 | ): 22 | warnings.warn( 23 | f"When align_corners={align_corners}, " 24 | "the output would be more aligned if " 25 | f"input size {(input_h, input_w)} is `x+1` and " 26 | f"out size {(output_h, output_w)} is `nx+1`" 27 | ) 28 | return F.interpolate(input, size, scale_factor, mode, align_corners) 29 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/layers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from .dino_head import DINOHead 7 | from .mlp import Mlp 8 | from .patch_embed import PatchEmbed 9 | from .swiglu_ffn import SwiGLUFFN, SwiGLUFFNFused 10 | from .block import NestedTensorBlock 11 | from .attention import MemEffAttention 12 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/layers/drop_path.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | # References: 7 | # https://github.com/facebookresearch/dino/blob/master/vision_transformer.py 8 | # https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/drop.py 9 | 10 | 11 | from torch import nn 12 | 13 | 14 | def drop_path(x, drop_prob: float = 0.0, training: bool = False): 15 | if drop_prob == 0.0 or not training: 16 | return x 17 | keep_prob = 1 - drop_prob 18 | shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets 19 | random_tensor = x.new_empty(shape).bernoulli_(keep_prob) 20 | if keep_prob > 0.0: 21 | random_tensor.div_(keep_prob) 22 | output = x * random_tensor 23 | return output 24 | 25 | 26 | class DropPath(nn.Module): 27 | """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" 28 | 29 | def __init__(self, drop_prob=None): 30 | super(DropPath, self).__init__() 31 | self.drop_prob = drop_prob 32 | 33 | def forward(self, x): 34 | return drop_path(x, self.drop_prob, self.training) 35 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/layers/layer_scale.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | # Modified from: https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/vision_transformer.py#L103-L110 7 | 8 | from typing import Union 9 | 10 | import torch 11 | from torch import Tensor 12 | from torch import nn 13 | 14 | 15 | class LayerScale(nn.Module): 16 | def __init__( 17 | self, 18 | dim: int, 19 | init_values: Union[float, Tensor] = 1e-5, 20 | inplace: bool = False, 21 | ) -> None: 22 | super().__init__() 23 | self.inplace = inplace 24 | self.gamma = nn.Parameter(init_values * torch.ones(dim)) 25 | 26 | def forward(self, x: Tensor) -> Tensor: 27 | return x.mul_(self.gamma) if self.inplace else x * self.gamma 28 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/loss/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 
5 | 6 | from .dino_clstoken_loss import DINOLoss 7 | from .ibot_patch_loss import iBOTPatchLoss 8 | from .koleo_loss import KoLeoLoss 9 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/run/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/train/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | 6 | from .train import get_args_parser, main 7 | from .ssl_meta_arch import SSLMetaArch 8 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 5 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/dinov2/utils/dtype.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Meta Platforms, Inc. and affiliates. 2 | # 3 | # This source code is licensed under the Apache License, Version 2.0 4 | # found in the LICENSE file in the root directory of this source tree. 
5 | 6 | 7 | from typing import Dict, Union 8 | 9 | import numpy as np 10 | import torch 11 | 12 | 13 | TypeSpec = Union[str, np.dtype, torch.dtype] 14 | 15 | 16 | _NUMPY_TO_TORCH_DTYPE: Dict[np.dtype, torch.dtype] = { 17 | np.dtype("bool"): torch.bool, 18 | np.dtype("uint8"): torch.uint8, 19 | np.dtype("int8"): torch.int8, 20 | np.dtype("int16"): torch.int16, 21 | np.dtype("int32"): torch.int32, 22 | np.dtype("int64"): torch.int64, 23 | np.dtype("float16"): torch.float16, 24 | np.dtype("float32"): torch.float32, 25 | np.dtype("float64"): torch.float64, 26 | np.dtype("complex64"): torch.complex64, 27 | np.dtype("complex128"): torch.complex128, 28 | } 29 | 30 | 31 | def as_torch_dtype(dtype: TypeSpec) -> torch.dtype: 32 | if isinstance(dtype, torch.dtype): 33 | return dtype 34 | if isinstance(dtype, str): 35 | dtype = np.dtype(dtype) 36 | assert isinstance(dtype, np.dtype), f"Expected an instance of numpy dtype, got {type(dtype)}" 37 | return _NUMPY_TO_TORCH_DTYPE[dtype] 38 | -------------------------------------------------------------------------------- /eval/ImageNet_probing/run_probing.sh: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | python -m torch.distributed.launch --nproc_per_node=4 --use_env dinov2/eval/linear_proteus.py \ 5 | --config-file dinov2/configs/eval/vitl14_pretrain.yaml \ 6 | --pretrained-weights path_pretrained_model \ 7 | --train-dataset ImageNet:split=TRAIN:root=imagenet_path:extra=imagenet_path \ 8 | --val-dataset ImageNet:split=VAL:root=imagenet_path:extra=imagenet_path \ 9 | --batch-size 128 --epoch-length 2502 \ 10 | --epochs 10 \ 11 | --output-dir log/linear_probing; -------------------------------------------------------------------------------- /eval/dense_prediction/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/.DS_Store -------------------------------------------------------------------------------- /eval/dense_prediction/README.md: -------------------------------------------------------------------------------- 1 | # Semantic Segmentation and Depth Estimation 2 | 3 | 4 | ## Installation 5 | Please follow the installation instructions in [mmsegmentation](https://github.com/open-mmlab/mmsegmentation/blob/main/docs/en/get_started.md#installation). 6 | 7 | ## Dataset 8 | Please follow the guidelines in mmsegmentation to prepare [ADE20K](https://github.com/open-mmlab/mmsegmentation/blob/main/docs/en/user_guides/2_dataset_prepare.md#ade20k) for Semantic Segmentation and [NYU-Depth V2](https://github.com/open-mmlab/mmsegmentation/blob/main/docs/en/user_guides/2_dataset_prepare.md#nyu) for Depth Estimation. 9 | 10 | ## Training 11 | 1. Run the following script to convert the pretrained checkpoint into the expected format: 12 | 13 | ``` 14 | bash prepare_ckpt.sh 15 | ``` 16 | 2. Specify the config file from `configs` in `run.sh`. 17 | 3. Simply run the training script as follows: 18 | 19 | ``` 20 | bash run.sh 21 | ``` 22 | 23 | ## Acknowledgment 24 | 25 | This part is heavily built upon [mmsegmentation](https://github.com/open-mmlab/mmsegmentation/tree/main) and [Monocular-Depth-Estimation-Toolbox](https://github.com/zhyever/Monocular-Depth-Estimation-Toolbox/tree/main). We sincerely thank the authors for their wonderful work.
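For reference, the config files referenced in step 2 follow the standard mmsegmentation `_base_` composition pattern: a top-level config pulls in a model definition, a dataset, a schedule, and the default runtime from the `_base_` directories, then overrides whatever it needs (most importantly, the path to the checkpoint produced by `prepare_ckpt.sh`). The snippet below is only a minimal sketch of that pattern; the ADE20K dataset base file and the checkpoint path are placeholders rather than files guaranteed by this repository.

```
# Hypothetical top-level config illustrating how the _base_ pieces compose.
# The model and schedule bases exist under configs/_base_ in this repo;
# the ADE20K dataset base and the checkpoint path are placeholders.
_base_ = [
    '../_base_/models/linear_mae_probing.py',   # frozen ViT backbone + linear FCN head
    '../_base_/datasets/ade20k.py',             # assumed standard mmseg ADE20K pipeline
    '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_160k.py',
]

# Point the backbone at the checkpoint converted by prepare_ckpt.sh.
model = dict(
    backbone=dict(
        init_cfg=dict(type='Pretrained', checkpoint='path_to_converted_ckpt.pth')))
```

A config composed this way can then be passed to the distributed launcher shipped later in this repository, e.g. `bash mmseg/.mim/tools/dist_train.sh path_to_config.py 4`, which is presumably what `run.sh` wraps.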
-------------------------------------------------------------------------------- /eval/dense_prediction/configs/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/configs/.DS_Store -------------------------------------------------------------------------------- /eval/dense_prediction/configs/_base_/default_runtime.py: -------------------------------------------------------------------------------- 1 | default_scope = 'mmseg' 2 | env_cfg = dict( 3 | cudnn_benchmark=True, 4 | mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), 5 | dist_cfg=dict(backend='nccl'), 6 | ) 7 | vis_backends = [dict(type='LocalVisBackend')] 8 | visualizer = dict( 9 | type='SegLocalVisualizer', vis_backends=vis_backends, name='visualizer') 10 | log_processor = dict(by_epoch=False) 11 | log_level = 'INFO' 12 | load_from = None 13 | resume = False 14 | 15 | tta_model = dict(type='SegTTAModel') 16 | -------------------------------------------------------------------------------- /eval/dense_prediction/configs/_base_/models/linear_mae_probing.py: -------------------------------------------------------------------------------- 1 | norm_cfg = dict(type='SyncBN', requires_grad=True) 2 | data_preprocessor = dict( 3 | type='SegDataPreProcessor', 4 | mean=[123.675, 116.28, 103.53], 5 | std=[58.395, 57.12, 57.375], 6 | bgr_to_rgb=True, 7 | pad_val=0, 8 | seg_pad_val=255) 9 | model = dict( 10 | type='EncoderDecoder', 11 | data_preprocessor=data_preprocessor, 12 | pretrained=None, 13 | backbone=dict( 14 | type='MAE_fix', 15 | img_size=(518, 518), 16 | patch_size=14, 17 | embed_dims=384, 18 | num_layers=12, 19 | num_heads=6, 20 | mlp_ratio=4, 21 | init_values=1.0, 22 | drop_path_rate=0.1, 23 | out_indices=[3, 5, 7, 11], 24 | frozen_stages=12), 25 | decode_head=dict( 26 | type='FCNHead', 27 | in_channels=[384, 384, 384, 384], 28 | in_index=[0, 1, 2, 3], 29 | input_transform='resize_concat', 30 | channels=1536, 31 | num_convs=0, 32 | concat_input=False, 33 | dropout_ratio=0, 34 | num_classes=150, 35 | norm_cfg=norm_cfg, 36 | align_corners=False, 37 | loss_decode=dict( 38 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 39 | # model training and testing settings 40 | train_cfg=dict(), 41 | test_cfg=dict(mode='whole')) 42 | -------------------------------------------------------------------------------- /eval/dense_prediction/configs/_base_/schedules/schedule_160k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer, clip_grad=None) 4 | # learning policy 5 | param_scheduler = [ 6 | dict( 7 | type='PolyLR', 8 | eta_min=1e-4, 9 | power=0.9, 10 | begin=0, 11 | end=160000, 12 | by_epoch=False) 13 | ] 14 | # training schedule for 160k 15 | train_cfg = dict( 16 | type='IterBasedTrainLoop', max_iters=160000, val_interval=16000) 17 | val_cfg = dict(type='ValLoop') 18 | test_cfg = dict(type='TestLoop') 19 | default_hooks = dict( 20 | timer=dict(type='IterTimerHook'), 21 | logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), 22 | param_scheduler=dict(type='ParamSchedulerHook'), 23 | checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=16000), 24 | sampler_seed=dict(type='DistSamplerSeedHook'), 25 | 
visualization=dict(type='SegVisualizationHook')) 26 | -------------------------------------------------------------------------------- /eval/dense_prediction/configs/_base_/schedules/schedule_20k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer, clip_grad=None) 4 | # learning policy 5 | param_scheduler = [ 6 | dict( 7 | type='PolyLR', 8 | eta_min=1e-4, 9 | power=0.9, 10 | begin=0, 11 | end=20000, 12 | by_epoch=False) 13 | ] 14 | # training schedule for 20k 15 | train_cfg = dict(type='IterBasedTrainLoop', max_iters=20000, val_interval=2000) 16 | val_cfg = dict(type='ValLoop') 17 | test_cfg = dict(type='TestLoop') 18 | default_hooks = dict( 19 | timer=dict(type='IterTimerHook'), 20 | logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), 21 | param_scheduler=dict(type='ParamSchedulerHook'), 22 | checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=2000), 23 | sampler_seed=dict(type='DistSamplerSeedHook'), 24 | visualization=dict(type='SegVisualizationHook')) 25 | -------------------------------------------------------------------------------- /eval/dense_prediction/configs/_base_/schedules/schedule_240k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer, clip_grad=None) 4 | # learning policy 5 | param_scheduler = [ 6 | dict( 7 | type='PolyLR', 8 | eta_min=1e-4, 9 | power=0.9, 10 | begin=0, 11 | end=240000, 12 | by_epoch=False) 13 | ] 14 | # training schedule for 240k 15 | train_cfg = dict( 16 | type='IterBasedTrainLoop', max_iters=240000, val_interval=24000) 17 | val_cfg = dict(type='ValLoop') 18 | test_cfg = dict(type='TestLoop') 19 | default_hooks = dict( 20 | timer=dict(type='IterTimerHook'), 21 | logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), 22 | param_scheduler=dict(type='ParamSchedulerHook'), 23 | checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=24000), 24 | sampler_seed=dict(type='DistSamplerSeedHook'), 25 | visualization=dict(type='SegVisualizationHook')) 26 | -------------------------------------------------------------------------------- /eval/dense_prediction/configs/_base_/schedules/schedule_25k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='AdamW', lr=0.001, weight_decay=0.1) 3 | optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer, clip_grad=None) 4 | # learning policy 5 | param_scheduler = [ 6 | dict( 7 | type='LinearLR', start_factor=3e-2, begin=0, end=12000, 8 | by_epoch=False), 9 | dict( 10 | type='PolyLRRatio', 11 | eta_min_ratio=3e-2, 12 | power=0.9, 13 | begin=12000, 14 | end=24000, 15 | by_epoch=False), 16 | dict(type='ConstantLR', by_epoch=False, factor=1, begin=24000, end=25000) 17 | ] 18 | # training schedule for 25k 19 | train_cfg = dict(type='IterBasedTrainLoop', max_iters=25000, val_interval=1000) 20 | val_cfg = dict(type='ValLoop') 21 | test_cfg = dict(type='TestLoop') 22 | default_hooks = dict( 23 | timer=dict(type='IterTimerHook'), 24 | logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), 25 | param_scheduler=dict(type='ParamSchedulerHook'), 26 | checkpoint=dict(type='CheckpointHook', by_epoch=False, 
interval=2000), 27 | sampler_seed=dict(type='DistSamplerSeedHook'), 28 | visualization=dict(type='SegVisualizationHook')) 29 | -------------------------------------------------------------------------------- /eval/dense_prediction/configs/_base_/schedules/schedule_320k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer, clip_grad=None) 4 | # learning policy 5 | param_scheduler = [ 6 | dict( 7 | type='PolyLR', 8 | eta_min=1e-4, 9 | power=0.9, 10 | begin=0, 11 | end=320000, 12 | by_epoch=False) 13 | ] 14 | # training schedule for 320k 15 | train_cfg = dict( 16 | type='IterBasedTrainLoop', max_iters=320000, val_interval=32000) 17 | val_cfg = dict(type='ValLoop') 18 | test_cfg = dict(type='TestLoop') 19 | default_hooks = dict( 20 | timer=dict(type='IterTimerHook'), 21 | logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), 22 | param_scheduler=dict(type='ParamSchedulerHook'), 23 | checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=32000), 24 | sampler_seed=dict(type='DistSamplerSeedHook'), 25 | visualization=dict(type='SegVisualizationHook')) 26 | -------------------------------------------------------------------------------- /eval/dense_prediction/configs/_base_/schedules/schedule_40k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer, clip_grad=None) 4 | # learning policy 5 | param_scheduler = [ 6 | dict( 7 | type='PolyLR', 8 | eta_min=1e-4, 9 | power=0.9, 10 | begin=0, 11 | end=40000, 12 | by_epoch=False) 13 | ] 14 | # training schedule for 40k 15 | train_cfg = dict(type='IterBasedTrainLoop', max_iters=40000, val_interval=4000) 16 | val_cfg = dict(type='ValLoop') 17 | test_cfg = dict(type='TestLoop') 18 | default_hooks = dict( 19 | timer=dict(type='IterTimerHook'), 20 | logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), 21 | param_scheduler=dict(type='ParamSchedulerHook'), 22 | checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=4000), 23 | sampler_seed=dict(type='DistSamplerSeedHook'), 24 | visualization=dict(type='SegVisualizationHook')) 25 | -------------------------------------------------------------------------------- /eval/dense_prediction/configs/_base_/schedules/schedule_60k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer, clip_grad=None) 4 | # learning policy 5 | param_scheduler = [ 6 | dict( 7 | type='PolyLR', 8 | eta_min=1e-4, 9 | power=0.9, 10 | begin=0, 11 | end=60000, 12 | by_epoch=False) 13 | ] 14 | # training schedule for 80k 15 | train_cfg = dict(type='IterBasedTrainLoop', max_iters=60000, val_interval=2000) 16 | val_cfg = dict(type='ValLoop') 17 | test_cfg = dict(type='TestLoop') 18 | default_hooks = dict( 19 | timer=dict(type='IterTimerHook'), 20 | logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), 21 | param_scheduler=dict(type='ParamSchedulerHook'), 22 | checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=2000, max_keep_ckpts=10), 23 | sampler_seed=dict(type='DistSamplerSeedHook'), 24 | 
visualization=dict(type='SegVisualizationHook')) -------------------------------------------------------------------------------- /eval/dense_prediction/configs/_base_/schedules/schedule_80k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer, clip_grad=None) 4 | # learning policy 5 | param_scheduler = [ 6 | dict( 7 | type='PolyLR', 8 | eta_min=1e-4, 9 | power=0.9, 10 | begin=0, 11 | end=80000, 12 | by_epoch=False) 13 | ] 14 | # training schedule for 80k 15 | train_cfg = dict(type='IterBasedTrainLoop', max_iters=80000, val_interval=8000) 16 | val_cfg = dict(type='ValLoop') 17 | test_cfg = dict(type='TestLoop') 18 | default_hooks = dict( 19 | timer=dict(type='IterTimerHook'), 20 | logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), 21 | param_scheduler=dict(type='ParamSchedulerHook'), 22 | checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=8000), 23 | sampler_seed=dict(type='DistSamplerSeedHook'), 24 | visualization=dict(type='SegVisualizationHook')) 25 | -------------------------------------------------------------------------------- /eval/dense_prediction/configs/dpt/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/configs/dpt/.DS_Store -------------------------------------------------------------------------------- /eval/dense_prediction/configs/dpt/metafile.yaml: -------------------------------------------------------------------------------- 1 | Collections: 2 | - Name: DPT 3 | License: Apache License 2.0 4 | Metadata: 5 | Training Data: 6 | - ADE20K 7 | Paper: 8 | Title: Vision Transformer for Dense Prediction 9 | URL: https://arxiv.org/abs/2103.13413 10 | README: configs/dpt/README.md 11 | Frameworks: 12 | - PyTorch 13 | Models: 14 | - Name: dpt_vit-b16_8xb2-160k_ade20k-512x512 15 | In Collection: DPT 16 | Results: 17 | Task: Semantic Segmentation 18 | Dataset: ADE20K 19 | Metrics: 20 | mIoU: 46.97 21 | mIoU(ms+flip): 48.34 22 | Config: configs/dpt/dpt_vit-b16_8xb2-160k_ade20k-512x512.py 23 | Metadata: 24 | Training Data: ADE20K 25 | Batch Size: 16 26 | Architecture: 27 | - ViT-B 28 | - DPT 29 | Training Resources: 8x V100 GPUS 30 | Memory (GB): 8.09 31 | Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dpt/dpt_vit-b16_512x512_160k_ade20k/dpt_vit-b16_512x512_160k_ade20k-db31cf52.pth 32 | Training log: https://download.openmmlab.com/mmsegmentation/v0.5/dpt/dpt_vit-b16_512x512_160k_ade20k/dpt_vit-b16_512x512_160k_ade20k-20210809_172025.log.json 33 | Paper: 34 | Title: Vision Transformer for Dense Prediction 35 | URL: https://arxiv.org/abs/2103.13413 36 | Code: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/dpt_head.py#L215 37 | Framework: PyTorch 38 | -------------------------------------------------------------------------------- /eval/dense_prediction/configs/mae/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/configs/mae/.DS_Store -------------------------------------------------------------------------------- /eval/dense_prediction/configs/mae/metafile.yaml: 
-------------------------------------------------------------------------------- 1 | Models: 2 | - Name: mae-base_upernet_8xb2-amp-160k_ade20k-512x512 3 | In Collection: UPerNet 4 | Results: 5 | Task: Semantic Segmentation 6 | Dataset: ADE20K 7 | Metrics: 8 | mIoU: 48.13 9 | mIoU(ms+flip): 48.7 10 | Config: configs/mae/mae-base_upernet_8xb2-amp-160k_ade20k-512x512.py 11 | Metadata: 12 | Training Data: ADE20K 13 | Batch Size: 16 14 | Architecture: 15 | - ViT-B 16 | - UPerNet 17 | Training Resources: 8x V100 GPUS 18 | Memory (GB): 9.96 19 | Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mae/upernet_mae-base_fp16_8x2_512x512_160k_ade20k/upernet_mae-base_fp16_8x2_512x512_160k_ade20k_20220426_174752-f92a2975.pth 20 | Training log: https://download.openmmlab.com/mmsegmentation/v0.5/mae/upernet_mae-base_fp16_8x2_512x512_160k_ade20k/upernet_mae-base_fp16_8x2_512x512_160k_ade20k_20220426_174752.log.json 21 | Paper: 22 | Title: Masked Autoencoders Are Scalable Vision Learners 23 | URL: https://arxiv.org/abs/2111.06377 24 | Code: https://github.com/open-mmlab/mmsegmentation/blob/v0.24.0/mmseg/models/backbones/mae.py#L46 25 | Framework: PyTorch 26 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/.mim/configs/_base_/default_runtime.py: -------------------------------------------------------------------------------- 1 | default_scope = 'mmseg' 2 | env_cfg = dict( 3 | cudnn_benchmark=True, 4 | mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), 5 | dist_cfg=dict(backend='nccl'), 6 | ) 7 | vis_backends = [dict(type='LocalVisBackend')] 8 | visualizer = dict( 9 | type='SegLocalVisualizer', vis_backends=vis_backends, name='visualizer') 10 | log_processor = dict(by_epoch=False) 11 | log_level = 'INFO' 12 | load_from = None 13 | resume = False 14 | 15 | tta_model = dict(type='SegTTAModel') 16 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/.mim/configs/_base_/models/linear_mae_probing.py: -------------------------------------------------------------------------------- 1 | norm_cfg = dict(type='SyncBN', requires_grad=True) 2 | data_preprocessor = dict( 3 | type='SegDataPreProcessor', 4 | mean=[123.675, 116.28, 103.53], 5 | std=[58.395, 57.12, 57.375], 6 | bgr_to_rgb=True, 7 | pad_val=0, 8 | seg_pad_val=255) 9 | model = dict( 10 | type='EncoderDecoder', 11 | data_preprocessor=data_preprocessor, 12 | pretrained=None, 13 | backbone=dict( 14 | type='MAE_fix', 15 | img_size=(518, 518), 16 | patch_size=14, 17 | embed_dims=384, 18 | num_layers=12, 19 | num_heads=6, 20 | mlp_ratio=4, 21 | init_values=1.0, 22 | drop_path_rate=0.1, 23 | out_indices=[3, 5, 7, 11], 24 | frozen_stages=12), 25 | decode_head=dict( 26 | type='FCNHead', 27 | in_channels=[384, 384, 384, 384], 28 | in_index=[0, 1, 2, 3], 29 | input_transform='resize_concat', 30 | channels=1536, 31 | num_convs=0, 32 | concat_input=False, 33 | dropout_ratio=0, 34 | num_classes=150, 35 | norm_cfg=norm_cfg, 36 | align_corners=False, 37 | loss_decode=dict( 38 | type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), 39 | # model training and testing settings 40 | train_cfg=dict(), 41 | test_cfg=dict(mode='whole')) 42 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/.mim/configs/_base_/schedules/schedule_160k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, 
momentum=0.9, weight_decay=0.0005) 3 | optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer, clip_grad=None) 4 | # learning policy 5 | param_scheduler = [ 6 | dict( 7 | type='PolyLR', 8 | eta_min=1e-4, 9 | power=0.9, 10 | begin=0, 11 | end=160000, 12 | by_epoch=False) 13 | ] 14 | # training schedule for 160k 15 | train_cfg = dict( 16 | type='IterBasedTrainLoop', max_iters=160000, val_interval=16000) 17 | val_cfg = dict(type='ValLoop') 18 | test_cfg = dict(type='TestLoop') 19 | default_hooks = dict( 20 | timer=dict(type='IterTimerHook'), 21 | logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), 22 | param_scheduler=dict(type='ParamSchedulerHook'), 23 | checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=16000), 24 | sampler_seed=dict(type='DistSamplerSeedHook'), 25 | visualization=dict(type='SegVisualizationHook')) 26 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/.mim/configs/_base_/schedules/schedule_20k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer, clip_grad=None) 4 | # learning policy 5 | param_scheduler = [ 6 | dict( 7 | type='PolyLR', 8 | eta_min=1e-4, 9 | power=0.9, 10 | begin=0, 11 | end=20000, 12 | by_epoch=False) 13 | ] 14 | # training schedule for 20k 15 | train_cfg = dict(type='IterBasedTrainLoop', max_iters=20000, val_interval=2000) 16 | val_cfg = dict(type='ValLoop') 17 | test_cfg = dict(type='TestLoop') 18 | default_hooks = dict( 19 | timer=dict(type='IterTimerHook'), 20 | logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), 21 | param_scheduler=dict(type='ParamSchedulerHook'), 22 | checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=2000), 23 | sampler_seed=dict(type='DistSamplerSeedHook'), 24 | visualization=dict(type='SegVisualizationHook')) 25 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/.mim/configs/_base_/schedules/schedule_240k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer, clip_grad=None) 4 | # learning policy 5 | param_scheduler = [ 6 | dict( 7 | type='PolyLR', 8 | eta_min=1e-4, 9 | power=0.9, 10 | begin=0, 11 | end=240000, 12 | by_epoch=False) 13 | ] 14 | # training schedule for 240k 15 | train_cfg = dict( 16 | type='IterBasedTrainLoop', max_iters=240000, val_interval=24000) 17 | val_cfg = dict(type='ValLoop') 18 | test_cfg = dict(type='TestLoop') 19 | default_hooks = dict( 20 | timer=dict(type='IterTimerHook'), 21 | logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), 22 | param_scheduler=dict(type='ParamSchedulerHook'), 23 | checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=24000), 24 | sampler_seed=dict(type='DistSamplerSeedHook'), 25 | visualization=dict(type='SegVisualizationHook')) 26 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/.mim/configs/_base_/schedules/schedule_25k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='AdamW', lr=0.001, weight_decay=0.1) 3 | optim_wrapper = 
dict(type='OptimWrapper', optimizer=optimizer, clip_grad=None) 4 | # learning policy 5 | param_scheduler = [ 6 | dict( 7 | type='LinearLR', start_factor=3e-2, begin=0, end=12000, 8 | by_epoch=False), 9 | dict( 10 | type='PolyLRRatio', 11 | eta_min_ratio=3e-2, 12 | power=0.9, 13 | begin=12000, 14 | end=24000, 15 | by_epoch=False), 16 | dict(type='ConstantLR', by_epoch=False, factor=1, begin=24000, end=25000) 17 | ] 18 | # training schedule for 25k 19 | train_cfg = dict(type='IterBasedTrainLoop', max_iters=25000, val_interval=1000) 20 | val_cfg = dict(type='ValLoop') 21 | test_cfg = dict(type='TestLoop') 22 | default_hooks = dict( 23 | timer=dict(type='IterTimerHook'), 24 | logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), 25 | param_scheduler=dict(type='ParamSchedulerHook'), 26 | checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=2000), 27 | sampler_seed=dict(type='DistSamplerSeedHook'), 28 | visualization=dict(type='SegVisualizationHook')) 29 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/.mim/configs/_base_/schedules/schedule_320k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer, clip_grad=None) 4 | # learning policy 5 | param_scheduler = [ 6 | dict( 7 | type='PolyLR', 8 | eta_min=1e-4, 9 | power=0.9, 10 | begin=0, 11 | end=320000, 12 | by_epoch=False) 13 | ] 14 | # training schedule for 320k 15 | train_cfg = dict( 16 | type='IterBasedTrainLoop', max_iters=320000, val_interval=32000) 17 | val_cfg = dict(type='ValLoop') 18 | test_cfg = dict(type='TestLoop') 19 | default_hooks = dict( 20 | timer=dict(type='IterTimerHook'), 21 | logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), 22 | param_scheduler=dict(type='ParamSchedulerHook'), 23 | checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=32000), 24 | sampler_seed=dict(type='DistSamplerSeedHook'), 25 | visualization=dict(type='SegVisualizationHook')) 26 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/.mim/configs/_base_/schedules/schedule_40k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer, clip_grad=None) 4 | # learning policy 5 | param_scheduler = [ 6 | dict( 7 | type='PolyLR', 8 | eta_min=1e-4, 9 | power=0.9, 10 | begin=0, 11 | end=40000, 12 | by_epoch=False) 13 | ] 14 | # training schedule for 40k 15 | train_cfg = dict(type='IterBasedTrainLoop', max_iters=40000, val_interval=4000) 16 | val_cfg = dict(type='ValLoop') 17 | test_cfg = dict(type='TestLoop') 18 | default_hooks = dict( 19 | timer=dict(type='IterTimerHook'), 20 | logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), 21 | param_scheduler=dict(type='ParamSchedulerHook'), 22 | checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=4000), 23 | sampler_seed=dict(type='DistSamplerSeedHook'), 24 | visualization=dict(type='SegVisualizationHook')) 25 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/.mim/configs/_base_/schedules/schedule_60k.py: 
-------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer, clip_grad=None) 4 | # learning policy 5 | param_scheduler = [ 6 | dict( 7 | type='PolyLR', 8 | eta_min=1e-4, 9 | power=0.9, 10 | begin=0, 11 | end=60000, 12 | by_epoch=False) 13 | ] 14 | # training schedule for 80k 15 | train_cfg = dict(type='IterBasedTrainLoop', max_iters=60000, val_interval=2000) 16 | val_cfg = dict(type='ValLoop') 17 | test_cfg = dict(type='TestLoop') 18 | default_hooks = dict( 19 | timer=dict(type='IterTimerHook'), 20 | logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), 21 | param_scheduler=dict(type='ParamSchedulerHook'), 22 | checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=2000, max_keep_ckpts=10), 23 | sampler_seed=dict(type='DistSamplerSeedHook'), 24 | visualization=dict(type='SegVisualizationHook')) -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/.mim/configs/_base_/schedules/schedule_80k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer, clip_grad=None) 4 | # learning policy 5 | param_scheduler = [ 6 | dict( 7 | type='PolyLR', 8 | eta_min=1e-4, 9 | power=0.9, 10 | begin=0, 11 | end=80000, 12 | by_epoch=False) 13 | ] 14 | # training schedule for 80k 15 | train_cfg = dict(type='IterBasedTrainLoop', max_iters=80000, val_interval=8000) 16 | val_cfg = dict(type='ValLoop') 17 | test_cfg = dict(type='TestLoop') 18 | default_hooks = dict( 19 | timer=dict(type='IterTimerHook'), 20 | logger=dict(type='LoggerHook', interval=50, log_metric_by_epoch=False), 21 | param_scheduler=dict(type='ParamSchedulerHook'), 22 | checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=8000), 23 | sampler_seed=dict(type='DistSamplerSeedHook'), 24 | visualization=dict(type='SegVisualizationHook')) 25 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/.mim/configs/dpt/metafile.yaml: -------------------------------------------------------------------------------- 1 | Collections: 2 | - Name: DPT 3 | License: Apache License 2.0 4 | Metadata: 5 | Training Data: 6 | - ADE20K 7 | Paper: 8 | Title: Vision Transformer for Dense Prediction 9 | URL: https://arxiv.org/abs/2103.13413 10 | README: configs/dpt/README.md 11 | Frameworks: 12 | - PyTorch 13 | Models: 14 | - Name: dpt_vit-b16_8xb2-160k_ade20k-512x512 15 | In Collection: DPT 16 | Results: 17 | Task: Semantic Segmentation 18 | Dataset: ADE20K 19 | Metrics: 20 | mIoU: 46.97 21 | mIoU(ms+flip): 48.34 22 | Config: configs/dpt/dpt_vit-b16_8xb2-160k_ade20k-512x512.py 23 | Metadata: 24 | Training Data: ADE20K 25 | Batch Size: 16 26 | Architecture: 27 | - ViT-B 28 | - DPT 29 | Training Resources: 8x V100 GPUS 30 | Memory (GB): 8.09 31 | Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dpt/dpt_vit-b16_512x512_160k_ade20k/dpt_vit-b16_512x512_160k_ade20k-db31cf52.pth 32 | Training log: https://download.openmmlab.com/mmsegmentation/v0.5/dpt/dpt_vit-b16_512x512_160k_ade20k/dpt_vit-b16_512x512_160k_ade20k-20210809_172025.log.json 33 | Paper: 34 | Title: Vision Transformer for Dense Prediction 35 | URL: https://arxiv.org/abs/2103.13413 36 | Code: 
https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/dpt_head.py#L215 37 | Framework: PyTorch 38 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/.mim/configs/mae/metafile.yaml: -------------------------------------------------------------------------------- 1 | Models: 2 | - Name: mae-base_upernet_8xb2-amp-160k_ade20k-512x512 3 | In Collection: UPerNet 4 | Results: 5 | Task: Semantic Segmentation 6 | Dataset: ADE20K 7 | Metrics: 8 | mIoU: 48.13 9 | mIoU(ms+flip): 48.7 10 | Config: configs/mae/mae-base_upernet_8xb2-amp-160k_ade20k-512x512.py 11 | Metadata: 12 | Training Data: ADE20K 13 | Batch Size: 16 14 | Architecture: 15 | - ViT-B 16 | - UPerNet 17 | Training Resources: 8x V100 GPUS 18 | Memory (GB): 9.96 19 | Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mae/upernet_mae-base_fp16_8x2_512x512_160k_ade20k/upernet_mae-base_fp16_8x2_512x512_160k_ade20k_20220426_174752-f92a2975.pth 20 | Training log: https://download.openmmlab.com/mmsegmentation/v0.5/mae/upernet_mae-base_fp16_8x2_512x512_160k_ade20k/upernet_mae-base_fp16_8x2_512x512_160k_ade20k_20220426_174752.log.json 21 | Paper: 22 | Title: Masked Autoencoders Are Scalable Vision Learners 23 | URL: https://arxiv.org/abs/2111.06377 24 | Code: https://github.com/open-mmlab/mmsegmentation/blob/v0.24.0/mmseg/models/backbones/mae.py#L46 25 | Framework: PyTorch 26 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/.mim/tools/dist_test.sh: -------------------------------------------------------------------------------- 1 | CONFIG=$1 2 | CHECKPOINT=$2 3 | GPUS=$3 4 | NNODES=${NNODES:-1} 5 | NODE_RANK=${NODE_RANK:-0} 6 | PORT=${PORT:-29500} 7 | MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} 8 | 9 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 10 | python -m torch.distributed.launch \ 11 | --nnodes=$NNODES \ 12 | --node_rank=$NODE_RANK \ 13 | --master_addr=$MASTER_ADDR \ 14 | --nproc_per_node=$GPUS \ 15 | --master_port=$PORT \ 16 | $(dirname "$0")/test.py \ 17 | $CONFIG \ 18 | $CHECKPOINT \ 19 | --launcher pytorch \ 20 | ${@:4} 21 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/.mim/tools/dist_train.sh: -------------------------------------------------------------------------------- 1 | CONFIG=$1 2 | GPUS=$2 3 | NNODES=${NNODES:-1} 4 | NODE_RANK=${NODE_RANK:-0} 5 | PORT=${PORT:-29500} 6 | MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} 7 | 8 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 9 | python -m torch.distributed.launch \ 10 | --nnodes=$NNODES \ 11 | --node_rank=$NODE_RANK \ 12 | --master_addr=$MASTER_ADDR \ 13 | --nproc_per_node=$GPUS \ 14 | --master_port=$PORT \ 15 | $(dirname "$0")/train.py \ 16 | $CONFIG \ 17 | --launcher pytorch ${@:3} \ 18 | --resume -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/.mim/tools/slurm_test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | 5 | PARTITION=$1 6 | JOB_NAME=$2 7 | CONFIG=$3 8 | CHECKPOINT=$4 9 | GPUS=${GPUS:-4} 10 | GPUS_PER_NODE=${GPUS_PER_NODE:-4} 11 | CPUS_PER_TASK=${CPUS_PER_TASK:-5} 12 | PY_ARGS=${@:5} 13 | SRUN_ARGS=${SRUN_ARGS:-""} 14 | 15 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 16 | srun -p ${PARTITION} \ 17 | --job-name=${JOB_NAME} \ 18 | --gres=gpu:${GPUS_PER_NODE} \ 19 | --ntasks=${GPUS} \ 20 | --ntasks-per-node=${GPUS_PER_NODE} \ 
21 | --cpus-per-task=${CPUS_PER_TASK} \ 22 | --kill-on-bad-exit=1 \ 23 | ${SRUN_ARGS} \ 24 | python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS} 25 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/.mim/tools/slurm_train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | 5 | PARTITION=$1 6 | JOB_NAME=$2 7 | CONFIG=$3 8 | GPUS=${GPUS:-4} 9 | GPUS_PER_NODE=${GPUS_PER_NODE:-4} 10 | CPUS_PER_TASK=${CPUS_PER_TASK:-5} 11 | SRUN_ARGS=${SRUN_ARGS:-""} 12 | PY_ARGS=${@:4} 13 | 14 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 15 | srun -p ${PARTITION} \ 16 | --job-name=${JOB_NAME} \ 17 | --gres=gpu:${GPUS_PER_NODE} \ 18 | --ntasks=${GPUS} \ 19 | --ntasks-per-node=${GPUS_PER_NODE} \ 20 | --cpus-per-task=${CPUS_PER_TASK} \ 21 | --kill-on-bad-exit=1 \ 22 | ${SRUN_ARGS} \ 23 | python -u tools/train.py ${CONFIG} --launcher="slurm" ${PY_ARGS} 24 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/__pycache__/checkpoint.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/__pycache__/checkpoint.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/__pycache__/version.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/__pycache__/version.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/apis/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .inference import inference_model, init_model, show_result_pyplot 3 | from .mmseg_inferencer import MMSegInferencer 4 | from .remote_sense_inferencer import RSImage, RSInferencer 5 | 6 | __all__ = [ 7 | 'init_model', 'inference_model', 'show_result_pyplot', 'MMSegInferencer', 8 | 'RSInferencer', 'RSImage' 9 | ] 10 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/apis/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/apis/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/apis/__pycache__/inference.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/apis/__pycache__/inference.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/apis/__pycache__/mmseg_inferencer.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/apis/__pycache__/mmseg_inferencer.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/apis/__pycache__/remote_sense_inferencer.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/apis/__pycache__/remote_sense_inferencer.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/apis/__pycache__/utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/apis/__pycache__/utils.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/apis/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from collections import defaultdict 3 | from typing import Sequence, Union 4 | 5 | import numpy as np 6 | from mmengine.dataset import Compose 7 | from mmengine.model import BaseModel 8 | 9 | ImageType = Union[str, np.ndarray, Sequence[str], Sequence[np.ndarray]] 10 | 11 | 12 | def _preprare_data(imgs: ImageType, model: BaseModel): 13 | 14 | cfg = model.cfg 15 | for t in cfg.test_pipeline: 16 | if t.get('type') == 'LoadAnnotations': 17 | cfg.test_pipeline.remove(t) 18 | 19 | is_batch = True 20 | if not isinstance(imgs, (list, tuple)): 21 | imgs = [imgs] 22 | is_batch = False 23 | 24 | if isinstance(imgs[0], np.ndarray): 25 | cfg.test_pipeline[0]['type'] = 'LoadImageFromNDArray' 26 | 27 | # TODO: Consider using the singleton pattern to avoid building 28 | # a pipeline for each inference 29 | pipeline = Compose(cfg.test_pipeline) 30 | 31 | data = defaultdict(list) 32 | for img in imgs: 33 | if isinstance(img, np.ndarray): 34 | data_ = dict(img=img) 35 | else: 36 | data_ = dict(img_path=img) 37 | data_ = pipeline(data_) 38 | data['inputs'].append(data_['inputs']) 39 | data['data_samples'].append(data_['data_samples']) 40 | 41 | return data, is_batch 42 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/configs/_base_/default_runtime.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | 3 | from mmengine.visualization import LocalVisBackend 4 | 5 | from mmseg.models import SegTTAModel 6 | from mmseg.visualization import SegLocalVisualizer 7 | 8 | env_cfg = dict( 9 | cudnn_benchmark=False, 10 | mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), 11 | dist_cfg=dict(backend='nccl'), 12 | ) 13 | vis_backends = [dict(type=LocalVisBackend)] 14 | visualizer = dict( 15 | type=SegLocalVisualizer, vis_backends=vis_backends, name='visualizer') 16 | log_processor = dict(by_epoch=False) 17 | log_level = 'INFO' 18 | load_from = None 19 | resume = False 20 | 21 | tta_model = dict(type=SegTTAModel) 22 | default_scope = None 23 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | # yapf: disable 3 | from .ade import ADE20KDataset 4 | from .nyu import NYUDataset 5 | 6 | 7 | # yapf: enable 8 | __all__ = [ 9 | 'ADE20KDataset', 'NYUDataset' 10 | ] 11 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/__pycache__/ade.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/__pycache__/ade.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/__pycache__/basesegdataset.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/__pycache__/basesegdataset.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/__pycache__/bdd100k.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/__pycache__/bdd100k.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/__pycache__/chase_db1.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/__pycache__/chase_db1.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/__pycache__/cityscapes.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/__pycache__/cityscapes.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/__pycache__/coco_stuff.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/__pycache__/coco_stuff.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/__pycache__/dark_zurich.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/__pycache__/dark_zurich.cpython-38.pyc 
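A minimal usage sketch of the inference entry points exported by mmseg/apis/__init__.py above, assuming a standard MMSegmentation 1.x environment; the config, checkpoint, and image paths are placeholders rather than files shipped in this repository:

# Illustrative sketch only: the paths below are placeholders, not repo files.
import numpy as np
from mmseg.apis import inference_model, init_model, show_result_pyplot

config_file = 'configs/example_segmentor.py'            # hypothetical config
checkpoint_file = 'checkpoints/example_segmentor.pth'   # hypothetical weights

model = init_model(config_file, checkpoint_file, device='cuda:0')

# inference_model accepts an image path or an in-memory array; for arrays,
# _preprare_data in mmseg/apis/utils.py swaps the first test-pipeline step to
# 'LoadImageFromNDArray' before composing the pipeline.
result = inference_model(model, 'demo/example.jpg')
result_from_array = inference_model(
    model, np.zeros((512, 512, 3), dtype=np.uint8))

# Overlay the predicted segmentation mask on the input image.
show_result_pyplot(model, 'demo/example.jpg', result, opacity=0.5)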
-------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/__pycache__/dataset_wrappers.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/__pycache__/dataset_wrappers.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/__pycache__/decathlon.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/__pycache__/decathlon.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/__pycache__/drive.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/__pycache__/drive.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/__pycache__/dsdl.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/__pycache__/dsdl.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/__pycache__/hrf.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/__pycache__/hrf.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/__pycache__/hsi_drive.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/__pycache__/hsi_drive.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/__pycache__/isaid.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/__pycache__/isaid.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/__pycache__/isprs.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/__pycache__/isprs.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/__pycache__/levir.cpython-38.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/__pycache__/levir.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/__pycache__/lip.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/__pycache__/lip.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/__pycache__/loveda.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/__pycache__/loveda.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/__pycache__/mapillary.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/__pycache__/mapillary.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/__pycache__/night_driving.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/__pycache__/night_driving.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/__pycache__/nyu.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/__pycache__/nyu.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/__pycache__/pascal_context.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/__pycache__/pascal_context.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/__pycache__/potsdam.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/__pycache__/potsdam.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/__pycache__/refuge.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/__pycache__/refuge.cpython-38.pyc 
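A hedged configuration sketch for the ADE20KDataset exported by the trimmed datasets/__init__.py above, assuming MMSegmentation 1.x with MMEngine and a local ADE20K copy under the placeholder data_root; the dataset is described as a config dict and built through the DATASETS registry:

# Illustrative sketch only: data_root is a placeholder for a local ADE20K copy.
from mmseg.registry import DATASETS
from mmseg.utils import register_all_modules

ade_val_cfg = dict(
    type='ADE20KDataset',
    data_root='data/ade/ADEChallengeData2016',   # hypothetical local path
    data_prefix=dict(img_path='images/validation',
                     seg_map_path='annotations/validation'),
    pipeline=[
        dict(type='LoadImageFromFile'),
        dict(type='Resize', scale=(2048, 512), keep_ratio=True),
        dict(type='LoadAnnotations', reduce_zero_label=True),
        dict(type='PackSegInputs'),
    ])

register_all_modules()                 # register datasets/transforms into the registry
dataset = DATASETS.build(ade_val_cfg)  # scans data_root, so the data must exist locally
print(len(dataset), dataset.metainfo['classes'][:3])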
-------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/__pycache__/stare.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/__pycache__/stare.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/__pycache__/synapse.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/__pycache__/synapse.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/__pycache__/voc.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/__pycache__/voc.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/transforms/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/transforms/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/transforms/__pycache__/formatting.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/transforms/__pycache__/formatting.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/transforms/__pycache__/loading.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/transforms/__pycache__/loading.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/datasets/transforms/__pycache__/transforms.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/datasets/transforms/__pycache__/transforms.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/engine/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .hooks import SegVisualizationHook 3 | from .optimizers import (ForceDefaultOptimWrapperConstructor, 4 | LayerDecayOptimizerConstructor, 5 | LearningRateDecayOptimizerConstructor) 6 | from .schedulers import PolyLRRatio 7 | 8 | __all__ = [ 9 | 'LearningRateDecayOptimizerConstructor', 'LayerDecayOptimizerConstructor', 10 | 'SegVisualizationHook', 'PolyLRRatio', 11 | 'ForceDefaultOptimWrapperConstructor' 12 | ] 13 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/engine/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/engine/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/engine/hooks/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .visualization_hook import SegVisualizationHook 3 | 4 | __all__ = ['SegVisualizationHook'] 5 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/engine/hooks/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/engine/hooks/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/engine/hooks/__pycache__/visualization_hook.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/engine/hooks/__pycache__/visualization_hook.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/engine/optimizers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .force_default_constructor import ForceDefaultOptimWrapperConstructor 3 | from .layer_decay_optimizer_constructor import ( 4 | LayerDecayOptimizerConstructor, LearningRateDecayOptimizerConstructor) 5 | 6 | __all__ = [ 7 | 'LearningRateDecayOptimizerConstructor', 'LayerDecayOptimizerConstructor', 8 | 'ForceDefaultOptimWrapperConstructor' 9 | ] 10 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/engine/optimizers/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/engine/optimizers/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/engine/optimizers/__pycache__/force_default_constructor.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/engine/optimizers/__pycache__/force_default_constructor.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/engine/optimizers/__pycache__/layer_decay_optimizer_constructor.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/engine/optimizers/__pycache__/layer_decay_optimizer_constructor.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/engine/schedulers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .poly_ratio_scheduler import PolyLRRatio 3 | 4 | __all__ = ['PolyLRRatio'] 5 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/engine/schedulers/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/engine/schedulers/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/engine/schedulers/__pycache__/poly_ratio_scheduler.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/engine/schedulers/__pycache__/poly_ratio_scheduler.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .metrics import CityscapesMetric, DepthMetric, IoUMetric 3 | 4 | __all__ = ['IoUMetric', 'CityscapesMetric', 'DepthMetric'] 5 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/evaluation/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/evaluation/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/evaluation/metrics/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from .citys_metric import CityscapesMetric 3 | from .depth_metric import DepthMetric 4 | from .iou_metric import IoUMetric 5 | 6 | __all__ = ['IoUMetric', 'CityscapesMetric', 'DepthMetric'] 7 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/evaluation/metrics/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/evaluation/metrics/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/evaluation/metrics/__pycache__/citys_metric.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/evaluation/metrics/__pycache__/citys_metric.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/evaluation/metrics/__pycache__/depth_metric.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/evaluation/metrics/__pycache__/depth_metric.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/evaluation/metrics/__pycache__/iou_metric.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/evaluation/metrics/__pycache__/iou_metric.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .assigners import * # noqa: F401,F403 3 | from .backbones import * # noqa: F401,F403 4 | from .builder import (BACKBONES, HEADS, LOSSES, SEGMENTORS, build_backbone, 5 | build_head, build_loss, build_segmentor) 6 | from .data_preprocessor import SegDataPreProcessor 7 | from .decode_heads import * # noqa: F401,F403 8 | from .losses import * # noqa: F401,F403 9 | from .necks import * # noqa: F401,F403 10 | from .segmentors import * # noqa: F401,F403 11 | from .text_encoder import * # noqa: F401,F403 12 | 13 | __all__ = [ 14 | 'BACKBONES', 'HEADS', 'LOSSES', 'SEGMENTORS', 'build_backbone', 15 | 'build_head', 'build_loss', 'build_segmentor', 'SegDataPreProcessor' 16 | ] 17 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/__pycache__/builder.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/__pycache__/builder.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/__pycache__/data_preprocessor.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/__pycache__/data_preprocessor.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/assigners/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .base_assigner import BaseAssigner 3 | from .hungarian_assigner import HungarianAssigner 4 | from .match_cost import ClassificationCost, CrossEntropyLossCost, DiceCost 5 | 6 | __all__ = [ 7 | 'BaseAssigner', 8 | 'HungarianAssigner', 9 | 'ClassificationCost', 10 | 'CrossEntropyLossCost', 11 | 'DiceCost', 12 | ] 13 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/assigners/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/assigners/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/assigners/__pycache__/base_assigner.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/assigners/__pycache__/base_assigner.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/assigners/__pycache__/hungarian_assigner.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/assigners/__pycache__/hungarian_assigner.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/assigners/__pycache__/match_cost.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/assigners/__pycache__/match_cost.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/assigners/base_assigner.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from abc import ABCMeta, abstractmethod 3 | from typing import Optional 4 | 5 | from mmengine.structures import InstanceData 6 | 7 | 8 | class BaseAssigner(metaclass=ABCMeta): 9 | """Base assigner that assigns masks to ground truth class labels.""" 10 | 11 | @abstractmethod 12 | def assign(self, 13 | pred_instances: InstanceData, 14 | gt_instances: InstanceData, 15 | gt_instances_ignore: Optional[InstanceData] = None, 16 | **kwargs): 17 | """Assign masks to either a ground truth class label or a negative 18 | label.""" 19 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .beit import BEiT 3 | from .mae import MAE 4 | from .mae_fix import MAE_fix 5 | from .vit import VisionTransformer 6 | 7 | __all__ = [ 8 | 'VisionTransformer', 'BEiT', 'MAE', 'MAE_fix' 9 | ] 10 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/beit.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/beit.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/bisenetv1.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/bisenetv1.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/bisenetv2.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/bisenetv2.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/cgnet.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/cgnet.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/ddrnet.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/ddrnet.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/deit.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/deit.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/erfnet.cpython-38.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/erfnet.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/fast_scnn.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/fast_scnn.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/hrnet.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/hrnet.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/icnet.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/icnet.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/mae.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/mae.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/mae_clip.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/mae_clip.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/mae_clipv2.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/mae_clipv2.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/mae_clipv2_fix.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/mae_clipv2_fix.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/mae_clipv3.cpython-38.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/mae_clipv3.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/mae_clipv3_fix.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/mae_clipv3_fix.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/mae_distill.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/mae_distill.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/mae_fix.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/mae_fix.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/mit.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/mit.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/mobilenet_v2.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/mobilenet_v2.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/mobilenet_v3.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/mobilenet_v3.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/mscan.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/mscan.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/pidnet.cpython-38.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/pidnet.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/resnest.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/resnest.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/resnet.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/resnet.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/resnext.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/resnext.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/stdc.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/stdc.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/swin.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/swin.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/timm_backbone.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/timm_backbone.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/twins.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/twins.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/unet.cpython-38.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/unet.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/uniformer.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/uniformer.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/uniformer_cross_fix.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/uniformer_cross_fix.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/uniformer_official.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/uniformer_official.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/vit.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/vit.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/backbones/__pycache__/vpd.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/backbones/__pycache__/vpd.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/decode_heads/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | from .dpt_head import DPTHead 3 | from .dpt_head_depth import DPTHead_depth 4 | from .fcn_head import FCNHead 5 | from .psp_head import PSPHead 6 | from .uper_head import UPerHead 7 | 8 | 9 | __all__ = [ 10 | 'FCNHead', 'PSPHead', 'UPerHead', 'DPTHead', 'DPTHead_depth' 11 | ] 12 | -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/decode_heads/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/decode_heads/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/decode_heads/__pycache__/ann_head.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/decode_heads/__pycache__/ann_head.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/decode_heads/__pycache__/apc_head.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/decode_heads/__pycache__/apc_head.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/decode_heads/__pycache__/aspp_head.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/decode_heads/__pycache__/aspp_head.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/decode_heads/__pycache__/cascade_decode_head.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/decode_heads/__pycache__/cascade_decode_head.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/decode_heads/__pycache__/cc_head.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/decode_heads/__pycache__/cc_head.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/decode_heads/__pycache__/da_head.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/decode_heads/__pycache__/da_head.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/decode_heads/__pycache__/ddr_head.cpython-38.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/decode_heads/__pycache__/ddr_head.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/decode_heads/__pycache__/decode_head.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/decode_heads/__pycache__/decode_head.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/decode_heads/__pycache__/decode_head_depth.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/decode_heads/__pycache__/decode_head_depth.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/decode_heads/__pycache__/decode_head_wo_class.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/decode_heads/__pycache__/decode_head_wo_class.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/decode_heads/__pycache__/dm_head.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/decode_heads/__pycache__/dm_head.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/decode_heads/__pycache__/dnl_head.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/decode_heads/__pycache__/dnl_head.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/decode_heads/__pycache__/dpt_head.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/decode_heads/__pycache__/dpt_head.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/decode_heads/__pycache__/dpt_head_depth.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/models/decode_heads/__pycache__/dpt_head_depth.cpython-38.pyc -------------------------------------------------------------------------------- /eval/dense_prediction/mmseg/models/decode_heads/__pycache__/ema_head.cpython-38.pyc: 
/eval/dense_prediction/mmseg/models/losses/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .accuracy import Accuracy, accuracy
3 | from .boundary_loss import BoundaryLoss
4 | from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
5 |                                  cross_entropy, mask_cross_entropy)
6 | from .dice_loss import DiceLoss
7 | from .focal_loss import FocalLoss
8 | from .huasdorff_distance_loss import HuasdorffDisstanceLoss
9 | from .lovasz_loss import LovaszLoss
10 | from .ohem_cross_entropy_loss import OhemCrossEntropy
11 | from .silog_loss import SiLogLoss
12 | from .tversky_loss import TverskyLoss
13 | from .utils import reduce_loss, weight_reduce_loss, weighted_loss
14 | from .sigloss import SigLoss
15 | 
16 | __all__ = [
17 |     'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
18 |     'mask_cross_entropy', 'CrossEntropyLoss', 'reduce_loss',
19 |     'weight_reduce_loss', 'weighted_loss', 'LovaszLoss', 'DiceLoss',
20 |     'FocalLoss', 'TverskyLoss', 'OhemCrossEntropy', 'BoundaryLoss',
21 |     'HuasdorffDisstanceLoss', 'SiLogLoss', 'SigLoss'
22 | ]
23 | 
--------------------------------------------------------------------------------
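The losses exported above are ordinary torch modules, so they can be exercised outside a full training config. A minimal sketch, assuming the bundled mmseg package is importable and that CrossEntropyLoss keeps its usual (logits, labels) forward signature:

    import torch
    from mmseg.models.losses import CrossEntropyLoss

    # Dummy logits for 2 images, 19 classes at 8x8 resolution, plus integer labels.
    logits = torch.randn(2, 19, 8, 8)
    labels = torch.randint(0, 19, (2, 8, 8))

    criterion = CrossEntropyLoss(loss_weight=1.0)
    loss = criterion(logits, labels)  # scalar tensor
    print(loss.item())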
/eval/dense_prediction/mmseg/models/necks/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .featurepyramid import Feature2Pyramid
3 | 
4 | __all__ = [
5 |     'Feature2Pyramid'
6 | ]
7 | 
--------------------------------------------------------------------------------
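Feature2Pyramid is the neck that turns the single-scale features of a plain ViT backbone into a 4x/2x/1x/0.5x pyramid for UPerNet-style decode heads. A hypothetical config excerpt in the spirit of the mae-small UperNet config referenced by run.sh; the embed_dim value is an assumption for a ViT-Small backbone, not copied from the repository:

    # partial model config; backbone and decode_head entries are omitted
    model = dict(
        neck=dict(
            type='Feature2Pyramid',
            embed_dim=384,              # assumed ViT-Small embedding width
            rescales=[4, 2, 1, 0.5]),
    )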
/eval/dense_prediction/mmseg/models/segmentors/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .base import BaseSegmentor
3 | from .cascade_encoder_decoder import CascadeEncoderDecoder
4 | from .depth_estimator import DepthEstimator
5 | from .encoder_decoder import EncoderDecoder
6 | from .multimodal_encoder_decoder import MultimodalEncoderDecoder
7 | from .seg_tta import SegTTAModel
8 | 
9 | __all__ = [
10 |     'BaseSegmentor', 'EncoderDecoder', 'CascadeEncoderDecoder', 'SegTTAModel',
11 |     'MultimodalEncoderDecoder', 'DepthEstimator'
12 | ]
13 | 
--------------------------------------------------------------------------------
/eval/dense_prediction/mmseg/models/text_encoder/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .clip_text_encoder import CLIPTextEncoder
3 | 
4 | __all__ = ['CLIPTextEncoder']
5 | 
--------------------------------------------------------------------------------
/eval/dense_prediction/mmseg/models/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .basic_block import BasicBlock, Bottleneck
3 | from .embed import PatchEmbed
4 | from .encoding import Encoding
5 | from .inverted_residual import InvertedResidual, InvertedResidualV3
6 | from .make_divisible import make_divisible
7 | from .point_sample import get_uncertain_point_coords_with_randomness
8 | from .ppm import DAPPM, PAPPM
9 | from .res_layer import ResLayer
10 | from .se_layer import SELayer
11 | from .self_attention_block import SelfAttentionBlock
12 | from .shape_convert import (nchw2nlc2nchw, nchw_to_nlc, nlc2nchw2nlc,
13 |                             nlc_to_nchw)
14 | from .up_conv_block import UpConvBlock
15 | 
16 | # isort: off
17 | from .wrappers import Upsample, resize
18 | from .san_layers import MLP, LayerNorm2d, cross_attn_layer
19 | 
20 | __all__ = [
21 |     'ResLayer', 'SelfAttentionBlock', 'make_divisible', 'InvertedResidual',
22 |     'UpConvBlock', 'InvertedResidualV3', 'SELayer', 'PatchEmbed',
23 |     'nchw_to_nlc', 'nlc_to_nchw', 'nchw2nlc2nchw', 'nlc2nchw2nlc', 'Encoding',
24 |     'Upsample', 'resize', 'DAPPM', 'PAPPM', 'BasicBlock', 'Bottleneck',
25 |     'cross_attn_layer', 'LayerNorm2d', 'MLP',
26 |     'get_uncertain_point_coords_with_randomness'
27 | ]
28 | 
--------------------------------------------------------------------------------
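Two of the helpers re-exported here, nchw_to_nlc and nlc_to_nchw, convert between CNN-style feature maps and transformer token sequences, and resize is a shape-checked wrapper around F.interpolate. A small sketch of the round trip, assuming mmseg is importable:

    import torch
    from mmseg.models.utils import nchw_to_nlc, nlc_to_nchw, resize

    x = torch.randn(2, 96, 16, 16)      # (N, C, H, W) feature map
    tokens = nchw_to_nlc(x)             # (N, H*W, C) token sequence
    y = nlc_to_nchw(tokens, (16, 16))   # back to (N, C, 16, 16)
    assert torch.equal(x, y)

    up = resize(x, scale_factor=2, mode='bilinear', align_corners=False)
    print(up.shape)                     # torch.Size([2, 96, 32, 32])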
/eval/dense_prediction/mmseg/models/utils/make_divisible.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | def make_divisible(value, divisor, min_value=None, min_ratio=0.9):
3 |     """Make divisible function.
4 | 
5 |     This function rounds the channel number to the nearest value that can be
6 |     divisible by the divisor. It is taken from the original tf repo. It ensures
7 |     that all layers have a channel number that is divisible by divisor. It can
8 |     be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py  # noqa
9 | 
10 |     Args:
11 |         value (int): The original channel number.
12 |         divisor (int): The divisor to fully divide the channel number.
13 |         min_value (int): The minimum value of the output channel.
14 |             Default: None, which means the minimum value equals the divisor.
15 |         min_ratio (float): The minimum ratio of the rounded channel number to
16 |             the original channel number. Default: 0.9.
17 | 
18 |     Returns:
19 |         int: The modified output channel number.
20 |     """
21 | 
22 |     if min_value is None:
23 |         min_value = divisor
24 |     new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
25 |     # Make sure that rounding down does not reduce the value by more than (1 - min_ratio).
26 |     if new_value < min_ratio * value:
27 |         new_value += divisor
28 |     return new_value
29 | 
--------------------------------------------------------------------------------
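The values below follow directly from the rounding rule above (add divisor/2, floor to a multiple of the divisor, then bump up by one divisor if the result fell under min_ratio of the input); they are illustrative checks, not tests taken from the repository:

    from mmseg.models.utils import make_divisible

    assert make_divisible(30, 8) == 32    # nearest multiple of 8
    assert make_divisible(12, 8) == 16    # ties round up via the +divisor/2 term
    assert make_divisible(8, 8) == 8
    # Naive rounding of 101 with divisor 30 gives 90, which is below
    # 0.9 * 101 = 90.9, so the min_ratio guard adds one extra divisor.
    assert make_divisible(101, 30) == 120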
/eval/dense_prediction/mmseg/registry/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .registry import (DATA_SAMPLERS, DATASETS, EVALUATOR, HOOKS, INFERENCERS,
3 |                        LOG_PROCESSORS, LOOPS, METRICS, MODEL_WRAPPERS, MODELS,
4 |                        OPTIM_WRAPPER_CONSTRUCTORS, OPTIM_WRAPPERS, OPTIMIZERS,
5 |                        PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS,
6 |                        TASK_UTILS, TRANSFORMS, VISBACKENDS, VISUALIZERS,
7 |                        WEIGHT_INITIALIZERS)
8 | 
9 | __all__ = [
10 |     'HOOKS', 'DATASETS', 'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS',
11 |     'WEIGHT_INITIALIZERS', 'OPTIMIZERS', 'OPTIM_WRAPPER_CONSTRUCTORS',
12 |     'TASK_UTILS', 'PARAM_SCHEDULERS', 'METRICS', 'MODEL_WRAPPERS',
13 |     'VISBACKENDS', 'VISUALIZERS', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'LOOPS',
14 |     'EVALUATOR', 'LOG_PROCESSORS', 'OPTIM_WRAPPERS', 'INFERENCERS'
15 | ]
16 | 
--------------------------------------------------------------------------------
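These registries come from mmengine: components register themselves under a string name and are later instantiated from config dicts. A minimal sketch of the pattern, where MyToyHead is a hypothetical module used only for illustration:

    import torch.nn as nn
    from mmseg.registry import MODELS

    @MODELS.register_module()
    class MyToyHead(nn.Module):
        def __init__(self, channels=8):
            super().__init__()
            self.proj = nn.Conv2d(channels, channels, 1)

        def forward(self, x):
            return self.proj(x)

    # Config dicts are resolved to registered classes by build().
    head = MODELS.build(dict(type='MyToyHead', channels=8))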
/eval/dense_prediction/mmseg/structures/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .sampler import BasePixelSampler, OHEMPixelSampler, build_pixel_sampler
3 | from .seg_data_sample import SegDataSample
4 | 
5 | __all__ = [
6 |     'SegDataSample', 'BasePixelSampler', 'OHEMPixelSampler',
7 |     'build_pixel_sampler'
8 | ]
9 | 
--------------------------------------------------------------------------------
/eval/dense_prediction/mmseg/structures/sampler/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .base_pixel_sampler import BasePixelSampler
3 | from .builder import build_pixel_sampler
4 | from .ohem_pixel_sampler import OHEMPixelSampler
5 | 
6 | __all__ = ['build_pixel_sampler', 'BasePixelSampler', 'OHEMPixelSampler']
7 | 
--------------------------------------------------------------------------------
/eval/dense_prediction/mmseg/structures/sampler/base_pixel_sampler.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from abc import ABCMeta, abstractmethod
3 | 
4 | 
5 | class BasePixelSampler(metaclass=ABCMeta):
6 |     """Base class of pixel sampler."""
7 | 
8 |     def __init__(self, **kwargs):
9 |         pass
10 | 
11 |     @abstractmethod
12 |     def sample(self, seg_logit, seg_label):
13 |         """Placeholder for sample function."""
14 | 
--------------------------------------------------------------------------------
/eval/dense_prediction/mmseg/structures/sampler/builder.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | import warnings
3 | 
4 | from mmseg.registry import TASK_UTILS
5 | 
6 | PIXEL_SAMPLERS = TASK_UTILS
7 | 
8 | 
9 | def build_pixel_sampler(cfg, **default_args):
10 |     """Build pixel sampler for segmentation map."""
11 |     warnings.warn(
12 |         '``build_pixel_sampler`` will be deprecated soon, please use '
13 |         '``mmseg.registry.TASK_UTILS.build()`` ')
14 |     return TASK_UTILS.build(cfg, default_args=default_args)
15 | 
--------------------------------------------------------------------------------
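In practice build_pixel_sampler is invoked by the decode heads themselves: passing a sampler dict to a head builds an OHEMPixelSampler with the head as its context. A sketch, assuming the usual BaseDecodeHead keyword arguments:

    from mmseg.models.decode_heads import FCNHead

    head = FCNHead(
        in_channels=8, channels=4, num_classes=19,
        sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=100000))
    print(type(head.sampler).__name__)  # OHEMPixelSampler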
/eval/dense_prediction/mmseg/utils/bpe_simple_vocab_16e6.txt.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/eval/dense_prediction/mmseg/utils/bpe_simple_vocab_16e6.txt.gz
--------------------------------------------------------------------------------
/eval/dense_prediction/mmseg/utils/collect_env.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from mmengine.utils import get_git_hash
3 | from mmengine.utils.dl_utils import collect_env as collect_base_env
4 | 
5 | import mmseg
6 | 
7 | 
8 | def collect_env():
9 |     """Collect the information of the running environments."""
10 |     env_info = collect_base_env()
11 |     env_info['MMSegmentation'] = f'{mmseg.__version__}+{get_git_hash()[:7]}'
12 | 
13 |     return env_info
14 | 
15 | 
16 | if __name__ == '__main__':
17 |     for name, val in collect_env().items():
18 |         print(f'{name}: {val}')
19 | 
--------------------------------------------------------------------------------
/eval/dense_prediction/mmseg/utils/typing_utils.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | """Collecting some commonly used type hints in mmseg."""
3 | from typing import Dict, List, Optional, Sequence, Tuple, Union
4 | 
5 | import torch
6 | from mmengine.config import ConfigDict
7 | 
8 | from mmseg.structures import SegDataSample
9 | 
10 | # Type hint of config data
11 | ConfigType = Union[ConfigDict, dict]
12 | OptConfigType = Optional[ConfigType]
13 | # Type hint of one or more config data
14 | MultiConfig = Union[ConfigType, Sequence[ConfigType]]
15 | OptMultiConfig = Optional[MultiConfig]
16 | 
17 | SampleList = Sequence[SegDataSample]
18 | OptSampleList = Optional[SampleList]
19 | 
20 | # Type hint of Tensor
21 | TensorDict = Dict[str, torch.Tensor]
22 | TensorList = Sequence[torch.Tensor]
23 | 
24 | ForwardResults = Union[Dict[str, torch.Tensor], List[SegDataSample],
25 |                        Tuple[torch.Tensor], torch.Tensor]
26 | 
--------------------------------------------------------------------------------
/eval/dense_prediction/mmseg/version.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Open-MMLab. All rights reserved.
2 | 
3 | __version__ = '1.2.2'
4 | 
5 | 
6 | def parse_version_info(version_str):
7 |     version_info = []
8 |     for x in version_str.split('.'):
9 |         if x.isdigit():
10 |             version_info.append(int(x))
11 |         elif x.find('rc') != -1:
12 |             patch_version = x.split('rc')
13 |             version_info.append(int(patch_version[0]))
14 |             version_info.append(f'rc{patch_version[1]}')
15 |     return tuple(version_info)
16 | 
17 | 
18 | version_info = parse_version_info(__version__)
19 | 
--------------------------------------------------------------------------------
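parse_version_info turns the version string into a tuple that is easy to compare: plain releases become integers, while release candidates keep an 'rcN' suffix. Illustrative values, assuming the bundled package is importable as mmseg:

    from mmseg.version import parse_version_info

    assert parse_version_info('1.2.2') == (1, 2, 2)
    assert parse_version_info('1.2.0rc1') == (1, 2, 0, 'rc1')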
/eval/dense_prediction/mmseg/visualization/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) OpenMMLab. All rights reserved.
2 | from .local_visualizer import SegLocalVisualizer
3 | 
4 | __all__ = ['SegLocalVisualizer']
5 | 
--------------------------------------------------------------------------------
/eval/dense_prediction/prepare_ckpt.sh:
--------------------------------------------------------------------------------
1 | 
2 | 
3 | python tools/model_converters/proteus2mmseg.py ckpt_path revised_ckpt_path
--------------------------------------------------------------------------------
/eval/dense_prediction/requirements.txt:
--------------------------------------------------------------------------------
1 | -r requirements/optional.txt
2 | -r requirements/runtime.txt
3 | -r requirements/tests.txt
4 | -r requirements/multimodal.txt
5 | 
--------------------------------------------------------------------------------
/eval/dense_prediction/requirements/albu.txt:
--------------------------------------------------------------------------------
1 | albumentations>=0.3.2 --no-binary qudida,albumentations
2 | 
--------------------------------------------------------------------------------
/eval/dense_prediction/requirements/docs.txt:
--------------------------------------------------------------------------------
1 | docutils==0.16.0
2 | myst-parser
3 | -e git+https://github.com/open-mmlab/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
4 | sphinx==4.0.2
5 | sphinx_copybutton
6 | sphinx_markdown_tables
7 | urllib3<2.0.0
8 | 
--------------------------------------------------------------------------------
/eval/dense_prediction/requirements/mminstall.txt:
--------------------------------------------------------------------------------
1 | mmcv>=2.0.0rc4,<2.2.0
2 | mmengine>=0.5.0,<1.0.0
--------------------------------------------------------------------------------
/eval/dense_prediction/requirements/multimodal.txt:
--------------------------------------------------------------------------------
1 | ftfy
2 | regex
3 | 
-------------------------------------------------------------------------------- /eval/dense_prediction/requirements/optional.txt: -------------------------------------------------------------------------------- 1 | cityscapesscripts 2 | -e git+https://github.com/openai/CLIP.git@main#egg=clip 3 | 4 | # for vpd model 5 | diffusers 6 | einops==0.3.0 7 | imageio==2.9.0 8 | imageio-ffmpeg==0.4.2 9 | invisible-watermark 10 | kornia==0.6 11 | -e git+https://github.com/CompVis/stable-diffusion@21f890f#egg=latent-diffusion 12 | nibabel 13 | omegaconf==2.1.1 14 | pudb==2019.2 15 | pytorch-lightning==1.4.2 16 | streamlit>=0.73.1 17 | -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers 18 | test-tube>=0.7.5 19 | timm 20 | torch-fidelity==0.3.0 21 | torchmetrics==0.6.0 22 | transformers==4.19.2 23 | -------------------------------------------------------------------------------- /eval/dense_prediction/requirements/readthedocs.txt: -------------------------------------------------------------------------------- 1 | mmcv>=2.0.0rc1,<2.1.0 2 | mmengine>=0.4.0,<1.0.0 3 | prettytable 4 | scipy 5 | torch 6 | torchvision 7 | -------------------------------------------------------------------------------- /eval/dense_prediction/requirements/runtime.txt: -------------------------------------------------------------------------------- 1 | matplotlib 2 | numpy 3 | packaging 4 | prettytable 5 | scipy 6 | -------------------------------------------------------------------------------- /eval/dense_prediction/requirements/tests.txt: -------------------------------------------------------------------------------- 1 | codecov 2 | flake8 3 | ftfy 4 | interrogate 5 | pytest 6 | regex 7 | xdoctest>=0.10.0 8 | yapf 9 | -------------------------------------------------------------------------------- /eval/dense_prediction/run.sh: -------------------------------------------------------------------------------- 1 | 2 | 3 | bash tools/dist_train.sh configs/mae/mae-small_upernet_8xb2-amp-60k_ade20k-518x518_proteus.py 4 -------------------------------------------------------------------------------- /eval/dense_prediction/setup.cfg: -------------------------------------------------------------------------------- 1 | [yapf] 2 | based_on_style = pep8 3 | blank_line_before_nested_class_or_def = true 4 | split_before_expression_after_opening_paren = true 5 | 6 | [isort] 7 | line_length = 79 8 | multi_line_output = 0 9 | extra_standard_library = setuptools 10 | known_first_party = mmseg 11 | known_third_party = PIL,cityscapesscripts,cv2,detail,matplotlib,mmcv,numpy,onnxruntime,packaging,prettytable,pytest,pytorch_sphinx_theme,requests,scipy,seaborn,torch,ts 12 | no_lines_before = STDLIB,LOCALFOLDER 13 | default_section = THIRDPARTY 14 | 15 | [codespell] 16 | skip = *.po,*.ts,*.ipynb 17 | count = 18 | quiet-level = 3 19 | ignore-words-list = formating,sur,hist,dota,warmup,damon 20 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_apis/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | import torch.nn as nn 3 | 4 | from mmseg.models import EncoderDecoder 5 | from mmseg.models.decode_heads.decode_head import BaseDecodeHead 6 | from mmseg.registry import MODELS 7 | 8 | 9 | @MODELS.register_module(name='InferExampleHead') 10 | class ExampleDecodeHead(BaseDecodeHead): 11 | 12 | def __init__(self, num_classes=19, out_channels=None): 13 | super().__init__( 14 | 3, 3, num_classes=num_classes, out_channels=out_channels) 15 | 16 | def forward(self, inputs): 17 | return self.cls_seg(inputs[0]) 18 | 19 | 20 | @MODELS.register_module(name='InferExampleBackbone') 21 | class ExampleBackbone(nn.Module): 22 | 23 | def __init__(self): 24 | super().__init__() 25 | self.conv = nn.Conv2d(3, 3, 3) 26 | 27 | def init_weights(self, pretrained=None): 28 | pass 29 | 30 | def forward(self, x): 31 | return [self.conv(x)] 32 | 33 | 34 | @MODELS.register_module(name='InferExampleModel') 35 | class ExampleModel(EncoderDecoder): 36 | 37 | def __init__(self, **kwargs): 38 | super().__init__(**kwargs) 39 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_digit_version.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from mmseg import digit_version 3 | 4 | 5 | def test_digit_version(): 6 | assert digit_version('0.2.16') == (0, 2, 16, 0, 0, 0) 7 | assert digit_version('1.2.3') == (1, 2, 3, 0, 0, 0) 8 | assert digit_version('1.2.3rc0') == (1, 2, 3, 0, -1, 0) 9 | assert digit_version('1.2.3rc1') == (1, 2, 3, 0, -1, 1) 10 | assert digit_version('1.0rc0') == (1, 0, 0, 0, -1, 0) 11 | assert digit_version('1.0') == digit_version('1.0.0') 12 | assert digit_version('1.5.0+cuda90_cudnn7.6.3_lms') == digit_version('1.5') 13 | assert digit_version('1.0.0dev') < digit_version('1.0.0a') 14 | assert digit_version('1.0.0a') < digit_version('1.0.0a1') 15 | assert digit_version('1.0.0a') < digit_version('1.0.0b') 16 | assert digit_version('1.0.0b') < digit_version('1.0.0rc') 17 | assert digit_version('1.0.0rc1') < digit_version('1.0.0') 18 | assert digit_version('1.0.0') < digit_version('1.0.0post') 19 | assert digit_version('1.0.0post') < digit_version('1.0.0post1') 20 | assert digit_version('v1') == (1, 0, 0, 0, 0, 0) 21 | assert digit_version('v1.1.5') == (1, 1, 5, 0, 0, 0) 22 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_engine/test_optimizer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import torch 3 | import torch.nn as nn 4 | from mmengine.optim import build_optim_wrapper 5 | 6 | 7 | class ExampleModel(nn.Module): 8 | 9 | def __init__(self): 10 | super().__init__() 11 | self.param1 = nn.Parameter(torch.ones(1)) 12 | self.conv1 = nn.Conv2d(3, 4, kernel_size=1, bias=False) 13 | self.conv2 = nn.Conv2d(4, 2, kernel_size=1) 14 | self.bn = nn.BatchNorm2d(2) 15 | 16 | def forward(self, x): 17 | return x 18 | 19 | 20 | base_lr = 0.01 21 | base_wd = 0.0001 22 | momentum = 0.9 23 | 24 | 25 | def test_build_optimizer(): 26 | model = ExampleModel() 27 | optim_wrapper_cfg = dict( 28 | type='OptimWrapper', 29 | optimizer=dict( 30 | type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)) 31 | optim_wrapper = build_optim_wrapper(model, optim_wrapper_cfg) 32 | # test whether optimizer is successfully built from parent. 
33 | assert isinstance(optim_wrapper.optimizer, torch.optim.SGD) 34 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_models/test_backbones/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_models/test_backbones/test_fast_scnn.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import pytest 3 | import torch 4 | 5 | from mmseg.models.backbones import FastSCNN 6 | 7 | 8 | def test_fastscnn_backbone(): 9 | with pytest.raises(AssertionError): 10 | # Fast-SCNN channel constraints. 11 | FastSCNN( 12 | 3, (32, 48), 13 | 64, (64, 96, 128), (2, 2, 1), 14 | global_out_channels=127, 15 | higher_in_channels=64, 16 | lower_in_channels=128) 17 | 18 | # Test FastSCNN Standard Forward 19 | model = FastSCNN( 20 | in_channels=3, 21 | downsample_dw_channels=(4, 6), 22 | global_in_channels=8, 23 | global_block_channels=(8, 12, 16), 24 | global_block_strides=(2, 2, 1), 25 | global_out_channels=16, 26 | higher_in_channels=8, 27 | lower_in_channels=16, 28 | fusion_out_channels=16, 29 | ) 30 | model.init_weights() 31 | model.train() 32 | batch_size = 4 33 | imgs = torch.randn(batch_size, 3, 64, 128) 34 | feat = model(imgs) 35 | 36 | assert len(feat) == 3 37 | # higher-res 38 | assert feat[0].shape == torch.Size([batch_size, 8, 8, 16]) 39 | # lower-res 40 | assert feat[1].shape == torch.Size([batch_size, 16, 2, 4]) 41 | # FFM output 42 | assert feat[2].shape == torch.Size([batch_size, 16, 8, 16]) 43 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_models/test_heads/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_models/test_heads/test_ann_head.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import torch 3 | 4 | from mmseg.models.decode_heads import ANNHead 5 | from .utils import to_cuda 6 | 7 | 8 | def test_ann_head(): 9 | 10 | inputs = [torch.randn(1, 4, 45, 45), torch.randn(1, 8, 21, 21)] 11 | head = ANNHead( 12 | in_channels=[4, 8], 13 | channels=2, 14 | num_classes=19, 15 | in_index=[-2, -1], 16 | project_channels=8) 17 | if torch.cuda.is_available(): 18 | head, inputs = to_cuda(head, inputs) 19 | outputs = head(inputs) 20 | assert outputs.shape == (1, head.num_classes, 21, 21) 21 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_models/test_heads/test_cc_head.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | import pytest 3 | import torch 4 | 5 | from mmseg.models.decode_heads import CCHead 6 | from .utils import to_cuda 7 | 8 | 9 | def test_cc_head(): 10 | head = CCHead(in_channels=16, channels=8, num_classes=19) 11 | assert len(head.convs) == 2 12 | assert hasattr(head, 'cca') 13 | if not torch.cuda.is_available(): 14 | pytest.skip('CCHead requires CUDA') 15 | inputs = [torch.randn(1, 16, 23, 23)] 16 | head, inputs = to_cuda(head, inputs) 17 | outputs = head(inputs) 18 | assert outputs.shape == (1, head.num_classes, 23, 23) 19 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_models/test_heads/test_ema_head.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import torch 3 | 4 | from mmseg.models.decode_heads import EMAHead 5 | from .utils import to_cuda 6 | 7 | 8 | def test_emanet_head(): 9 | head = EMAHead( 10 | in_channels=4, 11 | ema_channels=3, 12 | channels=2, 13 | num_stages=3, 14 | num_bases=2, 15 | num_classes=19) 16 | for param in head.ema_mid_conv.parameters(): 17 | assert not param.requires_grad 18 | assert hasattr(head, 'ema_module') 19 | inputs = [torch.randn(1, 4, 23, 23)] 20 | if torch.cuda.is_available(): 21 | head, inputs = to_cuda(head, inputs) 22 | outputs = head(inputs) 23 | assert outputs.shape == (1, head.num_classes, 23, 23) 24 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_models/test_heads/test_gc_head.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import torch 3 | 4 | from mmseg.models.decode_heads import GCHead 5 | from .utils import to_cuda 6 | 7 | 8 | def test_gc_head(): 9 | head = GCHead(in_channels=4, channels=4, num_classes=19) 10 | assert len(head.convs) == 2 11 | assert hasattr(head, 'gc_block') 12 | inputs = [torch.randn(1, 4, 23, 23)] 13 | if torch.cuda.is_available(): 14 | head, inputs = to_cuda(head, inputs) 15 | outputs = head(inputs) 16 | assert outputs.shape == (1, head.num_classes, 23, 23) 17 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_models/test_heads/test_isa_head.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import torch 3 | 4 | from mmseg.models.decode_heads import ISAHead 5 | from .utils import to_cuda 6 | 7 | 8 | def test_isa_head(): 9 | 10 | inputs = [torch.randn(1, 8, 23, 23)] 11 | isa_head = ISAHead( 12 | in_channels=8, 13 | channels=4, 14 | num_classes=19, 15 | isa_channels=4, 16 | down_factor=(8, 8)) 17 | if torch.cuda.is_available(): 18 | isa_head, inputs = to_cuda(isa_head, inputs) 19 | output = isa_head(inputs) 20 | assert output.shape == (1, isa_head.num_classes, 23, 23) 21 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_models/test_heads/test_nl_head.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | import torch 3 | 4 | from mmseg.models.decode_heads import NLHead 5 | from .utils import to_cuda 6 | 7 | 8 | def test_nl_head(): 9 | head = NLHead(in_channels=8, channels=4, num_classes=19) 10 | assert len(head.convs) == 2 11 | assert hasattr(head, 'nl_block') 12 | inputs = [torch.randn(1, 8, 23, 23)] 13 | if torch.cuda.is_available(): 14 | head, inputs = to_cuda(head, inputs) 15 | outputs = head(inputs) 16 | assert outputs.shape == (1, head.num_classes, 23, 23) 17 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_models/test_heads/test_ocr_head.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import torch 3 | 4 | from mmseg.models.decode_heads import FCNHead, OCRHead 5 | from .utils import to_cuda 6 | 7 | 8 | def test_ocr_head(): 9 | 10 | inputs = [torch.randn(1, 8, 23, 23)] 11 | ocr_head = OCRHead( 12 | in_channels=8, channels=4, num_classes=19, ocr_channels=8) 13 | fcn_head = FCNHead(in_channels=8, channels=4, num_classes=19) 14 | if torch.cuda.is_available(): 15 | head, inputs = to_cuda(ocr_head, inputs) 16 | head, inputs = to_cuda(fcn_head, inputs) 17 | prev_output = fcn_head(inputs) 18 | output = ocr_head(inputs, prev_output) 19 | assert output.shape == (1, ocr_head.num_classes, 23, 23) 20 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_models/test_heads/test_psp_head.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import pytest 3 | import torch 4 | 5 | from mmseg.models.decode_heads import PSPHead 6 | from .utils import _conv_has_norm, to_cuda 7 | 8 | 9 | def test_psp_head(): 10 | 11 | with pytest.raises(AssertionError): 12 | # pool_scales must be list|tuple 13 | PSPHead(in_channels=4, channels=2, num_classes=19, pool_scales=1) 14 | 15 | # test no norm_cfg 16 | head = PSPHead(in_channels=4, channels=2, num_classes=19) 17 | assert not _conv_has_norm(head, sync_bn=False) 18 | 19 | # test with norm_cfg 20 | head = PSPHead( 21 | in_channels=4, 22 | channels=2, 23 | num_classes=19, 24 | norm_cfg=dict(type='SyncBN')) 25 | assert _conv_has_norm(head, sync_bn=True) 26 | 27 | inputs = [torch.randn(1, 4, 23, 23)] 28 | head = PSPHead( 29 | in_channels=4, channels=2, num_classes=19, pool_scales=(1, 2, 3)) 30 | if torch.cuda.is_available(): 31 | head, inputs = to_cuda(head, inputs) 32 | assert head.psp_modules[0][0].output_size == 1 33 | assert head.psp_modules[1][0].output_size == 2 34 | assert head.psp_modules[2][0].output_size == 3 35 | outputs = head(inputs) 36 | assert outputs.shape == (1, head.num_classes, 23, 23) 37 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_models/test_heads/test_segmenter_mask_head.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | import torch 3 | 4 | from mmseg.models.decode_heads import SegmenterMaskTransformerHead 5 | from .utils import _conv_has_norm, to_cuda 6 | 7 | 8 | def test_segmenter_mask_transformer_head(): 9 | head = SegmenterMaskTransformerHead( 10 | in_channels=2, 11 | channels=2, 12 | num_classes=150, 13 | num_layers=2, 14 | num_heads=3, 15 | embed_dims=192, 16 | dropout_ratio=0.0) 17 | assert _conv_has_norm(head, sync_bn=True) 18 | head.init_weights() 19 | 20 | inputs = [torch.randn(1, 2, 32, 32)] 21 | if torch.cuda.is_available(): 22 | head, inputs = to_cuda(head, inputs) 23 | outputs = head(inputs) 24 | assert outputs.shape == (1, head.num_classes, 32, 32) 25 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_models/test_heads/test_uper_head.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import pytest 3 | import torch 4 | 5 | from mmseg.models.decode_heads import UPerHead 6 | from .utils import _conv_has_norm, to_cuda 7 | 8 | 9 | def test_uper_head(): 10 | 11 | with pytest.raises(AssertionError): 12 | # fpn_in_channels must be list|tuple 13 | UPerHead(in_channels=4, channels=2, num_classes=19) 14 | 15 | # test no norm_cfg 16 | head = UPerHead( 17 | in_channels=[4, 2], channels=2, num_classes=19, in_index=[-2, -1]) 18 | assert not _conv_has_norm(head, sync_bn=False) 19 | 20 | # test with norm_cfg 21 | head = UPerHead( 22 | in_channels=[4, 2], 23 | channels=2, 24 | num_classes=19, 25 | norm_cfg=dict(type='SyncBN'), 26 | in_index=[-2, -1]) 27 | assert _conv_has_norm(head, sync_bn=True) 28 | 29 | inputs = [torch.randn(1, 4, 45, 45), torch.randn(1, 2, 21, 21)] 30 | head = UPerHead( 31 | in_channels=[4, 2], channels=2, num_classes=19, in_index=[-2, -1]) 32 | if torch.cuda.is_available(): 33 | head, inputs = to_cuda(head, inputs) 34 | outputs = head(inputs) 35 | assert outputs.shape == (1, head.num_classes, 45, 45) 36 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_models/test_heads/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from mmcv.cnn import ConvModule 3 | from mmengine.utils.dl_utils.parrots_wrapper import SyncBatchNorm 4 | 5 | 6 | def _conv_has_norm(module, sync_bn): 7 | for m in module.modules(): 8 | if isinstance(m, ConvModule): 9 | if not m.with_norm: 10 | return False 11 | if sync_bn: 12 | if not isinstance(m.bn, SyncBatchNorm): 13 | return False 14 | return True 15 | 16 | 17 | def to_cuda(module, data): 18 | module = module.cuda() 19 | if isinstance(data, list): 20 | for i in range(len(data)): 21 | data[i] = data[i].cuda() 22 | return module, data 23 | 24 | 25 | def list_to_cuda(data): 26 | if isinstance(data, list): 27 | for i in range(len(data)): 28 | data[i] = list_to_cuda(data[i]) 29 | return data 30 | else: 31 | return data.cuda() 32 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_models/test_losses/test_cross_entropy_loss.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | import torch 3 | import torch.nn.functional as F 4 | 5 | from mmseg.models.losses import CrossEntropyLoss, weight_reduce_loss 6 | 7 | 8 | def test_cross_entropy_loss_class_weights(): 9 | loss_class = CrossEntropyLoss 10 | pred = torch.rand((1, 10, 4, 4)) 11 | target = torch.randint(0, 10, (1, 4, 4)) 12 | class_weight = torch.ones(10) 13 | avg_factor = target.numel() 14 | 15 | cross_entropy_loss = F.cross_entropy( 16 | pred, target, weight=class_weight, reduction='none', ignore_index=-100) 17 | 18 | expected_loss = weight_reduce_loss( 19 | cross_entropy_loss, 20 | weight=None, 21 | reduction='mean', 22 | avg_factor=avg_factor) 23 | 24 | # Test loss forward 25 | loss = loss_class(class_weight=class_weight.tolist())(pred, target) 26 | 27 | assert isinstance(loss, torch.Tensor) 28 | assert expected_loss == loss 29 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_models/test_losses/test_huasdorff_distance_loss.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import pytest 3 | import torch 4 | 5 | from mmseg.models.losses import HuasdorffDisstanceLoss 6 | 7 | 8 | def test_huasdorff_distance_loss(): 9 | loss_class = HuasdorffDisstanceLoss 10 | pred = torch.rand((10, 8, 6, 6)) 11 | target = torch.rand((10, 6, 6)) 12 | class_weight = torch.rand(8) 13 | 14 | # Test loss forward 15 | loss = loss_class()(pred, target) 16 | assert isinstance(loss, torch.Tensor) 17 | 18 | # Test loss forward with avg_factor 19 | loss = loss_class()(pred, target, avg_factor=10) 20 | assert isinstance(loss, torch.Tensor) 21 | 22 | # Test loss forward with avg_factor and reduction is None, 'sum' and 'mean' 23 | for reduction in [None, 'sum', 'mean']: 24 | loss = loss_class()(pred, target, avg_factor=10, reduction=reduction) 25 | assert isinstance(loss, torch.Tensor) 26 | 27 | # Test loss forward with class_weight 28 | with pytest.raises(AssertionError): 29 | loss_class(class_weight=class_weight)(pred, target) 30 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_models/test_losses/test_kldiv_loss.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | import torch 3 | 4 | from mmseg.models.losses.kldiv_loss import KLDivLoss 5 | 6 | 7 | def test_kldiv_loss_with_none_reduction(): 8 | loss_class = KLDivLoss 9 | pred = torch.rand((8, 5, 5)) 10 | target = torch.rand((8, 5, 5)) 11 | reduction = 'none' 12 | 13 | # Test loss forward 14 | loss = loss_class(reduction=reduction)(pred, target) 15 | assert isinstance(loss, torch.Tensor) 16 | assert loss.shape == (8, 5, 5), f'{loss.shape}' 17 | 18 | 19 | def test_kldiv_loss_with_mean_reduction(): 20 | loss_class = KLDivLoss 21 | pred = torch.rand((8, 5, 5)) 22 | target = torch.rand((8, 5, 5)) 23 | reduction = 'mean' 24 | 25 | # Test loss forward 26 | loss = loss_class(reduction=reduction)(pred, target) 27 | assert isinstance(loss, torch.Tensor) 28 | assert loss.shape == (8, ), f'{loss.shape}' 29 | 30 | 31 | def test_kldiv_loss_with_sum_reduction(): 32 | loss_class = KLDivLoss 33 | pred = torch.rand((8, 5, 5)) 34 | target = torch.rand((8, 5, 5)) 35 | reduction = 'sum' 36 | 37 | # Test loss forward 38 | loss = loss_class(reduction=reduction)(pred, target) 39 | assert isinstance(loss, torch.Tensor) 40 | assert loss.shape == (8, ), f'{loss.shape}' 41 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_models/test_losses/test_silog_loss.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from unittest import TestCase 3 | 4 | import torch 5 | 6 | from mmseg.models.losses import SiLogLoss 7 | 8 | 9 | class TestSiLogLoss(TestCase): 10 | 11 | def test_SiLogLoss_forward(self): 12 | pred = torch.tensor([[1.0, 2.0], [3.5, 4.0]], dtype=torch.float32) 13 | target = torch.tensor([[0.0, 2.0], [3.0, 4.0]], dtype=torch.float32) 14 | weight = torch.tensor([1.0, 0.5], dtype=torch.float32) 15 | 16 | loss_module = SiLogLoss() 17 | loss = loss_module.forward(pred, target, weight) 18 | 19 | expected_loss = 0.02 20 | self.assertAlmostEqual(loss.item(), expected_loss, places=2) 21 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_models/test_necks/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_models/test_necks/test_fpn.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | import torch 3 | 4 | from mmseg.models import FPN 5 | 6 | 7 | def test_fpn(): 8 | in_channels = [64, 128, 256, 512] 9 | inputs = [ 10 | torch.randn(1, c, 56 // 2**i, 56 // 2**i) 11 | for i, c in enumerate(in_channels) 12 | ] 13 | 14 | fpn = FPN(in_channels, 64, len(in_channels)) 15 | outputs = fpn(inputs) 16 | assert outputs[0].shape == torch.Size([1, 64, 56, 56]) 17 | assert outputs[1].shape == torch.Size([1, 64, 28, 28]) 18 | assert outputs[2].shape == torch.Size([1, 64, 14, 14]) 19 | assert outputs[3].shape == torch.Size([1, 64, 7, 7]) 20 | 21 | fpn = FPN( 22 | in_channels, 23 | 64, 24 | len(in_channels), 25 | upsample_cfg=dict(mode='nearest', scale_factor=2.0)) 26 | outputs = fpn(inputs) 27 | assert outputs[0].shape == torch.Size([1, 64, 56, 56]) 28 | assert outputs[1].shape == torch.Size([1, 64, 28, 28]) 29 | assert outputs[2].shape == torch.Size([1, 64, 14, 14]) 30 | assert outputs[3].shape == torch.Size([1, 64, 7, 7]) 31 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_models/test_necks/test_mla_neck.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import torch 3 | 4 | from mmseg.models import MLANeck 5 | 6 | 7 | def test_mla(): 8 | in_channels = [4, 4, 4, 4] 9 | mla = MLANeck(in_channels, 32) 10 | 11 | inputs = [torch.randn(1, c, 12, 12) for i, c in enumerate(in_channels)] 12 | outputs = mla(inputs) 13 | assert outputs[0].shape == torch.Size([1, 32, 12, 12]) 14 | assert outputs[1].shape == torch.Size([1, 32, 12, 12]) 15 | assert outputs[2].shape == torch.Size([1, 32, 12, 12]) 16 | assert outputs[3].shape == torch.Size([1, 32, 12, 12]) 17 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_models/test_necks/test_multilevel_neck.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import torch 3 | 4 | from mmseg.models import MultiLevelNeck 5 | 6 | 7 | def test_multilevel_neck(): 8 | 9 | # Test init_weights 10 | MultiLevelNeck([266], 32).init_weights() 11 | 12 | # Test multi feature maps 13 | in_channels = [32, 64, 128, 256] 14 | inputs = [torch.randn(1, c, 14, 14) for i, c in enumerate(in_channels)] 15 | 16 | neck = MultiLevelNeck(in_channels, 32) 17 | outputs = neck(inputs) 18 | assert outputs[0].shape == torch.Size([1, 32, 7, 7]) 19 | assert outputs[1].shape == torch.Size([1, 32, 14, 14]) 20 | assert outputs[2].shape == torch.Size([1, 32, 28, 28]) 21 | assert outputs[3].shape == torch.Size([1, 32, 56, 56]) 22 | 23 | # Test one feature map 24 | in_channels = [768] 25 | inputs = [torch.randn(1, 768, 14, 14)] 26 | 27 | neck = MultiLevelNeck(in_channels, 32) 28 | outputs = neck(inputs) 29 | assert outputs[0].shape == torch.Size([1, 32, 7, 7]) 30 | assert outputs[1].shape == torch.Size([1, 32, 14, 14]) 31 | assert outputs[2].shape == torch.Size([1, 32, 28, 28]) 32 | assert outputs[3].shape == torch.Size([1, 32, 56, 56]) 33 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_models/test_segmentors/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 
2 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_models/test_segmentors/test_multimodal_encoder_decoder.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | from mmengine import ConfigDict 3 | 4 | from mmseg.models import build_segmentor 5 | from tests.test_models.test_segmentors.utils import \ 6 | _segmentor_forward_train_test 7 | 8 | 9 | def test_multimodal_encoder_decoder(): 10 | 11 | cfg = ConfigDict( 12 | type='MultimodalEncoderDecoder', 13 | asymetric_input=False, 14 | image_encoder=dict(type='ExampleBackbone', out_indices=[1, 2, 3, 4]), 15 | text_encoder=dict( 16 | type='ExampleTextEncoder', 17 | vocabulary=['A', 'B', 'C'], 18 | output_dims=3), 19 | decode_head=dict( 20 | type='ExampleDecodeHead', out_channels=1, num_classes=2), 21 | train_cfg=None, 22 | test_cfg=dict(mode='whole')) 23 | segmentor = build_segmentor(cfg) 24 | _segmentor_forward_train_test(segmentor) 25 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_models/test_utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | -------------------------------------------------------------------------------- /eval/dense_prediction/tests/test_utils/test_io.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) OpenMMLab. All rights reserved. 2 | import os.path as osp 3 | 4 | import numpy as np 5 | import pytest 6 | from mmengine import FileClient 7 | 8 | from mmseg.utils import datafrombytes 9 | 10 | 11 | @pytest.mark.parametrize( 12 | ['backend', 'suffix'], 13 | [['nifti', '.nii.gz'], ['numpy', '.npy'], ['pickle', '.pkl']]) 14 | def test_datafrombytes(backend, suffix): 15 | 16 | file_client = FileClient('disk') 17 | file_path = osp.join(osp.dirname(__file__), '../data/biomedical' + suffix) 18 | bytes = file_client.get(file_path) 19 | data = datafrombytes(bytes, backend) 20 | 21 | if backend == 'pickle': 22 | # test pickle loading 23 | assert isinstance(data, dict) 24 | else: 25 | assert isinstance(data, np.ndarray) 26 | if backend == 'nifti': 27 | # test nifti file loading 28 | assert len(data.shape) == 3 29 | else: 30 | # test npy file loading 31 | # testing data biomedical.npy includes data and label 32 | assert len(data.shape) == 4 33 | assert data.shape[0] == 2 34 | -------------------------------------------------------------------------------- /eval/dense_prediction/tools/dist_test.sh: -------------------------------------------------------------------------------- 1 | CONFIG=$1 2 | CHECKPOINT=$2 3 | GPUS=$3 4 | NNODES=${NNODES:-1} 5 | NODE_RANK=${NODE_RANK:-0} 6 | PORT=${PORT:-29500} 7 | MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} 8 | 9 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 10 | python -m torch.distributed.launch \ 11 | --nnodes=$NNODES \ 12 | --node_rank=$NODE_RANK \ 13 | --master_addr=$MASTER_ADDR \ 14 | --nproc_per_node=$GPUS \ 15 | --master_port=$PORT \ 16 | $(dirname "$0")/test.py \ 17 | $CONFIG \ 18 | $CHECKPOINT \ 19 | --launcher pytorch \ 20 | ${@:4} 21 | -------------------------------------------------------------------------------- /eval/dense_prediction/tools/dist_train.sh: -------------------------------------------------------------------------------- 1 | CONFIG=$1 2 | GPUS=$2 3 | NNODES=${NNODES:-1} 4 | NODE_RANK=${NODE_RANK:-0} 5 | 
PORT=${PORT:-29500} 6 | MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} 7 | 8 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 9 | python -m torch.distributed.launch \ 10 | --nnodes=$NNODES \ 11 | --node_rank=$NODE_RANK \ 12 | --master_addr=$MASTER_ADDR \ 13 | --nproc_per_node=$GPUS \ 14 | --master_port=$PORT \ 15 | $(dirname "$0")/train.py \ 16 | $CONFIG \ 17 | --launcher pytorch ${@:3} \ 18 | --resume -------------------------------------------------------------------------------- /eval/dense_prediction/tools/slurm_test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | 5 | PARTITION=$1 6 | JOB_NAME=$2 7 | CONFIG=$3 8 | CHECKPOINT=$4 9 | GPUS=${GPUS:-4} 10 | GPUS_PER_NODE=${GPUS_PER_NODE:-4} 11 | CPUS_PER_TASK=${CPUS_PER_TASK:-5} 12 | PY_ARGS=${@:5} 13 | SRUN_ARGS=${SRUN_ARGS:-""} 14 | 15 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 16 | srun -p ${PARTITION} \ 17 | --job-name=${JOB_NAME} \ 18 | --gres=gpu:${GPUS_PER_NODE} \ 19 | --ntasks=${GPUS} \ 20 | --ntasks-per-node=${GPUS_PER_NODE} \ 21 | --cpus-per-task=${CPUS_PER_TASK} \ 22 | --kill-on-bad-exit=1 \ 23 | ${SRUN_ARGS} \ 24 | python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS} 25 | -------------------------------------------------------------------------------- /eval/dense_prediction/tools/slurm_train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | 5 | PARTITION=$1 6 | JOB_NAME=$2 7 | CONFIG=$3 8 | GPUS=${GPUS:-4} 9 | GPUS_PER_NODE=${GPUS_PER_NODE:-4} 10 | CPUS_PER_TASK=${CPUS_PER_TASK:-5} 11 | SRUN_ARGS=${SRUN_ARGS:-""} 12 | PY_ARGS=${@:4} 13 | 14 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 15 | srun -p ${PARTITION} \ 16 | --job-name=${JOB_NAME} \ 17 | --gres=gpu:${GPUS_PER_NODE} \ 18 | --ntasks=${GPUS} \ 19 | --ntasks-per-node=${GPUS_PER_NODE} \ 20 | --cpus-per-task=${CPUS_PER_TASK} \ 21 | --kill-on-bad-exit=1 \ 22 | ${SRUN_ARGS} \ 23 | python -u tools/train.py ${CONFIG} --launcher="slurm" ${PY_ARGS} 24 | -------------------------------------------------------------------------------- /eval/fine_grained/README.md: -------------------------------------------------------------------------------- 1 | # Fine-grained Classification on 12 datasets 2 | 3 | 4 | ## Installation 5 | Please follow the installation instructions in `pretrain`. 6 | 7 | ## Dataset 8 | There are 12 fine-grained classification datasets in total: `aircraft`, `caltech101`, `cars`, `cifar10`, `cifar100`, `dtd`, `flowers`, `food`, `pets`, `sun397`, `voc2007`, `cub`. Most of them can be downloaded directly from torchvision, except for `caltech101`, `sun397`, `voc2007`, and `cub`, which should be placed under `./cache_data/raw/`. 9 | 10 | ## Training 11 | 1. Specify the model choice with `model` in the training script `run_all_datasets.sh`. 12 | 2. Use the `pretrained` parameter to provide the path to your pre-trained model. 13 | 3. Simply run the training script as follows (a single-dataset example is also given at the end of this README): 14 | 15 | ``` 16 | bash run_all_datasets.sh 17 | ``` 18 | 19 | ## Acknowledgment 20 | 21 | This part is heavily built upon [SynCLR](https://github.com/google-research/syn-rep-learn/tree/main/SynCLR). We sincerely thank the authors for their wonderful work. 
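22 |  23 | ## Single-dataset example 24 |  25 | The loop in `run_all_datasets.sh` simply invokes the evaluation command once per dataset. If you only need one dataset, the same command can be run directly; the snippet below mirrors the script, instantiated for `aircraft` (replace `path_pretrained_model` with the path to your own checkpoint): 26 |  27 | ``` 28 | CUDA_VISIBLE_DEVICES=0 python main_downstream_linear_dinov2.py -b 32 \ 29 |     --model 'vit_large' \ 30 |     --pretrained path_pretrained_model \ 31 |     --dataset aircraft --max-iter 500 32 | ``` 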
-------------------------------------------------------------------------------- /eval/fine_grained/run_all_datasets.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # datasets=("aircraft" "caltech101" "cars" "cifar10" "cifar100" "dtd" "flowers" "food" "pets" "sun397" "voc2007" "cub" "gtsrb" "country211" "mnist") 4 | datasets=("aircraft" "caltech101" "cars" "cifar10" "cifar100" "dtd" "flowers" "food" "pets" "sun397" "voc2007" "cub") 5 | 6 | echo "" > accuracy.log 7 | 8 | for dataset in "${datasets[@]}" 9 | do 10 | echo "Running code for dataset: $dataset" 11 | 12 | CUDA_VISIBLE_DEVICES=0 python main_downstream_linear_dinov2.py -b 32 \ 13 | --model 'vit_large' \ 14 | --pretrained path_pretrained_model \ 15 | --dataset "$dataset" --max-iter 500 >> accuracy.log; 16 | 17 | if [ $? -eq 0 ]; then 18 | echo "Run for dataset $dataset finished successfully." 19 | else 20 | echo "Run for dataset $dataset failed." 21 | fi 22 | done 23 | -------------------------------------------------------------------------------- /fig/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/fig/.DS_Store -------------------------------------------------------------------------------- /fig/deit.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/fig/deit.png -------------------------------------------------------------------------------- /fig/dino_bl.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/fig/dino_bl.png -------------------------------------------------------------------------------- /fig/dino_s.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/fig/dino_s.png -------------------------------------------------------------------------------- /fig/general.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/fig/general.png -------------------------------------------------------------------------------- /fig/less.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/fig/less.png -------------------------------------------------------------------------------- /fig/proxy_dataset.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/fig/proxy_dataset.png -------------------------------------------------------------------------------- /fig/resized_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/fig/resized_logo.png -------------------------------------------------------------------------------- /fig/teaser.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/fig/teaser.png -------------------------------------------------------------------------------- /fig/visual.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/fig/visual.png -------------------------------------------------------------------------------- /pretrain/README.md: -------------------------------------------------------------------------------- 1 | # Pre-training on ImageNet-1K 2 | 3 | 4 | ## Installation 5 | Please follow the installation instructions in [DINOv2](https://github.com/facebookresearch/dinov2/tree/main?tab=readme-ov-file#installation) and install timm==0.9.16 as well. 6 | 7 | ## Dataset 8 | We prepare ImageNet-1K following the instructions in [DeiT](https://github.com/facebookresearch/deit/blob/main/README_deit.md#data-preparation). 9 | 10 | ## Training 11 | 1. Specify the dataset directory with `data-path` in the training script `run_pretrain.sh`. 12 | 2. Use the `teacher-model` and `target_model` parameters to select the appropriate teacher and student models. 13 | 3. Specify the model choice with `model`, choosing from DINOv2, SynCLR, or CLIP. 14 | 4. For SynCLR and CLIP training, use the `teacher-path` parameter to indicate the path to the pre-trained teacher model. 15 | 5. Simply run the training script as follows (an illustrative parameter sketch is given at the end of this README): 16 | 17 | ``` 18 | bash run_pretrain.sh 19 | ``` 20 | 21 | 22 | ## Acknowledgment 23 | 24 | This part is heavily built upon [DeiT](https://github.com/facebookresearch/deit?tab=readme-ov-file), [DINOv2](https://github.com/facebookresearch/dinov2), and [SynCLR](https://github.com/google-research/syn-rep-learn/tree/main/SynCLR). We sincerely thank the authors for their wonderful work. 
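25 |  26 | ## Parameter sketch (illustrative) 27 |  28 | The exact variable and flag names are defined inside `run_pretrain.sh`; the sketch below is only meant to show how the parameters from the Training steps fit together and should not be copied verbatim: 29 |  30 | ``` 31 | # Hypothetical sketch -- check run_pretrain.sh for the real variable/flag names. 32 | # data-path     : /path/to/imagenet      # ImageNet-1K root, prepared as in DeiT 33 | # model         : DINOv2                 # model choice: DINOv2, SynCLR, or CLIP 34 | # teacher-model : the pre-trained teacher backbone 35 | # target_model  : the student backbone to be trained 36 | # teacher-path  : /path/to/teacher.pth   # required for the SynCLR and CLIP teachers 37 |  38 | bash run_pretrain.sh 39 | ``` 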
-------------------------------------------------------------------------------- /pretrain/__pycache__/augmentations.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/pretrain/__pycache__/augmentations.cpython-39.pyc -------------------------------------------------------------------------------- /pretrain/__pycache__/datasets.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/pretrain/__pycache__/datasets.cpython-39.pyc -------------------------------------------------------------------------------- /pretrain/__pycache__/losses_hint.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/pretrain/__pycache__/losses_hint.cpython-39.pyc -------------------------------------------------------------------------------- /pretrain/__pycache__/models_clip.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/pretrain/__pycache__/models_clip.cpython-39.pyc -------------------------------------------------------------------------------- /pretrain/__pycache__/models_dinov2.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/pretrain/__pycache__/models_dinov2.cpython-39.pyc -------------------------------------------------------------------------------- /pretrain/__pycache__/models_proteus_clip.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/pretrain/__pycache__/models_proteus_clip.cpython-39.pyc -------------------------------------------------------------------------------- /pretrain/__pycache__/models_proteus_dinov2.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/pretrain/__pycache__/models_proteus_dinov2.cpython-39.pyc -------------------------------------------------------------------------------- /pretrain/__pycache__/models_proteus_synclr.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/pretrain/__pycache__/models_proteus_synclr.cpython-39.pyc -------------------------------------------------------------------------------- /pretrain/__pycache__/models_synclr.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/pretrain/__pycache__/models_synclr.cpython-39.pyc -------------------------------------------------------------------------------- /pretrain/__pycache__/samplers.cpython-39.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/pretrain/__pycache__/samplers.cpython-39.pyc -------------------------------------------------------------------------------- /pretrain/__pycache__/utils.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BeSpontaneous/Proteus-pytorch/8b1b2749e4789d0de9615c5bbdb0fd67800eafd0/pretrain/__pycache__/utils.cpython-39.pyc --------------------------------------------------------------------------------