├── SurgNetCode ├── apex │ ├── apex │ │ ├── amp │ │ │ ├── lists │ │ │ │ ├── __init__.py │ │ │ │ └── tensor_overrides.py │ │ │ ├── __version__.py │ │ │ ├── __init__.py │ │ │ ├── compat.py │ │ │ └── rnn_compat.py │ │ ├── contrib │ │ │ ├── __init__.py │ │ │ ├── fmha │ │ │ │ └── __init__.py │ │ │ ├── clip_grad │ │ │ │ └── __init__.py │ │ │ ├── layer_norm │ │ │ │ ├── __init__.py │ │ │ │ └── layer_norm.py │ │ │ ├── sparsity │ │ │ │ ├── __init__.py │ │ │ │ └── permutation_search_kernels │ │ │ │ │ └── __init__.py │ │ │ ├── conv_bias_relu │ │ │ │ └── __init__.py │ │ │ ├── transducer │ │ │ │ └── __init__.py │ │ │ ├── peer_memory │ │ │ │ └── __init__.py │ │ │ ├── multihead_attn │ │ │ │ ├── MHA_bwd.png │ │ │ │ ├── MHA_fwd.png │ │ │ │ └── __init__.py │ │ │ ├── optimizers │ │ │ │ └── __init__.py │ │ │ ├── bottleneck │ │ │ │ └── __init__.py │ │ │ ├── groupbn │ │ │ │ └── __init__.py │ │ │ ├── focal_loss │ │ │ │ ├── __init__.py │ │ │ │ └── focal_loss.py │ │ │ ├── xentropy │ │ │ │ ├── __init__.py │ │ │ │ └── softmax_xentropy.py │ │ │ ├── csrc │ │ │ │ ├── groupbn │ │ │ │ │ └── cuda_utils.h │ │ │ │ ├── optimizers │ │ │ │ │ ├── fused_lamb_cuda.cpp │ │ │ │ │ ├── multi_tensor_distopt_adam.cpp │ │ │ │ │ └── multi_tensor_distopt_lamb.cpp │ │ │ │ ├── nccl_p2p │ │ │ │ │ ├── nccl_p2p.cpp │ │ │ │ │ └── nccl_p2p_cuda.cuh │ │ │ │ ├── peer_memory │ │ │ │ │ └── peer_memory.cpp │ │ │ │ └── xentropy │ │ │ │ │ └── interface.cpp │ │ │ └── test │ │ │ │ ├── fused_dense │ │ │ │ └── test_fused_dense.py │ │ │ │ └── multihead_attn │ │ │ │ └── test_mha_fused_softmax.py │ │ ├── transformer │ │ │ ├── testing │ │ │ │ └── __init__.py │ │ │ ├── amp │ │ │ │ └── __init__.py │ │ │ ├── functional │ │ │ │ └── __init__.py │ │ │ ├── pipeline_parallel │ │ │ │ ├── __init__.py │ │ │ │ └── schedules │ │ │ │ │ └── __init__.py │ │ │ ├── _data │ │ │ │ └── __init__.py │ │ │ ├── layers │ │ │ │ └── __init__.py │ │ │ ├── log_util.py │ │ │ ├── __init__.py │ │ │ ├── enums.py │ │ │ └── utils.py │ │ ├── RNN │ │ │ ├── README.md │ │ │ 
└── __init__.py │ │ ├── mlp │ │ │ └── __init__.py │ │ ├── fused_dense │ │ │ └── __init__.py │ │ ├── normalization │ │ │ └── __init__.py │ │ ├── multi_tensor_apply │ │ │ ├── __init__.py │ │ │ └── multi_tensor_apply.py │ │ ├── optimizers │ │ │ └── __init__.py │ │ ├── fp16_utils │ │ │ ├── __init__.py │ │ │ └── README.md │ │ ├── _autocast_utils.py │ │ └── parallel │ │ │ └── multiproc.py │ ├── tests │ │ ├── L0 │ │ │ ├── run_amp │ │ │ │ ├── __init__.py │ │ │ │ ├── utils.py │ │ │ │ └── test_larc.py │ │ │ ├── run_fp16util │ │ │ │ └── __init__.py │ │ │ ├── run_optimizers │ │ │ │ └── __init__.py │ │ │ ├── run_transformer │ │ │ │ ├── __init__.py │ │ │ │ └── test_transformer_utils.py │ │ │ └── run_test.py │ │ ├── L1 │ │ │ ├── cross_product_distributed │ │ │ │ └── run.sh │ │ │ └── cross_product │ │ │ │ └── run.sh │ │ ├── distributed │ │ │ ├── amp_master_params │ │ │ │ ├── run.sh │ │ │ │ └── compare.py │ │ │ ├── DDP │ │ │ │ └── run_race_test.sh │ │ │ └── synced_batchnorm │ │ │ │ ├── unit_test.sh │ │ │ │ └── test_batchnorm1d.py │ │ └── docker_extension_builds │ │ │ └── run.sh │ ├── apex.egg-info │ │ ├── top_level.txt │ │ ├── dependency_links.txt │ │ └── PKG-INFO │ ├── build │ │ └── lib │ │ │ └── apex │ │ │ ├── amp │ │ │ ├── lists │ │ │ │ ├── __init__.py │ │ │ │ └── tensor_overrides.py │ │ │ ├── __version__.py │ │ │ ├── __init__.py │ │ │ └── compat.py │ │ │ ├── contrib │ │ │ ├── __init__.py │ │ │ ├── fmha │ │ │ │ └── __init__.py │ │ │ ├── clip_grad │ │ │ │ └── __init__.py │ │ │ ├── layer_norm │ │ │ │ ├── __init__.py │ │ │ │ └── layer_norm.py │ │ │ ├── sparsity │ │ │ │ ├── __init__.py │ │ │ │ └── permutation_search_kernels │ │ │ │ │ └── __init__.py │ │ │ ├── conv_bias_relu │ │ │ │ └── __init__.py │ │ │ ├── transducer │ │ │ │ └── __init__.py │ │ │ ├── peer_memory │ │ │ │ └── __init__.py │ │ │ ├── optimizers │ │ │ │ └── __init__.py │ │ │ ├── bottleneck │ │ │ │ └── __init__.py │ │ │ ├── multihead_attn │ │ │ │ └── __init__.py │ │ │ ├── groupbn │ │ │ │ └── __init__.py │ │ │ ├── 
focal_loss │ │ │ │ ├── __init__.py │ │ │ │ └── focal_loss.py │ │ │ └── xentropy │ │ │ │ ├── __init__.py │ │ │ │ └── softmax_xentropy.py │ │ │ ├── transformer │ │ │ ├── testing │ │ │ │ └── __init__.py │ │ │ ├── amp │ │ │ │ └── __init__.py │ │ │ ├── functional │ │ │ │ └── __init__.py │ │ │ ├── pipeline_parallel │ │ │ │ ├── __init__.py │ │ │ │ └── schedules │ │ │ │ │ └── __init__.py │ │ │ ├── _data │ │ │ │ └── __init__.py │ │ │ ├── layers │ │ │ │ └── __init__.py │ │ │ ├── log_util.py │ │ │ ├── __init__.py │ │ │ ├── enums.py │ │ │ └── utils.py │ │ │ ├── mlp │ │ │ └── __init__.py │ │ │ ├── fused_dense │ │ │ └── __init__.py │ │ │ ├── RNN │ │ │ └── __init__.py │ │ │ ├── normalization │ │ │ └── __init__.py │ │ │ ├── multi_tensor_apply │ │ │ ├── __init__.py │ │ │ └── multi_tensor_apply.py │ │ │ ├── optimizers │ │ │ └── __init__.py │ │ │ ├── fp16_utils │ │ │ └── __init__.py │ │ │ ├── _autocast_utils.py │ │ │ └── parallel │ │ │ └── multiproc.py │ ├── requirements_dev.txt │ ├── requirements.txt │ ├── examples │ │ ├── simple │ │ │ └── distributed │ │ │ │ ├── run.sh │ │ │ │ └── README.md │ │ ├── README.md │ │ └── docker │ │ │ ├── Dockerfile │ │ │ └── README.md │ ├── docs │ │ ├── source │ │ │ ├── _static │ │ │ │ └── img │ │ │ │ │ └── nv-pytorch2.png │ │ │ ├── layernorm.rst │ │ │ ├── optimizers.rst │ │ │ ├── parallel.rst │ │ │ ├── _templates │ │ │ │ └── layout.html │ │ │ └── index.rst │ │ └── Makefile │ ├── csrc │ │ ├── compat.h │ │ ├── megatron │ │ │ └── fused_weight_gradient_dense.cpp │ │ └── flatten_unflatten.cpp │ └── LICENSE ├── mmseg │ ├── models │ │ ├── losses │ │ │ ├── focal_loss.py │ │ │ ├── __pycache__ │ │ │ │ ├── utils.cpython-36.pyc │ │ │ │ ├── utils.cpython-37.pyc │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ ├── accuracy.cpython-36.pyc │ │ │ │ ├── accuracy.cpython-37.pyc │ │ │ │ ├── loss_focal.cpython-37.pyc │ │ │ │ ├── lovasz_loss.cpython-36.pyc │ │ │ │ ├── lovasz_loss.cpython-37.pyc │ │ │ │ ├── cross_entropy_loss.cpython-36.pyc │ │ 
│ │ └── cross_entropy_loss.cpython-37.pyc │ │ │ └── __init__.py │ │ ├── necks │ │ │ ├── __init__.py │ │ │ └── __pycache__ │ │ │ │ ├── fpn.cpython-37.pyc │ │ │ │ └── __init__.cpython-37.pyc │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── builder.cpython-36.pyc │ │ │ └── builder.cpython-37.pyc │ │ ├── backbones │ │ │ ├── __pycache__ │ │ │ │ ├── cgnet.cpython-36.pyc │ │ │ │ ├── cgnet.cpython-37.pyc │ │ │ │ ├── hrnet.cpython-37.pyc │ │ │ │ ├── resnet.cpython-37.pyc │ │ │ │ ├── unet.cpython-37.pyc │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ ├── resnest.cpython-37.pyc │ │ │ │ ├── resnext.cpython-37.pyc │ │ │ │ ├── fast_scnn.cpython-36.pyc │ │ │ │ ├── fast_scnn.cpython-37.pyc │ │ │ │ ├── mobilenet_v2.cpython-37.pyc │ │ │ │ ├── mobilenet_v3.cpython-37.pyc │ │ │ │ └── mix_transformer.cpython-37.pyc │ │ │ └── __init__.py │ │ ├── segmentors │ │ │ ├── __pycache__ │ │ │ │ ├── base.cpython-37.pyc │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ ├── encoder_decoder.cpython-37.pyc │ │ │ │ └── cascade_encoder_decoder.cpython-37.pyc │ │ │ └── __init__.py │ │ ├── utils │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ ├── res_layer.cpython-36.pyc │ │ │ │ ├── res_layer.cpython-37.pyc │ │ │ │ ├── se_layer.cpython-36.pyc │ │ │ │ ├── se_layer.cpython-37.pyc │ │ │ │ ├── make_divisible.cpython-36.pyc │ │ │ │ ├── make_divisible.cpython-37.pyc │ │ │ │ ├── up_conv_block.cpython-36.pyc │ │ │ │ ├── up_conv_block.cpython-37.pyc │ │ │ │ ├── inverted_residual.cpython-36.pyc │ │ │ │ ├── inverted_residual.cpython-37.pyc │ │ │ │ ├── self_attention_block.cpython-36.pyc │ │ │ │ └── self_attention_block.cpython-37.pyc │ │ │ ├── __init__.py │ │ │ └── make_divisible.py │ │ ├── decode_heads │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ ├── ann_head.cpython-36.pyc │ │ │ │ ├── ann_head.cpython-37.pyc │ │ │ │ ├── 
apc_head.cpython-36.pyc │ │ │ │ ├── apc_head.cpython-37.pyc │ │ │ │ ├── cc_head.cpython-36.pyc │ │ │ │ ├── cc_head.cpython-37.pyc │ │ │ │ ├── da_head.cpython-36.pyc │ │ │ │ ├── da_head.cpython-37.pyc │ │ │ │ ├── dm_head.cpython-36.pyc │ │ │ │ ├── dm_head.cpython-37.pyc │ │ │ │ ├── dnl_head.cpython-36.pyc │ │ │ │ ├── dnl_head.cpython-37.pyc │ │ │ │ ├── ema_head.cpython-36.pyc │ │ │ │ ├── ema_head.cpython-37.pyc │ │ │ │ ├── enc_head.cpython-36.pyc │ │ │ │ ├── enc_head.cpython-37.pyc │ │ │ │ ├── fcn_head.cpython-36.pyc │ │ │ │ ├── fcn_head.cpython-37.pyc │ │ │ │ ├── fpn_head.cpython-36.pyc │ │ │ │ ├── fpn_head.cpython-37.pyc │ │ │ │ ├── gc_head.cpython-36.pyc │ │ │ │ ├── gc_head.cpython-37.pyc │ │ │ │ ├── nl_head.cpython-36.pyc │ │ │ │ ├── nl_head.cpython-37.pyc │ │ │ │ ├── ocr_head.cpython-36.pyc │ │ │ │ ├── ocr_head.cpython-37.pyc │ │ │ │ ├── psa_head.cpython-37.pyc │ │ │ │ ├── psp_head.cpython-37.pyc │ │ │ │ ├── aspp_head.cpython-36.pyc │ │ │ │ ├── aspp_head.cpython-37.pyc │ │ │ │ ├── point_head.cpython-36.pyc │ │ │ │ ├── point_head.cpython-37.pyc │ │ │ │ ├── uper_head.cpython-37.pyc │ │ │ │ ├── decode_head.cpython-36.pyc │ │ │ │ ├── decode_head.cpython-37.pyc │ │ │ │ ├── lraspp_head.cpython-36.pyc │ │ │ │ ├── lraspp_head.cpython-37.pyc │ │ │ │ ├── sep_aspp_head.cpython-37.pyc │ │ │ │ ├── sep_fcn_head.cpython-37.pyc │ │ │ │ ├── segformer_head.cpython-37.pyc │ │ │ │ ├── cascade_decode_head.cpython-36.pyc │ │ │ │ └── cascade_decode_head.cpython-37.pyc │ │ │ ├── __init__.py │ │ │ ├── cc_head.py │ │ │ ├── nl_head.py │ │ │ └── gc_head.py │ │ ├── __init__.py │ │ └── builder.py │ ├── core │ │ ├── utils │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── misc.cpython-36.pyc │ │ │ │ ├── misc.cpython-37.pyc │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ └── __init__.cpython-37.pyc │ │ │ └── misc.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ └── __init__.cpython-37.pyc │ │ ├── __init__.py │ │ ├── seg │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc 
│ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ ├── builder.cpython-36.pyc │ │ │ │ └── builder.cpython-37.pyc │ │ │ ├── sampler │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ │ ├── base_pixel_sampler.cpython-36.pyc │ │ │ │ │ ├── base_pixel_sampler.cpython-37.pyc │ │ │ │ │ ├── ohem_pixel_sampler.cpython-36.pyc │ │ │ │ │ └── ohem_pixel_sampler.cpython-37.pyc │ │ │ │ ├── __init__.py │ │ │ │ └── base_pixel_sampler.py │ │ │ ├── __init__.py │ │ │ └── builder.py │ │ └── evaluation │ │ │ ├── __pycache__ │ │ │ ├── metrics.cpython-36.pyc │ │ │ ├── metrics.cpython-37.pyc │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── class_names.cpython-36.pyc │ │ │ ├── class_names.cpython-37.pyc │ │ │ ├── eval_hooks.cpython-36.pyc │ │ │ └── eval_hooks.cpython-37.pyc │ │ │ └── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-36.pyc │ │ ├── __init__.cpython-37.pyc │ │ ├── version.cpython-36.pyc │ │ └── version.cpython-37.pyc │ ├── apis │ │ ├── __pycache__ │ │ │ ├── test.cpython-37.pyc │ │ │ ├── train.cpython-37.pyc │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── inference.cpython-36.pyc │ │ │ └── inference.cpython-37.pyc │ │ └── __init__.py │ ├── datasets │ │ ├── __pycache__ │ │ │ ├── ade.cpython-36.pyc │ │ │ ├── ade.cpython-37.pyc │ │ │ ├── ade.cpython-38.pyc │ │ │ ├── hrf.cpython-36.pyc │ │ │ ├── hrf.cpython-37.pyc │ │ │ ├── hrf.cpython-38.pyc │ │ │ ├── voc.cpython-36.pyc │ │ │ ├── voc.cpython-37.pyc │ │ │ ├── voc.cpython-38.pyc │ │ │ ├── custom.cpython-36.pyc │ │ │ ├── custom.cpython-37.pyc │ │ │ ├── custom.cpython-38.pyc │ │ │ ├── drive.cpython-36.pyc │ │ │ ├── drive.cpython-37.pyc │ │ │ ├── drive.cpython-38.pyc │ │ │ ├── stare.cpython-36.pyc │ │ │ ├── stare.cpython-37.pyc │ │ │ ├── stare.cpython-38.pyc │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── __init__.cpython-38.pyc │ │ │ ├── builder.cpython-36.pyc │ │ │ ├── builder.cpython-37.pyc 
│ │ │ ├── builder.cpython-38.pyc │ │ │ ├── chase_db1.cpython-36.pyc │ │ │ ├── chase_db1.cpython-37.pyc │ │ │ ├── chase_db1.cpython-38.pyc │ │ │ ├── cocostuff.cpython-36.pyc │ │ │ ├── cocostuff.cpython-37.pyc │ │ │ ├── cocostuff.cpython-38.pyc │ │ │ ├── mapillary.cpython-36.pyc │ │ │ ├── mapillary.cpython-37.pyc │ │ │ ├── mapillary.cpython-38.pyc │ │ │ ├── cityscapes.cpython-36.pyc │ │ │ ├── cityscapes.cpython-37.pyc │ │ │ ├── cityscapes.cpython-38.pyc │ │ │ ├── pascal_context.cpython-36.pyc │ │ │ ├── pascal_context.cpython-37.pyc │ │ │ ├── pascal_context.cpython-38.pyc │ │ │ ├── dataset_wrappers.cpython-36.pyc │ │ │ ├── dataset_wrappers.cpython-37.pyc │ │ │ └── dataset_wrappers.cpython-38.pyc │ │ ├── pipelines │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-36.pyc │ │ │ │ ├── __init__.cpython-37.pyc │ │ │ │ ├── __init__.cpython-38.pyc │ │ │ │ ├── compose.cpython-36.pyc │ │ │ │ ├── compose.cpython-37.pyc │ │ │ │ ├── compose.cpython-38.pyc │ │ │ │ ├── formating.cpython-36.pyc │ │ │ │ ├── formating.cpython-37.pyc │ │ │ │ ├── formating.cpython-38.pyc │ │ │ │ ├── loading.cpython-36.pyc │ │ │ │ ├── loading.cpython-37.pyc │ │ │ │ ├── loading.cpython-38.pyc │ │ │ │ ├── transforms.cpython-36.pyc │ │ │ │ ├── transforms.cpython-37.pyc │ │ │ │ ├── transforms.cpython-38.pyc │ │ │ │ ├── test_time_aug.cpython-36.pyc │ │ │ │ ├── test_time_aug.cpython-37.pyc │ │ │ │ └── test_time_aug.cpython-38.pyc │ │ │ ├── __init__.py │ │ │ └── compose.py │ │ ├── hrf.py │ │ ├── stare.py │ │ ├── drive.py │ │ ├── chase_db1.py │ │ ├── __init__.py │ │ └── dataset_wrappers.py │ ├── ops │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── encoding.cpython-36.pyc │ │ │ ├── encoding.cpython-37.pyc │ │ │ ├── wrappers.cpython-36.pyc │ │ │ └── wrappers.cpython-37.pyc │ │ └── wrappers.py │ ├── utils │ │ ├── __pycache__ │ │ │ ├── logger.cpython-36.pyc │ │ │ ├── logger.cpython-37.pyc │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── 
__init__.cpython-37.pyc │ │ │ ├── collect_env.cpython-36.pyc │ │ │ └── collect_env.cpython-37.pyc │ │ ├── __init__.py │ │ ├── collect_env.py │ │ └── logger.py │ ├── version.py │ └── __init__.py ├── pretrain │ └── Put the pretrained model here ├── work_dirs │ └── tmp │ │ └── Put the EndoVisSub2017 finetuned model here for testing ├── __pycache__ │ ├── pvt.cpython-37.pyc │ ├── swin.cpython-37.pyc │ ├── align_resize.cpython-37.pyc │ └── models_mae_pvt.cpython-37.pyc ├── util │ ├── __pycache__ │ │ ├── misc.cpython-37.pyc │ │ ├── datasets.cpython-37.pyc │ │ ├── lr_sched.cpython-37.pyc │ │ ├── pos_embed.cpython-37.pyc │ │ └── pos_embed.cpython-38.pyc │ ├── lr_sched.py │ └── crop.py ├── mmcv_custom │ ├── __pycache__ │ │ ├── __init__.cpython-37.pyc │ │ ├── __init__.cpython-38.pyc │ │ ├── train_api.cpython-37.pyc │ │ ├── train_api.cpython-38.pyc │ │ ├── checkpoint.cpython-37.pyc │ │ ├── checkpoint.cpython-38.pyc │ │ ├── resize_transform.cpython-37.pyc │ │ ├── resize_transform.cpython-38.pyc │ │ ├── layer_decay_optimizer_constructor.cpython-37.pyc │ │ └── layer_decay_optimizer_constructor.cpython-38.pyc │ ├── apex_runner │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── __init__.cpython-38.pyc │ │ │ ├── checkpoint.cpython-37.pyc │ │ │ ├── checkpoint.cpython-38.pyc │ │ │ ├── optimizer.cpython-37.pyc │ │ │ ├── optimizer.cpython-38.pyc │ │ │ ├── apex_iter_based_runner.cpython-37.pyc │ │ │ └── apex_iter_based_runner.cpython-38.pyc │ │ ├── __init__.py │ │ └── optimizer.py │ └── __init__.py ├── dist_train.sh ├── dist_test.sh └── configs │ └── _base_ │ ├── default_runtime.py │ └── schedules │ ├── schedule_80k_adamw.py │ ├── schedule_20k.py │ ├── schedule_40k.py │ ├── schedule_160k.py │ └── schedule_320k.py ├── SurgNet_Framework.png └── IVIS Database EULA(1.1).docx /SurgNetCode/apex/apex/amp/lists/__init__.py: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /SurgNetCode/apex/apex/contrib/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SurgNetCode/apex/tests/L0/run_amp/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/losses/focal_loss.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex.egg-info/top_level.txt: -------------------------------------------------------------------------------- 1 | apex 2 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/transformer/testing/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/amp/lists/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/contrib/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SurgNetCode/apex/tests/L0/run_fp16util/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SurgNetCode/apex/tests/L0/run_optimizers/__init__.py: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /SurgNetCode/apex/tests/L0/run_transformer/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SurgNetCode/pretrain/Put the pretrained model here: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex.egg-info/dependency_links.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/RNN/README.md: -------------------------------------------------------------------------------- 1 | Under construction... 2 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/mlp/__init__.py: -------------------------------------------------------------------------------- 1 | from .mlp import * 2 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/transformer/testing/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/mlp/__init__.py: -------------------------------------------------------------------------------- 1 | from .mlp import * 2 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/contrib/fmha/__init__.py: -------------------------------------------------------------------------------- 1 | from .fmha import FMHAFun 2 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/fused_dense/__init__.py: 
-------------------------------------------------------------------------------- 1 | from .fused_dense import * 2 | -------------------------------------------------------------------------------- /SurgNetCode/work_dirs/tmp/Put the EndoVisSub2017 finetuned model here for testing: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/contrib/fmha/__init__.py: -------------------------------------------------------------------------------- 1 | from .fmha import FMHAFun 2 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/fused_dense/__init__.py: -------------------------------------------------------------------------------- 1 | from .fused_dense import * 2 | -------------------------------------------------------------------------------- /SurgNetCode/apex/requirements_dev.txt: -------------------------------------------------------------------------------- 1 | -r requirements.txt 2 | flake8>=3.7.9 3 | Sphinx>=3.0.3 -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/contrib/clip_grad/__init__.py: -------------------------------------------------------------------------------- 1 | from .clip_grad import clip_grad_norm_ 2 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/contrib/layer_norm/__init__.py: -------------------------------------------------------------------------------- 1 | from .layer_norm import FastLayerNorm 2 | -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/necks/__init__.py: -------------------------------------------------------------------------------- 1 | from .fpn import FPN 2 | 3 | __all__ = ['FPN'] 4 | 
-------------------------------------------------------------------------------- /SurgNet_Framework.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNet_Framework.png -------------------------------------------------------------------------------- /IVIS Database EULA(1.1).docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/IVIS Database EULA(1.1).docx -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/contrib/clip_grad/__init__.py: -------------------------------------------------------------------------------- 1 | from .clip_grad import clip_grad_norm_ 2 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/contrib/layer_norm/__init__.py: -------------------------------------------------------------------------------- 1 | from .layer_norm import FastLayerNorm 2 | -------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .misc import add_prefix 2 | 3 | __all__ = ['add_prefix'] 4 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/amp/__version__.py: -------------------------------------------------------------------------------- 1 | VERSION = (0, 1, 0) 2 | __version__ = '.'.join(map(str, VERSION)) 3 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/RNN/__init__.py: -------------------------------------------------------------------------------- 1 | from .models import LSTM, GRU, ReLU, Tanh, mLSTM 2 | 3 | __all__ = ['models'] 4 | 
-------------------------------------------------------------------------------- /SurgNetCode/apex/apex/contrib/sparsity/__init__.py: -------------------------------------------------------------------------------- 1 | from .sparse_masklib import create_mask 2 | from .asp import ASP 3 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/amp/__version__.py: -------------------------------------------------------------------------------- 1 | VERSION = (0, 1, 0) 2 | __version__ = '.'.join(map(str, VERSION)) 3 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/RNN/__init__.py: -------------------------------------------------------------------------------- 1 | from .models import LSTM, GRU, ReLU, Tanh, mLSTM 2 | 3 | __all__ = ['models'] 4 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/contrib/sparsity/__init__.py: -------------------------------------------------------------------------------- 1 | from .sparse_masklib import create_mask 2 | from .asp import ASP 3 | -------------------------------------------------------------------------------- /SurgNetCode/__pycache__/pvt.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/__pycache__/pvt.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/__pycache__/swin.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/__pycache__/swin.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/contrib/conv_bias_relu/__init__.py: 
-------------------------------------------------------------------------------- 1 | from .conv_bias_relu import ConvBiasReLU, ConvBias, ConvBiasMaskReLU 2 | 3 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/contrib/transducer/__init__.py: -------------------------------------------------------------------------------- 1 | from .transducer import TransducerJoint 2 | from .transducer import TransducerLoss -------------------------------------------------------------------------------- /SurgNetCode/apex/tests/L1/cross_product_distributed/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cp ../common/* . 4 | bash run_test.sh distributed $1 5 | -------------------------------------------------------------------------------- /SurgNetCode/apex/requirements.txt: -------------------------------------------------------------------------------- 1 | cxxfilt>=0.2.0 2 | tqdm>=4.28.1 3 | numpy>=1.15.3 4 | PyYAML>=5.1 5 | pytest>=3.5.1 6 | packaging>=14.0 7 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/contrib/conv_bias_relu/__init__.py: -------------------------------------------------------------------------------- 1 | from .conv_bias_relu import ConvBiasReLU, ConvBias, ConvBiasMaskReLU 2 | 3 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/contrib/transducer/__init__.py: -------------------------------------------------------------------------------- 1 | from .transducer import TransducerJoint 2 | from .transducer import TransducerLoss -------------------------------------------------------------------------------- /SurgNetCode/util/__pycache__/misc.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/util/__pycache__/misc.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/normalization/__init__.py: -------------------------------------------------------------------------------- 1 | from .fused_layer_norm import FusedLayerNorm, MixedFusedLayerNorm, FusedRMSNorm, MixedFusedRMSNorm 2 | -------------------------------------------------------------------------------- /SurgNetCode/__pycache__/align_resize.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/__pycache__/align_resize.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/__pycache__/models_mae_pvt.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/__pycache__/models_mae_pvt.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/contrib/peer_memory/__init__.py: -------------------------------------------------------------------------------- 1 | from .peer_memory import PeerMemoryPool 2 | from .peer_halo_exchanger_1d import PeerHaloExchanger1d 3 | -------------------------------------------------------------------------------- /SurgNetCode/apex/examples/simple/distributed/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | python -m torch.distributed.launch --nproc_per_node=2 distributed_data_parallel.py 3 | -------------------------------------------------------------------------------- /SurgNetCode/mmseg/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/__pycache__/version.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/__pycache__/version.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/__pycache__/version.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/__pycache__/version.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/util/__pycache__/datasets.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/util/__pycache__/datasets.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/util/__pycache__/lr_sched.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/util/__pycache__/lr_sched.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/util/__pycache__/pos_embed.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/util/__pycache__/pos_embed.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/util/__pycache__/pos_embed.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/util/__pycache__/pos_embed.cpython-38.pyc -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/transformer/amp/__init__.py: -------------------------------------------------------------------------------- 1 | from apex.transformer.amp.grad_scaler import GradScaler 2 | 3 | 4 | __all__ = [ 5 | "GradScaler", 6 | ] 7 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/normalization/__init__.py: -------------------------------------------------------------------------------- 1 | from .fused_layer_norm import FusedLayerNorm, MixedFusedLayerNorm, FusedRMSNorm, MixedFusedRMSNorm 2 | -------------------------------------------------------------------------------- /SurgNetCode/mmseg/apis/__pycache__/test.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/apis/__pycache__/test.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/apis/__pycache__/train.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/apis/__pycache__/train.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/contrib/multihead_attn/MHA_bwd.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/apex/apex/contrib/multihead_attn/MHA_bwd.png -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/contrib/multihead_attn/MHA_fwd.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/apex/apex/contrib/multihead_attn/MHA_fwd.png -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/multi_tensor_apply/__init__.py: -------------------------------------------------------------------------------- 1 | from .multi_tensor_apply import MultiTensorApply 2 | 3 | multi_tensor_applier = MultiTensorApply(2048*32) 4 | 5 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/contrib/peer_memory/__init__.py: -------------------------------------------------------------------------------- 1 | from .peer_memory import PeerMemoryPool 2 | from .peer_halo_exchanger_1d import PeerHaloExchanger1d 3 | -------------------------------------------------------------------------------- /SurgNetCode/apex/docs/source/_static/img/nv-pytorch2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/apex/docs/source/_static/img/nv-pytorch2.png -------------------------------------------------------------------------------- /SurgNetCode/mmseg/apis/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/apis/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/apis/__pycache__/__init__.cpython-37.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/apis/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/core/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/core/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/ade.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/ade.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/ade.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/ade.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/ade.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/ade.cpython-38.pyc -------------------------------------------------------------------------------- 
/SurgNetCode/mmseg/datasets/__pycache__/hrf.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/hrf.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/hrf.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/hrf.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/hrf.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/hrf.cpython-38.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/voc.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/voc.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/voc.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/voc.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/voc.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/voc.cpython-38.pyc 
-------------------------------------------------------------------------------- /SurgNetCode/mmseg/ops/__init__.py: -------------------------------------------------------------------------------- 1 | from .encoding import Encoding 2 | from .wrappers import Upsample, resize 3 | 4 | __all__ = ['Upsample', 'resize', 'Encoding'] 5 | -------------------------------------------------------------------------------- /SurgNetCode/mmseg/ops/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/ops/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/ops/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/ops/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/ops/__pycache__/encoding.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/ops/__pycache__/encoding.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/ops/__pycache__/encoding.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/ops/__pycache__/encoding.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/ops/__pycache__/wrappers.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/ops/__pycache__/wrappers.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/ops/__pycache__/wrappers.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/ops/__pycache__/wrappers.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/utils/__pycache__/logger.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/utils/__pycache__/logger.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/utils/__pycache__/logger.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/utils/__pycache__/logger.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/transformer/amp/__init__.py: -------------------------------------------------------------------------------- 1 | from apex.transformer.amp.grad_scaler import GradScaler 2 | 3 | 4 | __all__ = [ 5 | "GradScaler", 6 | ] 7 | -------------------------------------------------------------------------------- /SurgNetCode/mmcv_custom/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmcv_custom/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmcv_custom/__pycache__/__init__.cpython-38.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmcv_custom/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmcv_custom/__pycache__/train_api.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmcv_custom/__pycache__/train_api.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmcv_custom/__pycache__/train_api.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmcv_custom/__pycache__/train_api.cpython-38.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/apis/__pycache__/inference.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/apis/__pycache__/inference.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/apis/__pycache__/inference.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/apis/__pycache__/inference.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/__init__.py: -------------------------------------------------------------------------------- 1 | from .evaluation import * # noqa: F401, F403 2 | from .seg import * # noqa: F401, F403 3 | from .utils import * # noqa: F401, F403 4 | -------------------------------------------------------------------------------- 
/SurgNetCode/mmseg/core/utils/__pycache__/misc.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/core/utils/__pycache__/misc.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/utils/__pycache__/misc.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/core/utils/__pycache__/misc.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/custom.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/custom.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/custom.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/custom.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/custom.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/custom.cpython-38.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/drive.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/drive.cpython-36.pyc 
-------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/drive.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/drive.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/drive.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/drive.cpython-38.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/stare.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/stare.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/stare.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/stare.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/stare.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/stare.cpython-38.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/__pycache__/builder.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/__pycache__/builder.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/__pycache__/builder.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/__pycache__/builder.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/utils/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/utils/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/utils/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/utils/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/contrib/optimizers/__init__.py: 
-------------------------------------------------------------------------------- 1 | from .fp16_optimizer import FP16_Optimizer 2 | from .fused_adam import FusedAdam 3 | from .fused_lamb import FusedLAMB 4 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/multi_tensor_apply/__init__.py: -------------------------------------------------------------------------------- 1 | from .multi_tensor_apply import MultiTensorApply 2 | 3 | multi_tensor_applier = MultiTensorApply(2048*32) 4 | 5 | -------------------------------------------------------------------------------- /SurgNetCode/mmcv_custom/__pycache__/checkpoint.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmcv_custom/__pycache__/checkpoint.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmcv_custom/__pycache__/checkpoint.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmcv_custom/__pycache__/checkpoint.cpython-38.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/seg/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/core/seg/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/seg/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/core/seg/__pycache__/__init__.cpython-37.pyc 
-------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/seg/__pycache__/builder.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/core/seg/__pycache__/builder.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/seg/__pycache__/builder.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/core/seg/__pycache__/builder.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/builder.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/builder.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/builder.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/builder.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/builder.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/builder.cpython-38.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/chase_db1.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/chase_db1.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/chase_db1.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/chase_db1.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/chase_db1.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/chase_db1.cpython-38.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/cocostuff.cpython-36.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/cocostuff.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/cocostuff.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/cocostuff.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/cocostuff.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/cocostuff.cpython-38.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/mapillary.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/mapillary.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/mapillary.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/mapillary.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/mapillary.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/mapillary.cpython-38.pyc 
-------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/necks/__pycache__/fpn.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/necks/__pycache__/fpn.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/utils/__pycache__/collect_env.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/utils/__pycache__/collect_env.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/utils/__pycache__/collect_env.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/utils/__pycache__/collect_env.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/apex/tests/distributed/amp_master_params/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | python -m torch.distributed.launch --nproc_per_node=2 amp_master_params.py 3 | 4 | python compare.py 5 | -------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/utils/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/core/utils/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/utils/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/core/utils/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/cityscapes.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/cityscapes.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/cityscapes.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/cityscapes.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/cityscapes.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/cityscapes.cpython-38.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/losses/__pycache__/utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/losses/__pycache__/utils.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/losses/__pycache__/utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/losses/__pycache__/utils.cpython-37.pyc -------------------------------------------------------------------------------- 
/SurgNetCode/apex/build/lib/apex/contrib/optimizers/__init__.py: -------------------------------------------------------------------------------- 1 | from .fp16_optimizer import FP16_Optimizer 2 | from .fused_adam import FusedAdam 3 | from .fused_lamb import FusedLAMB 4 | -------------------------------------------------------------------------------- /SurgNetCode/apex/tests/distributed/DDP/run_race_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch --nproc_per_node=2 ddp_race_condition_test.py 4 | -------------------------------------------------------------------------------- /SurgNetCode/mmcv_custom/__pycache__/resize_transform.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmcv_custom/__pycache__/resize_transform.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmcv_custom/__pycache__/resize_transform.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmcv_custom/__pycache__/resize_transform.cpython-38.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/evaluation/__pycache__/metrics.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/core/evaluation/__pycache__/metrics.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/evaluation/__pycache__/metrics.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/core/evaluation/__pycache__/metrics.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/pascal_context.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/pascal_context.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/pascal_context.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/pascal_context.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/pascal_context.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/pascal_context.cpython-38.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/backbones/__pycache__/cgnet.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/backbones/__pycache__/cgnet.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/backbones/__pycache__/cgnet.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/backbones/__pycache__/cgnet.cpython-37.pyc -------------------------------------------------------------------------------- 
/SurgNetCode/mmseg/models/backbones/__pycache__/hrnet.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/backbones/__pycache__/hrnet.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/backbones/__pycache__/resnet.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/backbones/__pycache__/resnet.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/backbones/__pycache__/unet.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/backbones/__pycache__/unet.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/losses/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/losses/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/losses/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/losses/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/losses/__pycache__/accuracy.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/losses/__pycache__/accuracy.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/losses/__pycache__/accuracy.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/losses/__pycache__/accuracy.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/necks/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/necks/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/segmentors/__pycache__/base.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/segmentors/__pycache__/base.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/utils/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/utils/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/utils/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/utils/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- 
/SurgNetCode/mmseg/models/utils/__pycache__/res_layer.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/utils/__pycache__/res_layer.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/utils/__pycache__/res_layer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/utils/__pycache__/res_layer.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/utils/__pycache__/se_layer.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/utils/__pycache__/se_layer.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/utils/__pycache__/se_layer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/utils/__pycache__/se_layer.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/evaluation/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/core/evaluation/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/evaluation/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/core/evaluation/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/seg/sampler/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/core/seg/sampler/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/seg/sampler/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/core/seg/sampler/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/dataset_wrappers.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/dataset_wrappers.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/dataset_wrappers.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/dataset_wrappers.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__pycache__/dataset_wrappers.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/__pycache__/dataset_wrappers.cpython-38.pyc 
-------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/backbones/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/backbones/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/backbones/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/backbones/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/backbones/__pycache__/resnest.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/backbones/__pycache__/resnest.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/backbones/__pycache__/resnext.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/backbones/__pycache__/resnext.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/losses/__pycache__/loss_focal.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/losses/__pycache__/loss_focal.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/losses/__pycache__/lovasz_loss.cpython-36.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/losses/__pycache__/lovasz_loss.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/losses/__pycache__/lovasz_loss.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/losses/__pycache__/lovasz_loss.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/transformer/functional/__init__.py: -------------------------------------------------------------------------------- 1 | from apex.transformer.functional.fused_softmax import FusedScaleMaskSoftmax 2 | 3 | __all__ = [ 4 | "FusedScaleMaskSoftmax", 5 | ] 6 | -------------------------------------------------------------------------------- /SurgNetCode/mmcv_custom/apex_runner/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmcv_custom/apex_runner/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmcv_custom/apex_runner/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmcv_custom/apex_runner/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmcv_custom/apex_runner/__pycache__/checkpoint.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmcv_custom/apex_runner/__pycache__/checkpoint.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmcv_custom/apex_runner/__pycache__/checkpoint.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmcv_custom/apex_runner/__pycache__/checkpoint.cpython-38.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmcv_custom/apex_runner/__pycache__/optimizer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmcv_custom/apex_runner/__pycache__/optimizer.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmcv_custom/apex_runner/__pycache__/optimizer.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmcv_custom/apex_runner/__pycache__/optimizer.cpython-38.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/evaluation/__pycache__/class_names.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/core/evaluation/__pycache__/class_names.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/evaluation/__pycache__/class_names.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/core/evaluation/__pycache__/class_names.cpython-37.pyc 
-------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/evaluation/__pycache__/eval_hooks.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/core/evaluation/__pycache__/eval_hooks.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/evaluation/__pycache__/eval_hooks.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/core/evaluation/__pycache__/eval_hooks.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/pipelines/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/pipelines/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/pipelines/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/pipelines/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/pipelines/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/pipelines/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/pipelines/__pycache__/compose.cpython-36.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/pipelines/__pycache__/compose.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/pipelines/__pycache__/compose.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/pipelines/__pycache__/compose.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/pipelines/__pycache__/compose.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/pipelines/__pycache__/compose.cpython-38.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/pipelines/__pycache__/formating.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/pipelines/__pycache__/formating.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/pipelines/__pycache__/formating.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/pipelines/__pycache__/formating.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/pipelines/__pycache__/formating.cpython-38.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/pipelines/__pycache__/formating.cpython-38.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/pipelines/__pycache__/loading.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/pipelines/__pycache__/loading.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/pipelines/__pycache__/loading.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/pipelines/__pycache__/loading.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/pipelines/__pycache__/loading.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/pipelines/__pycache__/loading.cpython-38.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/backbones/__pycache__/fast_scnn.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/backbones/__pycache__/fast_scnn.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/backbones/__pycache__/fast_scnn.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/backbones/__pycache__/fast_scnn.cpython-37.pyc 
-------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/ann_head.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/ann_head.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/ann_head.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/ann_head.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/apc_head.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/apc_head.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/apc_head.cpython-37.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/apc_head.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/cc_head.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/cc_head.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/cc_head.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/cc_head.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/da_head.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/da_head.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/da_head.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/da_head.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/dm_head.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/dm_head.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/dm_head.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/dm_head.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/dnl_head.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/dnl_head.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/dnl_head.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/dnl_head.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/ema_head.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/ema_head.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/ema_head.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/ema_head.cpython-37.pyc 
-------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/enc_head.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/enc_head.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/enc_head.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/enc_head.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/fcn_head.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/fcn_head.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/fcn_head.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/fcn_head.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/fpn_head.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/fpn_head.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/fpn_head.cpython-37.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/fpn_head.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/gc_head.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/gc_head.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/gc_head.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/gc_head.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/nl_head.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/nl_head.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/nl_head.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/nl_head.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/ocr_head.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/ocr_head.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/ocr_head.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/ocr_head.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/psa_head.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/psa_head.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/psp_head.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/psp_head.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/segmentors/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/segmentors/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/utils/__pycache__/make_divisible.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/utils/__pycache__/make_divisible.cpython-36.pyc 
-------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/utils/__pycache__/make_divisible.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/utils/__pycache__/make_divisible.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/utils/__pycache__/up_conv_block.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/utils/__pycache__/up_conv_block.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/utils/__pycache__/up_conv_block.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/utils/__pycache__/up_conv_block.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .collect_env import collect_env 2 | from .logger import get_root_logger, print_log 3 | 4 | __all__ = ['get_root_logger', 'collect_env', 'print_log'] 5 | -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/pipelines/__pycache__/transforms.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/pipelines/__pycache__/transforms.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/pipelines/__pycache__/transforms.cpython-37.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/pipelines/__pycache__/transforms.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/pipelines/__pycache__/transforms.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/pipelines/__pycache__/transforms.cpython-38.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/backbones/__pycache__/mobilenet_v2.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/backbones/__pycache__/mobilenet_v2.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/backbones/__pycache__/mobilenet_v3.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/backbones/__pycache__/mobilenet_v3.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/aspp_head.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/aspp_head.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/aspp_head.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/aspp_head.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/point_head.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/point_head.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/point_head.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/point_head.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/uper_head.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/uper_head.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/utils/__pycache__/inverted_residual.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/utils/__pycache__/inverted_residual.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/utils/__pycache__/inverted_residual.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/utils/__pycache__/inverted_residual.cpython-37.pyc 
-------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/transformer/functional/__init__.py: -------------------------------------------------------------------------------- 1 | from apex.transformer.functional.fused_softmax import FusedScaleMaskSoftmax 2 | 3 | __all__ = [ 4 | "FusedScaleMaskSoftmax", 5 | ] 6 | -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/pipelines/__pycache__/test_time_aug.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/pipelines/__pycache__/test_time_aug.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/pipelines/__pycache__/test_time_aug.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/pipelines/__pycache__/test_time_aug.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/pipelines/__pycache__/test_time_aug.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/datasets/pipelines/__pycache__/test_time_aug.cpython-38.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/backbones/__pycache__/mix_transformer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/backbones/__pycache__/mix_transformer.cpython-37.pyc -------------------------------------------------------------------------------- 
/SurgNetCode/mmseg/models/decode_heads/__pycache__/decode_head.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/decode_head.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/decode_head.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/decode_head.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/lraspp_head.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/lraspp_head.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/lraspp_head.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/lraspp_head.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/sep_aspp_head.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/sep_aspp_head.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/sep_fcn_head.cpython-37.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/sep_fcn_head.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/losses/__pycache__/cross_entropy_loss.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/losses/__pycache__/cross_entropy_loss.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/losses/__pycache__/cross_entropy_loss.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/losses/__pycache__/cross_entropy_loss.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/segmentors/__pycache__/encoder_decoder.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/segmentors/__pycache__/encoder_decoder.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/utils/__pycache__/self_attention_block.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/utils/__pycache__/self_attention_block.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/utils/__pycache__/self_attention_block.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/utils/__pycache__/self_attention_block.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/seg/sampler/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_pixel_sampler import BasePixelSampler 2 | from .ohem_pixel_sampler import OHEMPixelSampler 3 | 4 | __all__ = ['BasePixelSampler', 'OHEMPixelSampler'] 5 | -------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/seg/sampler/__pycache__/base_pixel_sampler.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/core/seg/sampler/__pycache__/base_pixel_sampler.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/seg/sampler/__pycache__/base_pixel_sampler.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/core/seg/sampler/__pycache__/base_pixel_sampler.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/seg/sampler/__pycache__/ohem_pixel_sampler.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/core/seg/sampler/__pycache__/ohem_pixel_sampler.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/seg/sampler/__pycache__/ohem_pixel_sampler.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/core/seg/sampler/__pycache__/ohem_pixel_sampler.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/segformer_head.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/segformer_head.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/contrib/sparsity/permutation_search_kernels/__init__.py: -------------------------------------------------------------------------------- 1 | from .call_permutation_search_kernels import accelerated_search_for_good_permutation 2 | from .permutation_utilities import sum_after_2_to_4 -------------------------------------------------------------------------------- /SurgNetCode/apex/csrc/compat.h: -------------------------------------------------------------------------------- 1 | #ifndef TORCH_CHECK 2 | #define TORCH_CHECK AT_CHECK 3 | #endif 4 | 5 | #ifdef VERSION_GE_1_3 6 | #define DATA_PTR data_ptr 7 | #else 8 | #define DATA_PTR data 9 | #endif 10 | -------------------------------------------------------------------------------- /SurgNetCode/mmcv_custom/__pycache__/layer_decay_optimizer_constructor.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmcv_custom/__pycache__/layer_decay_optimizer_constructor.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmcv_custom/__pycache__/layer_decay_optimizer_constructor.cpython-38.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmcv_custom/__pycache__/layer_decay_optimizer_constructor.cpython-38.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmcv_custom/apex_runner/__pycache__/apex_iter_based_runner.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmcv_custom/apex_runner/__pycache__/apex_iter_based_runner.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmcv_custom/apex_runner/__pycache__/apex_iter_based_runner.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmcv_custom/apex_runner/__pycache__/apex_iter_based_runner.cpython-38.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/cascade_decode_head.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/cascade_decode_head.cpython-36.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__pycache__/cascade_decode_head.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/decode_heads/__pycache__/cascade_decode_head.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/segmentors/__init__.py: -------------------------------------------------------------------------------- 1 | from .cascade_encoder_decoder import CascadeEncoderDecoder 2 | from 
.encoder_decoder import EncoderDecoder 3 | 4 | __all__ = ['EncoderDecoder', 'CascadeEncoderDecoder'] 5 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/contrib/sparsity/permutation_search_kernels/__init__.py: -------------------------------------------------------------------------------- 1 | from .call_permutation_search_kernels import accelerated_search_for_good_permutation 2 | from .permutation_utilities import sum_after_2_to_4 -------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/seg/__init__.py: -------------------------------------------------------------------------------- 1 | from .builder import build_pixel_sampler 2 | from .sampler import BasePixelSampler, OHEMPixelSampler 3 | 4 | __all__ = ['build_pixel_sampler', 'BasePixelSampler', 'OHEMPixelSampler'] 5 | -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/segmentors/__pycache__/cascade_encoder_decoder.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HanHuCAS/SurgNet/HEAD/SurgNetCode/mmseg/models/segmentors/__pycache__/cascade_encoder_decoder.cpython-37.pyc -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/contrib/bottleneck/__init__.py: -------------------------------------------------------------------------------- 1 | from .bottleneck import Bottleneck, SpatialBottleneck 2 | from .halo_exchangers import HaloExchangerNoComm, HaloExchangerAllGather, HaloExchangerSendRecv, HaloExchangerPeer 3 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/contrib/bottleneck/__init__.py: -------------------------------------------------------------------------------- 1 | from .bottleneck import Bottleneck, SpatialBottleneck 2 | from .halo_exchangers 
import HaloExchangerNoComm, HaloExchangerAllGather, HaloExchangerSendRecv, HaloExchangerPeer 3 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/contrib/multihead_attn/__init__.py: -------------------------------------------------------------------------------- 1 | from .self_multihead_attn import SelfMultiheadAttn 2 | from .encdec_multihead_attn import EncdecMultiheadAttn 3 | from .mask_softmax_dropout_func import fast_mask_softmax_dropout_func 4 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex.egg-info/PKG-INFO: -------------------------------------------------------------------------------- 1 | Metadata-Version: 2.1 2 | Name: apex 3 | Version: 0.1 4 | Summary: PyTorch Extensions written by NVIDIA 5 | License: UNKNOWN 6 | Platform: UNKNOWN 7 | License-File: LICENSE 8 | 9 | UNKNOWN 10 | 11 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/contrib/multihead_attn/__init__.py: -------------------------------------------------------------------------------- 1 | from .self_multihead_attn import SelfMultiheadAttn 2 | from .encdec_multihead_attn import EncdecMultiheadAttn 3 | from .mask_softmax_dropout_func import fast_mask_softmax_dropout_func 4 | -------------------------------------------------------------------------------- /SurgNetCode/apex/tests/L1/cross_product/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # DATADIR="/home/mcarilli/Desktop/pt18data/apex_stale/examples/imagenet/bare_metal_train_val/" 4 | # DATADIR="/opt/home/apex/examples/imagenet/" 5 | cp ../common/* . 
6 | bash run_test.sh single_gpu $1 7 | -------------------------------------------------------------------------------- /SurgNetCode/mmcv_custom/apex_runner/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Open-MMLab. All rights reserved. 2 | from .checkpoint import save_checkpoint 3 | from .apex_iter_based_runner import IterBasedRunnerAmp 4 | 5 | 6 | __all__ = [ 7 | 'save_checkpoint', 'IterBasedRunnerAmp', 8 | ] 9 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/optimizers/__init__.py: -------------------------------------------------------------------------------- 1 | from .fused_sgd import FusedSGD 2 | from .fused_adam import FusedAdam 3 | from .fused_novograd import FusedNovoGrad 4 | from .fused_lamb import FusedLAMB 5 | from .fused_adagrad import FusedAdagrad 6 | from .fused_mixed_precision_lamb import FusedMixedPrecisionLamb 7 | -------------------------------------------------------------------------------- /SurgNetCode/dist_train.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CONFIG=$1 4 | GPUS=$2 5 | PORT=${PORT:-29503} 6 | 7 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 8 | python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ 9 | $(dirname "$0")/train.py $CONFIG --launcher pytorch ${@:3} 10 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/transformer/pipeline_parallel/__init__.py: -------------------------------------------------------------------------------- 1 | from apex.transformer.pipeline_parallel.schedules import get_forward_backward_func 2 | from apex.transformer.pipeline_parallel.schedules.common import build_model 3 | 4 | 5 | __all__ = [ 6 | "get_forward_backward_func", 7 | "build_model", 8 | ] 9 | 
-------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/seg/builder.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import Registry, build_from_cfg 2 | 3 | PIXEL_SAMPLERS = Registry('pixel sampler') 4 | 5 | 6 | def build_pixel_sampler(cfg, **default_args): 7 | """Build pixel sampler for segmentation map.""" 8 | return build_from_cfg(cfg, PIXEL_SAMPLERS, default_args) 9 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/optimizers/__init__.py: -------------------------------------------------------------------------------- 1 | from .fused_sgd import FusedSGD 2 | from .fused_adam import FusedAdam 3 | from .fused_novograd import FusedNovoGrad 4 | from .fused_lamb import FusedLAMB 5 | from .fused_adagrad import FusedAdagrad 6 | from .fused_mixed_precision_lamb import FusedMixedPrecisionLamb 7 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/transformer/_data/__init__.py: -------------------------------------------------------------------------------- 1 | from apex.transformer._data._batchsampler import MegatronPretrainingRandomSampler 2 | from apex.transformer._data._batchsampler import MegatronPretrainingSampler 3 | 4 | 5 | __all__ = [ 6 | "MegatronPretrainingRandomSampler", 7 | "MegatronPretrainingSampler", 8 | ] 9 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/transformer/pipeline_parallel/__init__.py: -------------------------------------------------------------------------------- 1 | from apex.transformer.pipeline_parallel.schedules import get_forward_backward_func 2 | from apex.transformer.pipeline_parallel.schedules.common import build_model 3 | 4 | 5 | __all__ = [ 6 | "get_forward_backward_func", 7 | "build_model", 8 | ] 9 | 
-------------------------------------------------------------------------------- /SurgNetCode/apex/apex/contrib/groupbn/__init__.py: -------------------------------------------------------------------------------- 1 | try: 2 | import torch 3 | import bnp 4 | from .batch_norm import BatchNorm2d_NHWC 5 | del torch 6 | del bnp 7 | del batch_norm 8 | except ImportError as err: 9 | print("apex was installed without --bnp flag, contrib.groupbn is not available") 10 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/transformer/_data/__init__.py: -------------------------------------------------------------------------------- 1 | from apex.transformer._data._batchsampler import MegatronPretrainingRandomSampler 2 | from apex.transformer._data._batchsampler import MegatronPretrainingSampler 3 | 4 | 5 | __all__ = [ 6 | "MegatronPretrainingRandomSampler", 7 | "MegatronPretrainingSampler", 8 | ] 9 | -------------------------------------------------------------------------------- /SurgNetCode/dist_test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CONFIG=$1 4 | CHECKPOINT=$2 5 | GPUS=$3 6 | PORT=${PORT:-29500} 7 | PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ 8 | python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ 9 | $(dirname "$0")/test.py $CONFIG $CHECKPOINT --launcher pytorch ${@:4} 10 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/contrib/groupbn/__init__.py: -------------------------------------------------------------------------------- 1 | try: 2 | import torch 3 | import bnp 4 | from .batch_norm import BatchNorm2d_NHWC 5 | del torch 6 | del bnp 7 | del batch_norm 8 | except ImportError as err: 9 | print("apex was installed without --bnp flag, contrib.groupbn is not available") 10 | 
-------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | from .class_names import get_classes, get_palette 2 | from .eval_hooks import DistEvalHook, EvalHook 3 | from .metrics import eval_metrics, mean_dice, mean_iou 4 | 5 | __all__ = [ 6 | 'EvalHook', 'DistEvalHook', 'mean_dice', 'mean_iou', 'eval_metrics', 7 | 'get_classes', 'get_palette' 8 | ] 9 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/amp/__init__.py: -------------------------------------------------------------------------------- 1 | from .amp import init, half_function, float_function, promote_function,\ 2 | register_half_function, register_float_function, register_promote_function 3 | from .handle import scale_loss, disable_casts 4 | from .frontend import initialize, state_dict, load_state_dict 5 | from ._amp_state import master_params, _amp_state 6 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/contrib/focal_loss/__init__.py: -------------------------------------------------------------------------------- 1 | try: 2 | import torch 3 | import focal_loss_cuda 4 | from .focal_loss import focal_loss 5 | del torch 6 | del focal_loss_cuda 7 | del focal_loss 8 | except ImportError as err: 9 | print("apex was installed without --focal_loss flag, apex.contrib.focal_loss is not available") 10 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/amp/__init__.py: -------------------------------------------------------------------------------- 1 | from .amp import init, half_function, float_function, promote_function,\ 2 | register_half_function, register_float_function, register_promote_function 3 | from .handle import scale_loss, disable_casts 4 | from .frontend import initialize, 
state_dict, load_state_dict 5 | from ._amp_state import master_params, _amp_state 6 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/contrib/xentropy/__init__.py: -------------------------------------------------------------------------------- 1 | try: 2 | import torch 3 | import xentropy_cuda 4 | from .softmax_xentropy import SoftmaxCrossEntropyLoss 5 | del torch 6 | del xentropy_cuda 7 | del softmax_xentropy 8 | except ImportError as err: 9 | print("apex was installed without --xentropy flag, contrib.xentropy is not available") 10 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/contrib/focal_loss/__init__.py: -------------------------------------------------------------------------------- 1 | try: 2 | import torch 3 | import focal_loss_cuda 4 | from .focal_loss import focal_loss 5 | del torch 6 | del focal_loss_cuda 7 | del focal_loss 8 | except ImportError as err: 9 | print("apex was installed without --focal_loss flag, apex.contrib.focal_loss is not available") 10 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/contrib/xentropy/__init__.py: -------------------------------------------------------------------------------- 1 | try: 2 | import torch 3 | import xentropy_cuda 4 | from .softmax_xentropy import SoftmaxCrossEntropyLoss 5 | del torch 6 | del xentropy_cuda 7 | del softmax_xentropy 8 | except ImportError as err: 9 | print("apex was installed without --xentropy flag, contrib.xentropy is not available") 10 | -------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/seg/sampler/base_pixel_sampler.py: -------------------------------------------------------------------------------- 1 | from abc import ABCMeta, abstractmethod 2 | 3 | 4 | class BasePixelSampler(metaclass=ABCMeta): 5 | """Base class of pixel sampler.""" 6 | 7 | 
def __init__(self, **kwargs): 8 | pass 9 | 10 | @abstractmethod 11 | def sample(self, seg_logit, seg_label): 12 | """Placeholder for sample function.""" 13 | pass 14 | -------------------------------------------------------------------------------- /SurgNetCode/apex/docs/source/layernorm.rst: -------------------------------------------------------------------------------- 1 | .. role:: hidden 2 | :class: hidden-section 3 | 4 | apex.normalization.fused_layer_norm 5 | =================================== 6 | 7 | .. automodule:: apex.normalization 8 | .. currentmodule:: apex.normalization 9 | 10 | .. FusedAdam 11 | ---------- 12 | 13 | .. autoclass:: FusedLayerNorm 14 | :members: 15 | 16 | .. autoclass:: FusedRMSNorm 17 | :members: 18 | -------------------------------------------------------------------------------- /SurgNetCode/configs/_base_/default_runtime.py: -------------------------------------------------------------------------------- 1 | # yapf:disable 2 | log_config = dict( 3 | interval=50, 4 | hooks=[ 5 | dict(type='TextLoggerHook', by_epoch=False), 6 | # dict(type='TensorboardLoggerHook') 7 | ]) 8 | # yapf:enable 9 | dist_params = dict(backend='nccl') 10 | log_level = 'INFO' 11 | load_from = None 12 | resume_from = None 13 | workflow = [('train', 1)] 14 | cudnn_benchmark = True 15 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/contrib/csrc/groupbn/cuda_utils.h: -------------------------------------------------------------------------------- 1 | #include 2 | #ifndef CUDA_UTILS_H 3 | #define CUDA_UTILS_H 4 | 5 | namespace at { 6 | namespace cuda { 7 | 8 | namespace utils { 9 | 10 | static inline int MaxSharedMemoryPerMultiprocessor(int device_id) { 11 | return getDeviceProperties(device_id)->sharedMemPerMultiprocessor; 12 | } 13 | 14 | 15 | } 16 | } 17 | } 18 | 19 | 20 | #endif 21 | -------------------------------------------------------------------------------- 
/SurgNetCode/apex/apex/transformer/layers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 2 | from apex.transformer.layers.layer_norm import FastLayerNorm 3 | from apex.transformer.layers.layer_norm import FusedLayerNorm 4 | from apex.transformer.layers.layer_norm import MixedFusedLayerNorm 5 | 6 | 7 | __all__ = [ 8 | "FastLayerNorm", 9 | "FusedLayerNorm", 10 | "MixedFusedLayerNorm", 11 | ] 12 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/transformer/layers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 2 | from apex.transformer.layers.layer_norm import FastLayerNorm 3 | from apex.transformer.layers.layer_norm import FusedLayerNorm 4 | from apex.transformer.layers.layer_norm import MixedFusedLayerNorm 5 | 6 | 7 | __all__ = [ 8 | "FastLayerNorm", 9 | "FusedLayerNorm", 10 | "MixedFusedLayerNorm", 11 | ] 12 | -------------------------------------------------------------------------------- /SurgNetCode/mmseg/apis/__init__.py: -------------------------------------------------------------------------------- 1 | from .inference import inference_segmentor, init_segmentor, show_result_pyplot 2 | from .test import multi_gpu_test, single_gpu_test 3 | from .train import get_root_logger, set_random_seed, train_segmentor 4 | 5 | __all__ = [ 6 | 'get_root_logger', 'set_random_seed', 'train_segmentor', 'init_segmentor', 7 | 'inference_segmentor', 'multi_gpu_test', 'single_gpu_test', 8 | 'show_result_pyplot' 9 | ] 10 | -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .inverted_residual import InvertedResidual, 
InvertedResidualV3 2 | from .make_divisible import make_divisible 3 | from .res_layer import ResLayer 4 | from .self_attention_block import SelfAttentionBlock 5 | from .up_conv_block import UpConvBlock 6 | 7 | __all__ = [ 8 | 'ResLayer', 'SelfAttentionBlock', 'make_divisible', 'InvertedResidual', 9 | 'UpConvBlock', 'InvertedResidualV3' 10 | ] 11 | -------------------------------------------------------------------------------- /SurgNetCode/configs/_base_/schedules/schedule_80k_adamw.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='AdamW', lr=0.0002, weight_decay=0.0001) 3 | optimizer_config = dict() 4 | # learning policy 5 | lr_config = dict(policy='poly', power=0.9, min_lr=0.0, by_epoch=False) 6 | # runtime settings 7 | runner = dict(type='IterBasedRunner', max_iters=80000) 8 | checkpoint_config = dict(by_epoch=False, interval=1000) 9 | evaluation = dict(interval=4000, metric='mIoU') 10 | -------------------------------------------------------------------------------- /SurgNetCode/configs/_base_/schedules/schedule_20k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optimizer_config = dict() 4 | # learning policy 5 | lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) 6 | # runtime settings 7 | runner = dict(type='IterBasedRunner', max_iters=20000) 8 | checkpoint_config = dict(by_epoch=False, interval=1000) 9 | evaluation = dict(interval=4000, metric='mIoU') 10 | -------------------------------------------------------------------------------- /SurgNetCode/configs/_base_/schedules/schedule_40k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optimizer_config = dict() 4 | # learning policy 5 | lr_config = 
dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) 6 | # runtime settings 7 | runner = dict(type='IterBasedRunner', max_iters=40000) 8 | checkpoint_config = dict(by_epoch=False, interval=1000) 9 | evaluation = dict(interval=4000, metric='mIoU') 10 | -------------------------------------------------------------------------------- /SurgNetCode/mmcv_custom/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from .checkpoint import load_checkpoint 4 | from .layer_decay_optimizer_constructor import LayerDecayOptimizerConstructor 5 | from .resize_transform import SETR_Resize 6 | from .apex_runner.optimizer import DistOptimizerHook 7 | from .train_api import train_segmentor 8 | 9 | __all__ = ['load_checkpoint', 'LayerDecayOptimizerConstructor', 'SETR_Resize', 'DistOptimizerHook', 'train_segmentor'] 10 | -------------------------------------------------------------------------------- /SurgNetCode/configs/_base_/schedules/schedule_160k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optimizer_config = dict() 4 | # learning policy 5 | lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) 6 | # runtime settings 7 | runner = dict(type='IterBasedRunner', max_iters=160000) 8 | checkpoint_config = dict(by_epoch=False, interval=2000) 9 | evaluation = dict(interval=16000, metric='mIoU') 10 | -------------------------------------------------------------------------------- /SurgNetCode/configs/_base_/schedules/schedule_320k.py: -------------------------------------------------------------------------------- 1 | # optimizer 2 | optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) 3 | optimizer_config = dict() 4 | # learning policy 5 | lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) 6 | # runtime settings 7 
| runner = dict(type='IterBasedRunner', max_iters=320000) 8 | checkpoint_config = dict(by_epoch=False, interval=32000) 9 | evaluation = dict(interval=32000, metric='mIoU') 10 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/fp16_utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .fp16util import ( 2 | BN_convert_float, 3 | network_to_half, 4 | prep_param_lists, 5 | model_grads_to_master_grads, 6 | master_params_to_model_params, 7 | tofp16, 8 | to_python_float, 9 | clip_grad_norm, 10 | convert_module, 11 | convert_network, 12 | FP16Model, 13 | ) 14 | 15 | from .fp16_optimizer import FP16_Optimizer 16 | from .loss_scaler import LossScaler, DynamicLossScaler 17 | -------------------------------------------------------------------------------- /SurgNetCode/mmseg/core/utils/misc.py: -------------------------------------------------------------------------------- 1 | def add_prefix(inputs, prefix): 2 | """Add prefix for dict. 3 | 4 | Args: 5 | inputs (dict): The input dict with str keys. 6 | prefix (str): The prefix to add. 7 | 8 | Returns: 9 | 10 | dict: The dict with keys updated with ``prefix``. 11 | """ 12 | 13 | outputs = dict() 14 | for name, value in inputs.items(): 15 | outputs[f'{prefix}.{name}'] = value 16 | 17 | return outputs 18 | -------------------------------------------------------------------------------- /SurgNetCode/apex/examples/README.md: -------------------------------------------------------------------------------- 1 | This directory contains examples illustrating Apex mixed precision and distributed tools. 2 | 3 | **Note for users of the pre-unification API**: 4 | `deprecated_api` contains examples illustrating the old (pre-unified) APIs. These APIs will be removed soon, and users are strongly encouraged to switch. 
The separate mixed precision tools called `Amp` and `FP16_Optimizer` in the old API are exposed via different flags/optimization levels in the new API. 5 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/fp16_utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .fp16util import ( 2 | BN_convert_float, 3 | network_to_half, 4 | prep_param_lists, 5 | model_grads_to_master_grads, 6 | master_params_to_model_params, 7 | tofp16, 8 | to_python_float, 9 | clip_grad_norm, 10 | convert_module, 11 | convert_network, 12 | FP16Model, 13 | ) 14 | 15 | from .fp16_optimizer import FP16_Optimizer 16 | from .loss_scaler import LossScaler, DynamicLossScaler 17 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/transformer/log_util.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | 4 | 5 | def get_transformer_logger(name: str) -> logging.Logger: 6 | name_wo_ext = os.path.splitext(name)[0] 7 | return logging.getLogger(name_wo_ext) 8 | 9 | 10 | def set_logging_level(verbosity) -> None: 11 | """Change logging severity. 12 | 13 | Args: 14 | verbosity 15 | """ 16 | from apex import _library_root_logger 17 | 18 | _library_root_logger.setLevel(verbosity) 19 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/transformer/log_util.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | 4 | 5 | def get_transformer_logger(name: str) -> logging.Logger: 6 | name_wo_ext = os.path.splitext(name)[0] 7 | return logging.getLogger(name_wo_ext) 8 | 9 | 10 | def set_logging_level(verbosity) -> None: 11 | """Change logging severity. 
12 | 13 | Args: 14 | verbosity 15 | """ 16 | from apex import _library_root_logger 17 | 18 | _library_root_logger.setLevel(verbosity) 19 | -------------------------------------------------------------------------------- /SurgNetCode/apex/docs/source/optimizers.rst: -------------------------------------------------------------------------------- 1 | .. role:: hidden 2 | :class: hidden-section 3 | 4 | apex.optimizers 5 | =================================== 6 | 7 | .. automodule:: apex.optimizers 8 | .. currentmodule:: apex.optimizers 9 | 10 | .. FusedAdam 11 | ---------- 12 | 13 | .. autoclass:: FusedAdam 14 | :members: 15 | 16 | .. autoclass:: FusedLAMB 17 | :members: 18 | 19 | .. autoclass:: FusedNovoGrad 20 | :members: 21 | 22 | .. autoclass:: FusedSGD 23 | :members: 24 | -------------------------------------------------------------------------------- /SurgNetCode/mmseg/utils/collect_env.py: -------------------------------------------------------------------------------- 1 | from mmcv.utils import collect_env as collect_base_env 2 | from mmcv.utils import get_git_hash 3 | 4 | import mmseg 5 | 6 | 7 | def collect_env(): 8 | """Collect the information of the running environments.""" 9 | env_info = collect_base_env() 10 | env_info['MMSegmentation'] = f'{mmseg.__version__}+{get_git_hash()[:7]}' 11 | 12 | return env_info 13 | 14 | 15 | if __name__ == '__main__': 16 | for name, val in collect_env().items(): 17 | print('{}: {}'.format(name, val)) 18 | -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/__init__.py: -------------------------------------------------------------------------------- 1 | from .backbones import * # noqa: F401,F403 2 | from .builder import (BACKBONES, HEADS, LOSSES, SEGMENTORS, build_backbone, 3 | build_head, build_loss, build_segmentor) 4 | from .decode_heads import * # noqa: F401,F403 5 | from .losses import * # noqa: F401,F403 6 | from .necks import * # noqa: F401,F403 7 | from 
.segmentors import * # noqa: F401,F403 8 | 9 | __all__ = [ 10 | 'BACKBONES', 'HEADS', 'LOSSES', 'SEGMENTORS', 'build_backbone', 11 | 'build_head', 'build_loss', 'build_segmentor' 12 | ] 13 | -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | from .cgnet import CGNet 2 | from .fast_scnn import FastSCNN 3 | from .hrnet import HRNet 4 | from .mobilenet_v2 import MobileNetV2 5 | from .mobilenet_v3 import MobileNetV3 6 | from .resnest import ResNeSt 7 | from .resnet import ResNet, ResNetV1c, ResNetV1d 8 | from .resnext import ResNeXt 9 | from .unet import UNet 10 | 11 | from .mix_transformer import * 12 | 13 | __all__ = [ 14 | 'ResNet', 'ResNetV1c', 'ResNetV1d', 'ResNeXt', 'HRNet', 'FastSCNN', 15 | 'ResNeSt', 'MobileNetV2', 'UNet', 'CGNet', 'MobileNetV3',] 16 | -------------------------------------------------------------------------------- /SurgNetCode/apex/docs/source/parallel.rst: -------------------------------------------------------------------------------- 1 | .. role:: hidden 2 | :class: hidden-section 3 | 4 | apex.parallel 5 | =================================== 6 | 7 | .. automodule:: apex.parallel 8 | .. currentmodule:: apex.parallel 9 | 10 | .. DistributedDataParallel 11 | ---------- 12 | 13 | .. autoclass:: DistributedDataParallel 14 | :members: 15 | 16 | .. autoclass:: Reducer 17 | :members: 18 | 19 | .. autoclass:: SyncBatchNorm 20 | :members: 21 | 22 | Utility functions 23 | ---------------------------------- 24 | 25 | .. 
autofunction:: convert_syncbn_model 26 | -------------------------------------------------------------------------------- /SurgNetCode/apex/tests/distributed/synced_batchnorm/unit_test.sh: -------------------------------------------------------------------------------- 1 | python python_single_gpu_unit_test.py 2 | python single_gpu_unit_test.py 3 | python test_batchnorm1d.py 4 | python -m torch.distributed.launch --nproc_per_node=2 two_gpu_unit_test.py 5 | python -m torch.distributed.launch --nproc_per_node=2 two_gpu_unit_test.py --fp16 6 | python -m torch.distributed.launch --nproc_per_node=2 two_gpu_test_different_batch_size.py --apex 7 | #beware, you need a system with at least 4 gpus to test group_size 2 | 3 | #include 4 | #include 5 | 6 | void wgrad_gemm_accum_fp32_cuda_stub( 7 | at::Tensor &input_2d, 8 | at::Tensor &d_output_2d, 9 | at::Tensor &d_weight 10 | ); 11 | 12 | void wgrad_gemm_accum_fp16_cuda_stub( 13 | at::Tensor &input_2d, 14 | at::Tensor &d_output_2d, 15 | at::Tensor &d_weight 16 | ); 17 | 18 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 19 | m.def("wgrad_gemm_accum_fp32", &wgrad_gemm_accum_fp32_cuda_stub, "wgrad gemm accum in fp32"); 20 | m.def("wgrad_gemm_accum_fp16", &wgrad_gemm_accum_fp16_cuda_stub, "wgrad gemm accum in fp16"); 21 | } 22 | -------------------------------------------------------------------------------- /SurgNetCode/apex/csrc/flatten_unflatten.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | // https://github.com/pytorch/pytorch/blob/master/torch/csrc/utils/tensor_flatten.h 4 | 5 | at::Tensor flatten(std::vector tensors) 6 | { 7 | return torch::utils::flatten_dense_tensors(tensors); 8 | } 9 | 10 | std::vector unflatten(at::Tensor flat, std::vector tensors) 11 | { 12 | return torch::utils::unflatten_dense_tensors(flat, tensors); 13 | } 14 | 15 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 16 | m.def("flatten", &flatten, "Flatten dense tensors"); 17 | 
m.def("unflatten", &unflatten, "Unflatten dense tensors"); 18 | } 19 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/contrib/csrc/optimizers/fused_lamb_cuda.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | void multi_tensor_lamb_cuda( 4 | int chunk_size, 5 | at::Tensor noop_flag, 6 | std::vector> tensor_lists, 7 | const float lr, 8 | const float beta1, 9 | const float beta2, 10 | const float epsilon, 11 | const int step, 12 | const int bias_correction, 13 | const float weight_decay, 14 | const int grad_averaging, 15 | const int mode, 16 | const float global_grad_norm, 17 | const float max_grad_norm); 18 | 19 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 20 | m.def("lamb", &multi_tensor_lamb_cuda, "Computes and apply update for LAMB optimizer"); 21 | } 22 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/contrib/csrc/optimizers/multi_tensor_distopt_adam.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | void multi_tensor_fused_adam_cuda( 4 | int chunk_size, 5 | at::Tensor noop_flag, 6 | std::vector> tensor_lists, 7 | at::Tensor per_tensor_beta1, 8 | at::Tensor per_tensor_beta2, 9 | at::Tensor per_tensor_bias_correction, 10 | at::Tensor per_tensor_eps, 11 | at::Tensor per_tensor_weight_decay, 12 | float lr, 13 | float grad_scale, 14 | int step, 15 | int mode); 16 | 17 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 18 | m.def("multi_tensor_fused_adam", &multi_tensor_fused_adam_cuda, 19 | "Multi tensor Adam optimized CUDA implementation."); 20 | } 21 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/transformer/__init__.py: -------------------------------------------------------------------------------- 1 | from apex.transformer import amp 2 | from apex.transformer import functional 3 | from 
apex.transformer import parallel_state 4 | from apex.transformer import pipeline_parallel 5 | from apex.transformer import tensor_parallel 6 | from apex.transformer import utils 7 | from apex.transformer.enums import LayerType 8 | from apex.transformer.enums import AttnType 9 | from apex.transformer.enums import AttnMaskType 10 | 11 | 12 | __all__ = [ 13 | "amp", 14 | "functional", 15 | "parallel_state", 16 | "pipeline_parallel", 17 | "tensor_parallel", 18 | "utils", 19 | # enums.py 20 | "LayerType", 21 | "AttnType", 22 | "AttnMaskType", 23 | ] 24 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/transformer/__init__.py: -------------------------------------------------------------------------------- 1 | from apex.transformer import amp 2 | from apex.transformer import functional 3 | from apex.transformer import parallel_state 4 | from apex.transformer import pipeline_parallel 5 | from apex.transformer import tensor_parallel 6 | from apex.transformer import utils 7 | from apex.transformer.enums import LayerType 8 | from apex.transformer.enums import AttnType 9 | from apex.transformer.enums import AttnMaskType 10 | 11 | 12 | __all__ = [ 13 | "amp", 14 | "functional", 15 | "parallel_state", 16 | "pipeline_parallel", 17 | "tensor_parallel", 18 | "utils", 19 | # enums.py 20 | "LayerType", 21 | "AttnType", 22 | "AttnMaskType", 23 | ] 24 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/_autocast_utils.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Sequence 2 | 3 | import torch 4 | 5 | 6 | def _get_autocast_dtypes() -> Sequence[torch.dtype]: 7 | if torch.cuda.is_bf16_supported(): 8 | return [torch.half, torch.bfloat16] 9 | return [torch.half] 10 | 11 | 12 | def _get_current_dtype(dtype: Optional[torch.dtype] = None) -> torch.dtype: 13 | if not torch.is_autocast_enabled(): 14 | 
return torch.float or dtype 15 | else: 16 | return torch.get_autocast_gpu_dtype() 17 | 18 | 19 | def _cast_if_autocast_enabled(*args): 20 | if not torch.is_autocast_enabled(): 21 | return args 22 | else: 23 | return torch.cuda.amp.autocast_mode._cast(args, torch.get_autocast_gpu_dtype()) 24 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/_autocast_utils.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Sequence 2 | 3 | import torch 4 | 5 | 6 | def _get_autocast_dtypes() -> Sequence[torch.dtype]: 7 | if torch.cuda.is_bf16_supported(): 8 | return [torch.half, torch.bfloat16] 9 | return [torch.half] 10 | 11 | 12 | def _get_current_dtype(dtype: Optional[torch.dtype] = None) -> torch.dtype: 13 | if not torch.is_autocast_enabled(): 14 | return torch.float or dtype 15 | else: 16 | return torch.get_autocast_gpu_dtype() 17 | 18 | 19 | def _cast_if_autocast_enabled(*args): 20 | if not torch.is_autocast_enabled(): 21 | return args 22 | else: 23 | return torch.cuda.amp.autocast_mode._cast(args, torch.get_autocast_gpu_dtype()) 24 | -------------------------------------------------------------------------------- /SurgNetCode/apex/examples/simple/distributed/README.md: -------------------------------------------------------------------------------- 1 | **distributed_data_parallel.py** and **run.sh** show an example using Amp with 2 | [apex.parallel.DistributedDataParallel](https://nvidia.github.io/apex/parallel.html) or 3 | [torch.nn.parallel.DistributedDataParallel](https://pytorch.org/docs/stable/nn.html#distributeddataparallel) 4 | and the Pytorch multiprocess launcher script, 5 | [torch.distributed.launch](https://pytorch.org/docs/master/distributed.html#launch-utility). 6 | The use of `Amp` with DistributedDataParallel does not need to change from ordinary 7 | single-process use. 
The only gotcha is that wrapping your model with `DistributedDataParallel` must 8 | come after the call to `amp.initialize`. Test via 9 | ```bash 10 | bash run.sh 11 | ``` 12 | 13 | **This is intended purely as an instructional example, not a performance showcase.** 14 | -------------------------------------------------------------------------------- /SurgNetCode/apex/examples/docker/Dockerfile: -------------------------------------------------------------------------------- 1 | # Base image must at least have pytorch and CUDA installed. 2 | ARG BASE_IMAGE=nvcr.io/nvidia/pytorch:22.02-py3 3 | FROM $BASE_IMAGE 4 | ARG BASE_IMAGE 5 | RUN echo "Installing Apex on top of ${BASE_IMAGE}" 6 | # make sure we don't overwrite some existing directory called "apex" 7 | WORKDIR /tmp/unique_for_apex 8 | # uninstall Apex if present, twice to make absolutely sure :) 9 | RUN pip uninstall -y apex || : 10 | RUN pip uninstall -y apex || : 11 | # SHA is something the user can touch to force recreation of this Docker layer, 12 | # and therefore force cloning of the latest version of Apex 13 | RUN SHA=ToUcHMe git clone https://github.com/NVIDIA/apex.git 14 | WORKDIR /tmp/unique_for_apex/apex 15 | RUN pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" . 16 | WORKDIR /workspace 17 | -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/hrf.py: -------------------------------------------------------------------------------- 1 | import os.path as osp 2 | 3 | from .builder import DATASETS 4 | from .custom import CustomDataset 5 | 6 | 7 | @DATASETS.register_module() 8 | class HRFDataset(CustomDataset): 9 | """HRF dataset. 10 | 11 | In segmentation map annotation for HRF, 0 stands for background, which is 12 | included in 2 categories. ``reduce_zero_label`` is fixed to False. The 13 | ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to 14 | '.png'. 
15 | """ 16 | 17 | CLASSES = ('background', 'vessel') 18 | 19 | PALETTE = [[120, 120, 120], [6, 230, 230]] 20 | 21 | def __init__(self, **kwargs): 22 | super(HRFDataset, self).__init__( 23 | img_suffix='.png', 24 | seg_map_suffix='.png', 25 | reduce_zero_label=False, 26 | **kwargs) 27 | assert osp.exists(self.img_dir) 28 | -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/stare.py: -------------------------------------------------------------------------------- 1 | import os.path as osp 2 | 3 | from .builder import DATASETS 4 | from .custom import CustomDataset 5 | 6 | 7 | @DATASETS.register_module() 8 | class STAREDataset(CustomDataset): 9 | """STARE dataset. 10 | 11 | In segmentation map annotation for STARE, 0 stands for background, which is 12 | included in 2 categories. ``reduce_zero_label`` is fixed to False. The 13 | ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to 14 | '.ah.png'. 15 | """ 16 | 17 | CLASSES = ('background', 'vessel') 18 | 19 | PALETTE = [[120, 120, 120], [6, 230, 230]] 20 | 21 | def __init__(self, **kwargs): 22 | super(STAREDataset, self).__init__( 23 | img_suffix='.png', 24 | seg_map_suffix='.ah.png', 25 | reduce_zero_label=False, 26 | **kwargs) 27 | assert osp.exists(self.img_dir) 28 | -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/drive.py: -------------------------------------------------------------------------------- 1 | import os.path as osp 2 | 3 | from .builder import DATASETS 4 | from .custom import CustomDataset 5 | 6 | 7 | @DATASETS.register_module() 8 | class DRIVEDataset(CustomDataset): 9 | """DRIVE dataset. 10 | 11 | In segmentation map annotation for DRIVE, 0 stands for background, which is 12 | included in 2 categories. ``reduce_zero_label`` is fixed to False. The 13 | ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to 14 | '_manual1.png'. 
15 | """ 16 | 17 | CLASSES = ('background', 'vessel') 18 | 19 | PALETTE = [[120, 120, 120], [6, 230, 230]] 20 | 21 | def __init__(self, **kwargs): 22 | super(DRIVEDataset, self).__init__( 23 | img_suffix='.png', 24 | seg_map_suffix='_manual1.png', 25 | reduce_zero_label=False, 26 | **kwargs) 27 | assert osp.exists(self.img_dir) 28 | -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/chase_db1.py: -------------------------------------------------------------------------------- 1 | import os.path as osp 2 | 3 | from .builder import DATASETS 4 | from .custom import CustomDataset 5 | 6 | 7 | @DATASETS.register_module() 8 | class ChaseDB1Dataset(CustomDataset): 9 | """Chase_db1 dataset. 10 | 11 | In segmentation map annotation for Chase_db1, 0 stands for background, 12 | which is included in 2 categories. ``reduce_zero_label`` is fixed to False. 13 | The ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to 14 | '_1stHO.png'. 
15 | """ 16 | 17 | CLASSES = ('background', 'vessel') 18 | 19 | PALETTE = [[120, 120, 120], [6, 230, 230]] 20 | 21 | def __init__(self, **kwargs): 22 | super(ChaseDB1Dataset, self).__init__( 23 | img_suffix='.png', 24 | seg_map_suffix='_1stHO.png', 25 | reduce_zero_label=False, 26 | **kwargs) 27 | assert osp.exists(self.img_dir) 28 | -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from .compose import Compose 2 | from .formating import (Collect, ImageToTensor, ToDataContainer, ToTensor, 3 | Transpose, to_tensor) 4 | from .loading import LoadAnnotations, LoadImageFromFile 5 | from .test_time_aug import MultiScaleFlipAug 6 | from .transforms import (AlignedResize, CLAHE, AdjustGamma, Normalize, Pad, 7 | PhotoMetricDistortion, RandomCrop, RandomFlip, 8 | RandomRotate, Rerange, Resize, RGB2Gray, SegRescale) 9 | 10 | __all__ = [ 11 | 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer', 12 | 'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile', 13 | 'MultiScaleFlipAug', 'AlignedResize', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 14 | 'Normalize', 'SegRescale', 'PhotoMetricDistortion', 'RandomRotate', 15 | 'AdjustGamma', 'CLAHE', 'Rerange', 'RGB2Gray' 16 | ] 17 | -------------------------------------------------------------------------------- /SurgNetCode/mmseg/__init__.py: -------------------------------------------------------------------------------- 1 | import mmcv 2 | 3 | from .version import __version__, version_info 4 | 5 | MMCV_MIN = '1.1.4' 6 | MMCV_MAX = '1.3.0' 7 | 8 | 9 | def digit_version(version_str): 10 | digit_version = [] 11 | for x in version_str.split('.'): 12 | if x.isdigit(): 13 | digit_version.append(int(x)) 14 | elif x.find('rc') != -1: 15 | patch_version = x.split('rc') 16 | digit_version.append(int(patch_version[0]) - 1) 17 | 
digit_version.append(int(patch_version[1])) 18 | return digit_version 19 | 20 | 21 | mmcv_min_version = digit_version(MMCV_MIN) 22 | mmcv_max_version = digit_version(MMCV_MAX) 23 | mmcv_version = digit_version(mmcv.__version__) 24 | 25 | 26 | assert (mmcv_min_version <= mmcv_version <= mmcv_max_version), \ 27 | f'MMCV=={mmcv.__version__} is used but incompatible. ' \ 28 | f'Please install mmcv>={mmcv_min_version}, <={mmcv_max_version}.' 29 | 30 | __all__ = ['__version__', 'version_info'] 31 | -------------------------------------------------------------------------------- /SurgNetCode/util/lr_sched.py: -------------------------------------------------------------------------------- 1 | # This source code is licensed under the license found in the 2 | # LICENSE file in the root directory of this source tree. 3 | # -------------------------------------------------------- 4 | # References: 5 | # MAE: https://github.com/facebookresearch/mae 6 | # -------------------------------------------------------- 7 | 8 | import math 9 | 10 | def adjust_learning_rate(optimizer, epoch, args): 11 | """Decay the learning rate with half-cycle cosine after warmup""" 12 | if epoch < args.warmup_epochs: 13 | lr = args.lr * epoch / args.warmup_epochs 14 | else: 15 | lr = args.min_lr + (args.lr - args.min_lr) * 0.5 * \ 16 | (1. 
+ math.cos(math.pi * (epoch - args.warmup_epochs) / (args.epochs - args.warmup_epochs))) 17 | for param_group in optimizer.param_groups: 18 | if "lr_scale" in param_group: 19 | param_group["lr"] = lr * param_group["lr_scale"] 20 | else: 21 | param_group["lr"] = lr 22 | return lr 23 | -------------------------------------------------------------------------------- /SurgNetCode/mmseg/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from .ade import ADE20KDataset 2 | from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset 3 | from .chase_db1 import ChaseDB1Dataset 4 | from .cityscapes import CityscapesDataset 5 | from .custom import CustomDataset 6 | from .dataset_wrappers import ConcatDataset, RepeatDataset 7 | from .drive import DRIVEDataset 8 | from .hrf import HRFDataset 9 | from .pascal_context import PascalContextDataset 10 | from .stare import STAREDataset 11 | from .voc import PascalVOCDataset 12 | from .voc import LMY1800VOCDataset 13 | from .mapillary import MapillaryDataset 14 | from .cocostuff import CocoStuff 15 | 16 | __all__ = [ 17 | 'CustomDataset', 'build_dataloader', 'ConcatDataset', 'RepeatDataset', 18 | 'DATASETS', 'build_dataset', 'PIPELINES', 'CityscapesDataset', 19 | 'PascalVOCDataset', 'ADE20KDataset', 'PascalContextDataset', 20 | 'ChaseDB1Dataset', 'DRIVEDataset', 'HRFDataset', 'STAREDataset', 'MapillaryDataset', 'CocoStuff','LMY1800VOCDataset' 21 | ] 22 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/parallel/multiproc.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import sys 3 | import subprocess 4 | 5 | def docstring_hack(): 6 | """ 7 | Multiproc file which will launch a set of processes locally for multi-gpu 8 | usage: python -m apex.parallel.multiproc main.py ... 
9 | """ 10 | pass 11 | 12 | argslist = list(sys.argv)[1:] 13 | world_size = torch.cuda.device_count() 14 | 15 | if '--world-size' in argslist: 16 | world_size = int(argslist[argslist.index('--world-size')+1]) 17 | else: 18 | argslist.append('--world-size') 19 | argslist.append(str(world_size)) 20 | 21 | workers = [] 22 | 23 | for i in range(world_size): 24 | if '--rank' in argslist: 25 | argslist[argslist.index('--rank')+1] = str(i) 26 | else: 27 | argslist.append('--rank') 28 | argslist.append(str(i)) 29 | stdout = None if i == 0 else open("GPU_"+str(i)+".log", "w") 30 | print(argslist) 31 | p = subprocess.Popen([str(sys.executable)]+argslist, stdout=stdout) 32 | workers.append(p) 33 | 34 | for p in workers: 35 | p.wait() 36 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/parallel/multiproc.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import sys 3 | import subprocess 4 | 5 | def docstring_hack(): 6 | """ 7 | Multiproc file which will launch a set of processes locally for multi-gpu 8 | usage: python -m apex.parallel.multiproc main.py ... 
9 | """ 10 | pass 11 | 12 | argslist = list(sys.argv)[1:] 13 | world_size = torch.cuda.device_count() 14 | 15 | if '--world-size' in argslist: 16 | world_size = int(argslist[argslist.index('--world-size')+1]) 17 | else: 18 | argslist.append('--world-size') 19 | argslist.append(str(world_size)) 20 | 21 | workers = [] 22 | 23 | for i in range(world_size): 24 | if '--rank' in argslist: 25 | argslist[argslist.index('--rank')+1] = str(i) 26 | else: 27 | argslist.append('--rank') 28 | argslist.append(str(i)) 29 | stdout = None if i == 0 else open("GPU_"+str(i)+".log", "w") 30 | print(argslist) 31 | p = subprocess.Popen([str(sys.executable)]+argslist, stdout=stdout) 32 | workers.append(p) 33 | 34 | for p in workers: 35 | p.wait() 36 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/transformer/enums.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
import enum


class LayerType(enum.Enum):
    """Position of a transformer layer within an encoder-decoder stack."""
    encoder = 1
    decoder = 2


class AttnType(enum.Enum):
    """Kind of attention a block performs: over itself or over another sequence."""
    self_attn = 1
    cross_attn = 2


class AttnMaskType(enum.Enum):
    """Masking scheme applied to attention scores."""
    padding = 1
    causal = 2


class ModelType(enum.Enum):
    """Overall architecture: a single stack, or paired encoder and decoder."""
    encoder_or_decoder = 1
    encoder_and_decoder = 2
import torch


class MultiTensorApply(object):
    """Callable wrapper around apex's fused multi-tensor CUDA kernels.

    Availability is probed once per construction by importing ``amp_C`` and
    recorded on the class, so every instance shares the same verdict.
    """

    # True once any instance successfully imported amp_C.
    available = False
    # Kept for interface compatibility; not consulted by this class.
    warned = False

    def __init__(self, chunk_size):
        """Probe for amp_C and remember the kernel chunk size.

        Args:
            chunk_size (int): number of elements each kernel chunk processes.
        """
        try:
            import amp_C
            MultiTensorApply.available = True
            self.chunk_size = chunk_size
        except ImportError as err:
            # Record the failure; check_avail() surfaces it on first use.
            MultiTensorApply.available = False
            MultiTensorApply.import_err = err

    def check_avail(self):
        """Raise RuntimeError if amp_C could not be imported.

        The original code passed the message and the ImportError as two
        separate RuntimeError args, producing a tuple-shaped message; format
        them into one readable string instead.
        """
        if not MultiTensorApply.available:
            raise RuntimeError(
                "Attempted to call MultiTensorApply method, but MultiTensorApply "
                "is not available, possibly because Apex was installed without "
                "--cpp_ext --cuda_ext. Original import error message: "
                f"{MultiTensorApply.import_err!r}")

    def __call__(self, op, noop_flag_buffer, tensor_lists, *args):
        """Invoke ``op`` over ``tensor_lists`` in chunks of ``self.chunk_size``.

        Raises:
            RuntimeError: if amp_C is unavailable.
        """
        self.check_avail()

        return op(self.chunk_size,
                  noop_flag_buffer,
                  tensor_lists,
                  *args)
class SoftmaxCrossEntropyLoss(torch.autograd.Function):
    """Fused softmax cross-entropy with label smoothing, backed by the
    ``xentropy_cuda`` extension kernel."""

    @staticmethod
    def forward(ctx, logits, labels, smoothing=0.0, padding_idx=0, half_to_float=False):
        """Compute per-example losses; entries whose label equals
        ``padding_idx`` are zeroed.

        Args:
            logits: unnormalized class scores (CUDA tensor).
            labels: target class indices.
            smoothing (float): label-smoothing factor.
            padding_idx (int): label value whose loss and gradient are masked.
            half_to_float (bool): kernel returns fp32 losses for fp16 logits.
        """
        losses, max_log_sum_exp = xentropy_cuda.forward(
            logits, labels, smoothing, half_to_float)
        losses.masked_fill_(labels == padding_idx, 0)

        # Wrap the Python scalars in tensors so save_for_backward accepts them.
        ctx.save_for_backward(logits, max_log_sum_exp, labels,
                              torch.FloatTensor([smoothing]),
                              torch.LongTensor([padding_idx]))

        return losses

    @staticmethod
    def backward(ctx, grad_loss):
        """Propagate gradients through the fused kernel.

        Returns one gradient per forward input; only ``logits`` is
        differentiable.
        """
        logits, max_log_sum_exp, labels, smoothing, padding_idx = ctx.saved_tensors

        # Mask out-of-place: autograd may reuse grad_output, so it must not be
        # mutated. The previous masked_fill_ modified the caller's tensor
        # whenever grad_loss was already contiguous.
        grad_loss = grad_loss.contiguous().masked_fill(
            labels == padding_idx.item(), 0)
        grad_logits = xentropy_cuda.backward(
            grad_loss, logits, max_log_sum_exp,
            labels, smoothing.item())

        return grad_logits, None, None, None, None
smoothing, half_to_float) 9 | losses.masked_fill_(labels==padding_idx, 0) 10 | 11 | ctx.save_for_backward(logits, max_log_sum_exp, labels, 12 | torch.FloatTensor([smoothing]), 13 | torch.LongTensor([padding_idx])) 14 | 15 | return losses 16 | 17 | @staticmethod 18 | def backward(ctx, grad_loss): 19 | logits, max_log_sum_exp, labels, smoothing, padding_idx = ctx.saved_tensors 20 | 21 | if not grad_loss.is_contiguous(): 22 | grad_loss = grad_loss.contiguous() 23 | grad_loss.masked_fill_(labels==padding_idx.item(), 0) 24 | grad_logits = xentropy_cuda.backward( 25 | grad_loss.contiguous(), logits, max_log_sum_exp, 26 | labels, smoothing.item()) 27 | 28 | return grad_logits, None, None, None, None 29 | -------------------------------------------------------------------------------- /SurgNetCode/mmseg/models/decode_heads/__init__.py: -------------------------------------------------------------------------------- 1 | from .ann_head import ANNHead 2 | from .apc_head import APCHead 3 | from .aspp_head import ASPPHead 4 | from .cc_head import CCHead 5 | from .da_head import DAHead 6 | from .dm_head import DMHead 7 | from .dnl_head import DNLHead 8 | from .ema_head import EMAHead 9 | from .enc_head import EncHead 10 | from .fcn_head import FCNHead 11 | from .fpn_head import FPNHead 12 | from .gc_head import GCHead 13 | from .lraspp_head import LRASPPHead 14 | from .nl_head import NLHead 15 | from .ocr_head import OCRHead 16 | from .point_head import PointHead 17 | from .psa_head import PSAHead 18 | from .psp_head import PSPHead 19 | from .sep_aspp_head import DepthwiseSeparableASPPHead 20 | from .sep_fcn_head import DepthwiseSeparableFCNHead 21 | from .uper_head import UPerHead 22 | 23 | 24 | from .segformer_head import SegFormerHead 25 | 26 | __all__ = [ 27 | 'FCNHead', 'PSPHead', 'ASPPHead', 'PSAHead', 'NLHead', 'GCHead', 'CCHead', 28 | 'UPerHead', 'DepthwiseSeparableASPPHead', 'ANNHead', 'DAHead', 'OCRHead', 29 | 'EncHead', 'DepthwiseSeparableFCNHead', 'FPNHead', 
@HOOKS.register_module()
class DistOptimizerHook(OptimizerHook):
    """Optimizer hook for distributed training with gradient accumulation
    and optional apex fp16 loss scaling.

    Args:
        update_interval (int): iterations to accumulate gradients over before
            stepping the optimizer. Default: 1.
        grad_clip (dict | None): config forwarded to ``clip_grads``; ``None``
            disables clipping. Default: None.
        coalesce (bool): kept for interface compatibility. Default: True.
        bucket_size_mb (int): kept for interface compatibility. Default: -1.
        use_fp16 (bool): route the loss through ``apex.amp.scale_loss``.
            Default: False.
    """

    def __init__(self, update_interval=1, grad_clip=None, coalesce=True,
                 bucket_size_mb=-1, use_fp16=False):
        self.grad_clip = grad_clip
        self.coalesce = coalesce
        self.bucket_size_mb = bucket_size_mb
        self.update_interval = update_interval
        self.use_fp16 = use_fp16

    def before_run(self, runner):
        # Start from clean gradients so the first accumulation window is exact.
        runner.optimizer.zero_grad()

    def after_train_iter(self, runner):
        # Average the loss over the accumulation window.
        runner.outputs['loss'] /= self.update_interval
        if self.use_fp16:
            with apex.amp.scale_loss(runner.outputs['loss'], runner.optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            runner.outputs['loss'].backward()
        # Only step (and reset) on the last iteration of each window.
        if not self.every_n_iters(runner, self.update_interval):
            return
        if self.grad_clip is not None:
            self.clip_grads(runner.model.parameters())
        runner.optimizer.step()
        runner.optimizer.zero_grad()
def make_divisible(value, divisor, min_value=None, min_ratio=0.9):
    """Round a channel count to the nearest multiple of ``divisor``.

    Mirrors the helper from the original TF slim MobileNet repo
    (https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py),  # noqa
    guaranteeing every layer's channel number is divisible by ``divisor``.

    Args:
        value (int): The original channel number.
        divisor (int): The divisor to fully divide the channel number.
        min_value (int): Lower bound on the result. Default: None, meaning
            the bound equals ``divisor``.
        min_ratio (float): Minimum allowed ratio of the rounded channel
            number to the original one. Default: 0.9.

    Returns:
        int: The adjusted, divisor-aligned channel number.
    """
    if min_value is None:
        min_value = divisor
    rounded = max(min_value, int(value + divisor / 2) // divisor * divisor)
    # Never round down by more than (1 - min_ratio) of the original value.
    if rounded < min_ratio * value:
        rounded += divisor
    return rounded
@HEADS.register_module()
class CCHead(FCNHead):
    """Decode head implementing CCNet: Criss-Cross Attention for Semantic
    Segmentation (https://arxiv.org/abs/1811.11721).

    Args:
        recurrence (int): Number of times the Criss-Cross Attention module is
            applied. Default: 2.
    """

    def __init__(self, recurrence=2, **kwargs):
        if CrissCrossAttention is None:
            raise RuntimeError('Please install mmcv-full for '
                               'CrissCrossAttention ops')
        super(CCHead, self).__init__(num_convs=2, **kwargs)
        self.recurrence = recurrence
        self.cca = CrissCrossAttention(self.channels)

    def forward(self, inputs):
        """Forward function."""
        x = self._transform_inputs(inputs)
        feat = self.convs[0](x)
        # Apply criss-cross attention `recurrence` times between the two convs.
        for _ in range(self.recurrence):
            feat = self.cca(feat)
        feat = self.convs[1](feat)
        if self.concat_input:
            feat = self.conv_cat(torch.cat([x, feat], dim=1))
        return self.cls_seg(feat)
15 | */ 16 | 17 | #include "nccl_p2p_cuda.cuh" 18 | 19 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 20 | m.def("get_unique_nccl_id", &apex::contrib::nccl_p2p::get_unique_nccl_id, "get_unique_nccl_id"); 21 | m.def("init_nccl_comm", &apex::contrib::nccl_p2p::init_nccl_comm, "init_nccl_comm"); 22 | m.def("nccl_send", &apex::contrib::nccl_p2p::nccl_send, "nccl_send"); 23 | m.def("nccl_recv", &apex::contrib::nccl_p2p::nccl_recv, "nccl_recv"); 24 | m.def("left_right_halo_exchange_inplace", &apex::contrib::nccl_p2p::left_right_halo_exchange_inplace, "left_right_halo_exchange_inplace"); 25 | m.def("left_right_halo_exchange", &apex::contrib::nccl_p2p::left_right_halo_exchange, "left_right_halo_exchange"); 26 | m.def("add_delay", &apex::contrib::nccl_p2p::add_delay, "add_delay"); 27 | } 28 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/fp16_utils/README.md: -------------------------------------------------------------------------------- 1 | fp16_optimizer.py contains `FP16_Optimizer`, a Python class designed to wrap an existing Pytorch optimizer and automatically enable master parameters and loss scaling in a manner transparent to the user. To use `FP16_Optimizer`, only two lines of one's Python model need to change. 2 | 3 | #### [FP16_Optimizer API documentation](https://nvidia.github.io/apex/fp16_utils.html#automatic-management-of-master-params-loss-scaling) 4 | 5 | #### [Simple examples with FP16_Optimizer](https://github.com/NVIDIA/apex/tree/master/examples/FP16_Optimizer_simple) 6 | 7 | #### [Imagenet with FP16_Optimizer](https://github.com/NVIDIA/apex/tree/master/examples/imagenet) 8 | 9 | #### [word_language_model with FP16_Optimizer](https://github.com/NVIDIA/apex/tree/master/examples/word_language_model) 10 | 11 | 12 | fp16_util.py contains a number of utilities to manually manage master parameters and loss scaling, if the user chooses. 
from apex.transformer import parallel_state
from apex.transformer.pipeline_parallel.utils import get_num_microbatches
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_no_pipelining import (
    forward_backward_no_pipelining,
)
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_with_interleaving import (
    _forward_backward_pipelining_with_interleaving,
)
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_without_interleaving import (
    forward_backward_pipelining_without_interleaving,
)

__all__ = [
    "get_forward_backward_func",
]


class ExperimentalWarning(Warning):
    pass


def get_forward_backward_func(
    virtual_pipeline_model_parallel_size, pipeline_model_parallel_size,
):
    """Pick the forward-backward schedule matching the current parallel setup.

    Returns the no-pipelining schedule when there is a single pipeline stage,
    the interleaved schedule when a virtual pipeline size is given (requiring
    the microbatch count to divide evenly), and the plain pipelined schedule
    otherwise.
    """
    if parallel_state.get_pipeline_model_parallel_world_size() <= 1:
        return forward_backward_no_pipelining
    if virtual_pipeline_model_parallel_size is None:
        return forward_backward_pipelining_without_interleaving
    if get_num_microbatches() % pipeline_model_parallel_size != 0:
        msg = "number of microbatches is not divisible by pipeline-parallel size when using interleaved schedule"
        raise RuntimeError(msg)
    return _forward_backward_pipelining_with_interleaving
-------------------------------------------------------------------------------- 1 | from apex.transformer import parallel_state 2 | from apex.transformer.pipeline_parallel.utils import get_num_microbatches 3 | from apex.transformer.pipeline_parallel.schedules.fwd_bwd_no_pipelining import ( 4 | forward_backward_no_pipelining, 5 | ) 6 | from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_with_interleaving import ( 7 | _forward_backward_pipelining_with_interleaving, 8 | ) 9 | from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_without_interleaving import ( 10 | forward_backward_pipelining_without_interleaving, 11 | ) 12 | 13 | __all__ = [ 14 | "get_forward_backward_func", 15 | ] 16 | 17 | 18 | class ExperimentalWarning(Warning): 19 | pass 20 | 21 | 22 | def get_forward_backward_func( 23 | virtual_pipeline_model_parallel_size, pipeline_model_parallel_size, 24 | ): 25 | if parallel_state.get_pipeline_model_parallel_world_size() > 1: 26 | if virtual_pipeline_model_parallel_size is not None: 27 | if get_num_microbatches() % pipeline_model_parallel_size != 0: 28 | msg = "number of microbatches is not divisible by pipeline-parallel size when using interleaved schedule" 29 | raise RuntimeError(msg) 30 | forward_backward_func = _forward_backward_pipelining_with_interleaving 31 | else: 32 | forward_backward_func = forward_backward_pipelining_without_interleaving 33 | else: 34 | forward_backward_func = forward_backward_no_pipelining 35 | return forward_backward_func 36 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/amp/compat.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | # True for post-0.4, when Variables/Tensors merged. 
def variable_is_tensor():
    """Return True on post-0.4 PyTorch, where Variables and Tensors merged."""
    return isinstance(torch.autograd.Variable(), torch.Tensor)


def tensor_is_variable():
    """Return True on pre-0.4 PyTorch, where torch.Tensor() built a Variable."""
    return type(torch.Tensor()) == torch.autograd.Variable


def tensor_is_float_tensor():
    """Return True only on pre-0.4 builds where torch.Tensor() is a FloatTensor."""
    return type(torch.Tensor()) == torch.FloatTensor


def is_tensor_like(x):
    """Like ``torch.is_tensor``, but also True for pre-0.4 Variable objects."""
    if torch.is_tensor(x):
        return True
    return isinstance(x, torch.autograd.Variable)


def is_floating_point(x):
    """Wrap ``torch.is_floating_point`` when present; otherwise classify by
    the suffix of ``x.type()``."""
    if hasattr(torch, 'is_floating_point'):
        return torch.is_floating_point(x)
    try:
        kind = x.type()
    except AttributeError:
        return False
    return any(kind.endswith(s)
               for s in ('FloatTensor', 'HalfTensor', 'DoubleTensor'))


def scalar_python_val(x):
    """Extract a Python scalar from a tensor, Variable, or indexable value."""
    if hasattr(x, 'item'):
        return x.item()
    if isinstance(x, torch.autograd.Variable):
        return x.data[0]
    return x[0]


def filter_attrs(module, attrs):
    """Keep only the names in ``attrs`` that exist on ``module`` — accounts for
    ops removed from a namespace across torch versions."""
    return [attrname for attrname in attrs if hasattr(module, attrname)]
4 | def variable_is_tensor(): 5 | v = torch.autograd.Variable() 6 | return isinstance(v, torch.Tensor) 7 | 8 | def tensor_is_variable(): 9 | x = torch.Tensor() 10 | return type(x) == torch.autograd.Variable 11 | 12 | # False for post-0.4 13 | def tensor_is_float_tensor(): 14 | x = torch.Tensor() 15 | return type(x) == torch.FloatTensor 16 | 17 | # Akin to `torch.is_tensor`, but returns True for Variable 18 | # objects in pre-0.4. 19 | def is_tensor_like(x): 20 | return torch.is_tensor(x) or isinstance(x, torch.autograd.Variable) 21 | 22 | # Wraps `torch.is_floating_point` if present, otherwise checks 23 | # the suffix of `x.type()`. 24 | def is_floating_point(x): 25 | if hasattr(torch, 'is_floating_point'): 26 | return torch.is_floating_point(x) 27 | try: 28 | torch_type = x.type() 29 | return torch_type.endswith('FloatTensor') or \ 30 | torch_type.endswith('HalfTensor') or \ 31 | torch_type.endswith('DoubleTensor') 32 | except AttributeError: 33 | return False 34 | 35 | def scalar_python_val(x): 36 | if hasattr(x, 'item'): 37 | return x.item() 38 | else: 39 | if isinstance(x, torch.autograd.Variable): 40 | return x.data[0] 41 | else: 42 | return x[0] 43 | 44 | # Accounts for the possibility that some ops may be removed from a namespace. 45 | def filter_attrs(module, attrs): 46 | return list(attrname for attrname in attrs if hasattr(module, attrname)) 47 | -------------------------------------------------------------------------------- /SurgNetCode/util/crop.py: -------------------------------------------------------------------------------- 1 | # This source code is licensed under the license found in the 2 | # LICENSE file in the root directory of this source tree. 
class RandomResizedCrop(transforms.RandomResizedCrop):
    """
    RandomResizedCrop for matching TF/TPU implementation: no for-loop is used.
    This may lead to results different with torchvision's version.
    Following BYOL's TF code:
    https://github.com/deepmind/deepmind-research/blob/master/byol/utils/dataset.py#L206
    """

    @staticmethod
    def get_params(img, scale, ratio):
        """Sample crop parameters ``(i, j, h, w)`` for ``img`` in one draw.

        Args:
            img: PIL Image or Tensor to crop.
            scale: (lo, hi) bounds of crop area relative to the image area.
            ratio: (lo, hi) bounds of the crop aspect ratio.

        Returns:
            Tuple ``(i, j, h, w)`` — top-left corner and size of the crop.
        """
        # Fix: torchvision >= 0.10 removed the private `_get_image_size`
        # helper in favor of the public `get_image_size`; support both so the
        # code keeps working on modern installs.
        if hasattr(F, "get_image_size"):
            width, height = F.get_image_size(img)
        else:
            width, height = F._get_image_size(img)
        area = height * width

        target_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()
        log_ratio = torch.log(torch.tensor(ratio))
        aspect_ratio = torch.exp(
            torch.empty(1).uniform_(log_ratio[0], log_ratio[1])
        ).item()

        w = int(round(math.sqrt(target_area * aspect_ratio)))
        h = int(round(math.sqrt(target_area / aspect_ratio)))

        # Clamp so the sampled crop always fits inside the image.
        w = min(w, width)
        h = min(h, height)

        i = torch.randint(0, height - h + 1, size=(1,)).item()
        j = torch.randint(0, width - w + 1, size=(1,)).item()

        return i, j, h, w
class TransformerUtilsTest(NcclDistributedTestBase):
    """Distributed tests for ``apex.transformer.tensor_parallel.utils``."""

    def test_split_tensor_along_last_dim(self):
        # Try every tensor-model-parallel size that evenly divides world_size.
        # (Fixed misspelling: "paralell" -> "parallel".)
        for tensor_model_parallel_world_size in range(1, self.world_size + 1):
            if self.world_size % tensor_model_parallel_world_size > 0:
                continue
            with self.subTest(
                tensor_model_parallel_world_size=tensor_model_parallel_world_size
            ):
                parallel_state.initialize_model_parallel(
                    tensor_model_parallel_size_=tensor_model_parallel_world_size
                )

                device = "cpu"
                input_tensor = torch.randn((100, 100, 100), device=device)
                splits = utils.split_tensor_along_last_dim(input_tensor, 10)
                last_dim_shapes = torch.tensor(
                    [int(split.size()[-1]) for split in splits]
                )

                # 100 columns split 10 ways -> every chunk has last dim 10.
                self.assertTrue(torch.equal(last_dim_shapes, torch.full((10,), 10)))

                parallel_state.destroy_model_parallel()
 */

#include "peer_memory_cuda.cuh"

// Python bindings for apex's peer-memory helpers. Each m.def(...) exposes the
// C++/CUDA function of the same name from apex::contrib::peer_memory; the
// implementations live in peer_memory_cuda.cuh / its .cu counterpart.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    // Raw device-memory management and IPC plumbing.
    m.def("allocate_raw", &apex::contrib::peer_memory::allocate_raw, "allocate_raw");
    m.def("free_raw", &apex::contrib::peer_memory::free_raw, "free_raw");
    m.def("zero", &apex::contrib::peer_memory::zero, "zero");
    m.def("get_raw_ipc_address", &apex::contrib::peer_memory::get_raw_ipc_address, "get_raw_ipc_address");
    m.def("get_raw_peers", &apex::contrib::peer_memory::get_raw_peers, "get_raw_peers");
    // Typed tensor views over a raw blob.
    m.def("blob_view_half", &apex::contrib::peer_memory::blob_view_half, "blob_view_half");
    m.def("blob_view_float", &apex::contrib::peer_memory::blob_view_float, "blob_view_float");
    m.def("blob_view_int", &apex::contrib::peer_memory::blob_view_int, "blob_view_int");
    // Halo exchange between peers.
    m.def("push_pull_halos_1d", &apex::contrib::peer_memory::push_pull_halos_1d, "push_pull_halos_1d");
}
# Post-run check for the amp master-params distributed test: the fp16 model
# parameters and fp32 master parameters saved by rank 0 and rank 1 must agree
# across ranks, and each model param must match its own master copy.
import torch

def _to_gpu0(storage, loc):
    # Map every saved tensor onto GPU 0 so all four checkpoints are comparable.
    return storage.cuda(0)

model_params_rank0 = torch.load("rank0model.pth", map_location=_to_gpu0)
model_params_rank1 = torch.load("rank1model.pth", map_location=_to_gpu0)
master_params_rank0 = torch.load("rank0master.pth", map_location=_to_gpu0)
master_params_rank1 = torch.load("rank1master.pth", map_location=_to_gpu0)

param_quads = zip(
    model_params_rank0,
    model_params_rank1,
    master_params_rank0,
    master_params_rank1,
)
for model_rank0, model_rank1, master_rank0, master_rank1 in param_quads:
    # Both ranks must hold identical parameters after the run.
    assert torch.allclose(model_rank0, model_rank1), "Model param mismatch"
    assert torch.allclose(master_rank0, master_rank1), "Master param mismatch"
    # Some debugging/investigation assistance code:
    # maxval, maxind = torch.max(((torch.abs(model_rank0).float())/torch.abs(master_rank0)).view(-1), 0)
    # offending_val_half = model_rank0.view(-1)[maxind.item()]
    # offending_val_float = master_rank0.view(-1)[maxind.item()]
    # print(maxval.item(), maxind.item(), offending_val_half.item(), offending_val_float.item(),
    #       offending_val_float.half().item())
    # rtol needs to be > 2^-11 because of denormals...
    assert torch.allclose(model_rank0, master_rank0.half(), rtol=.005), "Model-master mismatch"

print("OK: Model and master params match across ranks.")
37 | """ 38 | 39 | for t in self.transforms: 40 | data = t(data) 41 | if data is None: 42 | return None 43 | return data 44 | 45 | def __repr__(self): 46 | format_string = self.__class__.__name__ + '(' 47 | for t in self.transforms: 48 | format_string += '\n' 49 | format_string += f' {t}' 50 | format_string += '\n)' 51 | return format_string 52 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/amp/lists/tensor_overrides.py: -------------------------------------------------------------------------------- 1 | from .. import compat 2 | from . import torch_overrides 3 | 4 | import importlib 5 | 6 | import torch 7 | 8 | # if compat.variable_is_tensor() and not compat.tensor_is_variable(): 9 | MODULE = torch.Tensor 10 | # else: 11 | # MODULE = torch.autograd.Variable 12 | 13 | 14 | FP16_FUNCS = compat.filter_attrs(MODULE, [ 15 | '__matmul__', 16 | ]) 17 | 18 | FP32_FUNCS = compat.filter_attrs(MODULE, [ 19 | '__ipow__', 20 | '__pow__', 21 | '__rpow__', 22 | 23 | # Cast to fp32 before transfer to CPU 24 | 'cpu', 25 | ]) 26 | 27 | CASTS = compat.filter_attrs(MODULE, [ 28 | '__add__', 29 | '__div__', 30 | '__eq__', 31 | '__ge__', 32 | '__gt__', 33 | '__iadd__', 34 | '__idiv__', 35 | '__imul__', 36 | '__isub__', 37 | '__itruediv__', 38 | '__le__', 39 | '__lt__', 40 | '__mul__', 41 | '__ne__', 42 | '__radd__', 43 | '__rdiv__', 44 | '__rmul__', 45 | '__rsub__', 46 | '__rtruediv__', 47 | '__sub__', 48 | '__truediv__', 49 | ]) 50 | 51 | # None of these, but here to make code cleaner. 
52 | SEQUENCE_CASTS = [] 53 | 54 | # We need to grab all the methods from torch_overrides and add them to 55 | # the Tensor lists as well, as almost all methods are duplicated 56 | # between `torch` and `torch.Tensor` (and check with `hasattr`, 57 | # because a few random ones aren't defined on Tensor) 58 | _self_mod = importlib.import_module(__name__) 59 | for attrname in ['FP16_FUNCS', 'FP32_FUNCS', 'CASTS', 'SEQUENCE_CASTS']: 60 | lst = getattr(_self_mod, attrname) 61 | for fn in getattr(torch_overrides, attrname): 62 | if hasattr(MODULE, fn): 63 | lst.append(fn) 64 | -------------------------------------------------------------------------------- /SurgNetCode/apex/build/lib/apex/amp/lists/tensor_overrides.py: -------------------------------------------------------------------------------- 1 | from .. import compat 2 | from . import torch_overrides 3 | 4 | import importlib 5 | 6 | import torch 7 | 8 | # if compat.variable_is_tensor() and not compat.tensor_is_variable(): 9 | MODULE = torch.Tensor 10 | # else: 11 | # MODULE = torch.autograd.Variable 12 | 13 | 14 | FP16_FUNCS = compat.filter_attrs(MODULE, [ 15 | '__matmul__', 16 | ]) 17 | 18 | FP32_FUNCS = compat.filter_attrs(MODULE, [ 19 | '__ipow__', 20 | '__pow__', 21 | '__rpow__', 22 | 23 | # Cast to fp32 before transfer to CPU 24 | 'cpu', 25 | ]) 26 | 27 | CASTS = compat.filter_attrs(MODULE, [ 28 | '__add__', 29 | '__div__', 30 | '__eq__', 31 | '__ge__', 32 | '__gt__', 33 | '__iadd__', 34 | '__idiv__', 35 | '__imul__', 36 | '__isub__', 37 | '__itruediv__', 38 | '__le__', 39 | '__lt__', 40 | '__mul__', 41 | '__ne__', 42 | '__radd__', 43 | '__rdiv__', 44 | '__rmul__', 45 | '__rsub__', 46 | '__rtruediv__', 47 | '__sub__', 48 | '__truediv__', 49 | ]) 50 | 51 | # None of these, but here to make code cleaner. 
@DATASETS.register_module()
class ConcatDataset(_ConcatDataset):
    """A wrapper of concatenated dataset.

    Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but also carries
    over the ``CLASSES``/``PALETTE`` metadata of the first dataset.

    Args:
        datasets (list[:obj:`Dataset`]): A list of datasets.
    """

    def __init__(self, datasets):
        super().__init__(datasets)
        # Metadata is taken from the first dataset; the concatenated datasets
        # are expected to share it.
        self.CLASSES = datasets[0].CLASSES
        self.PALETTE = datasets[0].PALETTE


@DATASETS.register_module()
class RepeatDataset(object):
    """A wrapper that virtually repeats a dataset.

    The repeated dataset reports a length ``times`` larger than the original
    one, which reduces inter-epoch data-loading overhead for small datasets.

    Args:
        dataset (:obj:`Dataset`): The dataset to be repeated.
        times (int): Repeat times.
    """

    def __init__(self, dataset, times):
        self.dataset = dataset
        self.times = times
        self.CLASSES = dataset.CLASSES
        self.PALETTE = dataset.PALETTE
        self._ori_len = len(self.dataset)

    def __getitem__(self, idx):
        """Map ``idx`` back into the original dataset by modulo."""
        return self.dataset[idx % self._ori_len]

    def __len__(self):
        """Original length multiplied by ``times``."""
        return self._ori_len * self.times
21 | """ 22 | 23 | def __init__(self, 24 | reduction=2, 25 | use_scale=True, 26 | mode='embedded_gaussian', 27 | **kwargs): 28 | super(NLHead, self).__init__(num_convs=2, **kwargs) 29 | self.reduction = reduction 30 | self.use_scale = use_scale 31 | self.mode = mode 32 | self.nl_block = NonLocal2d( 33 | in_channels=self.channels, 34 | reduction=self.reduction, 35 | use_scale=self.use_scale, 36 | conv_cfg=self.conv_cfg, 37 | norm_cfg=self.norm_cfg, 38 | mode=self.mode) 39 | 40 | def forward(self, inputs): 41 | """Forward function.""" 42 | x = self._transform_inputs(inputs) 43 | output = self.convs[0](x) 44 | output = self.nl_block(output) 45 | output = self.convs[1](output) 46 | if self.concat_input: 47 | output = self.conv_cat(torch.cat([x, output], dim=1)) 48 | output = self.cls_seg(output) 49 | return output 50 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/contrib/focal_loss/focal_loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | import focal_loss_cuda 4 | 5 | 6 | class FocalLoss(torch.autograd.Function): 7 | @staticmethod 8 | def forward( 9 | ctx, 10 | cls_output, 11 | cls_targets_at_level, 12 | num_positives_sum, 13 | num_real_classes, 14 | alpha, 15 | gamma, 16 | label_smoothing=0.0, 17 | ): 18 | loss, partial_grad = focal_loss_cuda.forward( 19 | cls_output, 20 | cls_targets_at_level, 21 | num_positives_sum, 22 | num_real_classes, 23 | alpha, 24 | gamma, 25 | label_smoothing, 26 | ) 27 | 28 | ctx.save_for_backward(partial_grad, num_positives_sum) 29 | return loss 30 | 31 | @staticmethod 32 | def backward(ctx, grad_loss): 33 | partial_grad, num_positives_sum = ctx.saved_tensors 34 | 35 | # The backward kernel is actually in-place to save memory space, 36 | # partial_grad and grad_input are the same tensor. 
def focal_loss(
    cls_output: torch.Tensor,
    cls_targets_at_level: torch.Tensor,
    num_positive_sum: torch.Tensor,
    num_real_classes: int,
    alpha: float,
    gamma: float,
    label_smoothing: float = 0.0,
) -> torch.Tensor:
    """Fused focal loss function.

    Thin functional wrapper around the ``FocalLoss`` autograd Function so the
    fused CUDA kernel can be invoked like an ordinary loss function.
    """
    args = (
        cls_output,
        cls_targets_at_level,
        num_positive_sum,
        num_real_classes,
        alpha,
        gamma,
        label_smoothing,
    )
    return FocalLoss.apply(*args)
def focal_loss(
    cls_output: torch.Tensor,
    cls_targets_at_level: torch.Tensor,
    num_positive_sum: torch.Tensor,
    num_real_classes: int,
    alpha: float,
    gamma: float,
    label_smoothing: float = 0.0,
) -> torch.Tensor:
    """Fused focal loss function.

    Functional wrapper around the ``FocalLoss`` autograd Function (fused CUDA
    kernel).  Note the parameter is spelled ``num_positive_sum`` here while the
    Function's forward uses ``num_positives_sum`` -- positional call, so
    behavior is unaffected.
    """
    return FocalLoss.apply(
        cls_output,
        cls_targets_at_level,
        num_positive_sum,
        num_real_classes,
        alpha,
        gamma,
        label_smoothing,
    )
Defautl: ('channel_add',) 21 | """ 22 | 23 | def __init__(self, 24 | ratio=1 / 4., 25 | pooling_type='att', 26 | fusion_types=('channel_add', ), 27 | **kwargs): 28 | super(GCHead, self).__init__(num_convs=2, **kwargs) 29 | self.ratio = ratio 30 | self.pooling_type = pooling_type 31 | self.fusion_types = fusion_types 32 | self.gc_block = ContextBlock( 33 | in_channels=self.channels, 34 | ratio=self.ratio, 35 | pooling_type=self.pooling_type, 36 | fusion_types=self.fusion_types) 37 | 38 | def forward(self, inputs): 39 | """Forward function.""" 40 | x = self._transform_inputs(inputs) 41 | output = self.convs[0](x) 42 | output = self.gc_block(output) 43 | output = self.convs[1](output) 44 | if self.concat_input: 45 | output = self.conv_cat(torch.cat([x, output], dim=1)) 46 | output = self.cls_seg(output) 47 | return output 48 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/transformer/utils.py: -------------------------------------------------------------------------------- 1 | """Utility functions used by both `pipeline_parallel` and `tensor_parallel`""" 2 | import torch 3 | 4 | from apex.transformer import parallel_state 5 | 6 | 7 | def ensure_divisibility(numerator, denominator): 8 | """Ensure that numerator is divisible by the denominator.""" 9 | assert numerator % denominator == 0, "{} is not divisible by {}".format( 10 | numerator, denominator 11 | ) 12 | 13 | 14 | def divide(numerator, denominator): 15 | """Ensure that numerator is divisible by the denominator and return 16 | the division value.""" 17 | ensure_divisibility(numerator, denominator) 18 | return numerator // denominator 19 | 20 | 21 | def split_tensor_into_1d_equal_chunks(tensor): 22 | """Break a tensor into equal 1D chunks.""" 23 | data = tensor.view(-1) 24 | partition_size = ( 25 | torch.numel(data) // parallel_state.get_tensor_model_parallel_world_size() 26 | ) 27 | start_index = partition_size * 
def ensure_divisibility(numerator, denominator):
    """Assert that ``numerator`` is an exact multiple of ``denominator``."""
    remainder = numerator % denominator
    assert remainder == 0, "{} is not divisible by {}".format(
        numerator, denominator
    )


def divide(numerator, denominator):
    """Return ``numerator // denominator`` after asserting exact divisibility."""
    ensure_divisibility(numerator, denominator)
    return numerator // denominator
def gather_split_1d_tensor(tensor):
    """Opposite of split_tensor_into_1d_equal_chunks: all-gather every
    tensor-model-parallel rank's chunk into one flat tensor of
    ``world_size * numel(tensor)`` elements."""
    world_size = parallel_state.get_tensor_model_parallel_world_size()
    numel = torch.numel(tensor)
    gathered = torch.empty(
        world_size * numel,
        dtype=tensor.dtype,
        device=torch.cuda.current_device(),
        requires_grad=False,
    )
    # Per-rank views into `gathered`; all_gather fills them in place.
    chunks = [
        gathered[rank * numel : (rank + 1) * numel] for rank in range(world_size)
    ]
    torch.distributed.all_gather(
        chunks, tensor, group=parallel_state.get_tensor_model_parallel_group()
    )
    return gathered
15 | */ 16 | 17 | #pragma once 18 | #include 19 | #ifndef _nccl_p2p_h_ 20 | #define _nccl_p2p_h_ 21 | 22 | namespace apex { namespace contrib { namespace nccl_p2p { 23 | at::Tensor get_unique_nccl_id(int n); 24 | int init_nccl_comm( 25 | at::Tensor unique_nccl_id, 26 | int my_rank, 27 | int num_ranks 28 | ); 29 | void nccl_send( 30 | int handle, 31 | at::Tensor input, 32 | int destination 33 | ); 34 | void nccl_recv( 35 | int handle, 36 | at::Tensor input, 37 | int sender 38 | ); 39 | void left_right_halo_exchange_inplace( 40 | int handle, 41 | bool left_zero, 42 | bool right_zero, 43 | at::Tensor left_output_halo, 44 | at::Tensor right_output_halo, 45 | at::Tensor left_input_halo, 46 | at::Tensor right_input_halo, 47 | int group_size); 48 | std::vector left_right_halo_exchange( 49 | int handle, 50 | bool left_zero, 51 | bool right_zero, 52 | at::Tensor left_output_halo, 53 | at::Tensor right_output_halo, 54 | int group_size 55 | ); 56 | void add_delay(int delay); 57 | }}} 58 | #endif 59 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/contrib/csrc/xentropy/interface.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | // CUDA forward declarations 4 | 5 | std::vector softmax_xentropy_cuda( 6 | const at::Tensor &input, 7 | const at::Tensor &labels, 8 | const float smoothing, 9 | const bool half_to_float); 10 | 11 | at::Tensor softmax_xentropy_backward_cuda( 12 | const at::Tensor &grad_loss, 13 | const at::Tensor &logits, 14 | const at::Tensor &max_log_sum_exp, 15 | const at::Tensor &labels, 16 | const float smoothing); 17 | 18 | // C++ interface 19 | 20 | #define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor") 21 | #define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous") 22 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) 23 | 24 | std::vector softmax_xentropy_forward( 25 | const at::Tensor 
// C++ entry point for the label-smoothing cross-entropy backward pass.
// Validates tensor placement/layout, then dispatches to the CUDA kernel.
at::Tensor softmax_xentropy_backward(
    const at::Tensor &grad_loss,
    const at::Tensor &logits,
    const at::Tensor &max_log_sum_exp,
    const at::Tensor &labels,
    const float smoothing) {
    // grad_loss/logits only need to be CUDA tensors; max_log_sum_exp and
    // labels must additionally be contiguous (CHECK_INPUT = CUDA + contiguous).
    CHECK_CUDA(grad_loss);
    CHECK_CUDA(logits);
    CHECK_INPUT(max_log_sum_exp);
    CHECK_INPUT(labels);

    return softmax_xentropy_backward_cuda(grad_loss, logits, max_log_sum_exp, labels, smoothing);
}

// Python bindings for the fused softmax cross-entropy extension.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &softmax_xentropy_forward, "Softmax cross entropy loss with label smoothing forward (CUDA)");
    m.def("backward", &softmax_xentropy_backward, "Softmax cross entropy loss with label smoothing backward (CUDA)");
}
    def test_fused_dense(self) :
        # Forward through the fused dense layer under test.
        y_tst = self.dense(self.tst_inputs)
        # Reference forward: plain matmul + bias using the same weights.
        y_ref = torch.matmul(self.ref_inputs,self.dense.weight.t())+self.dense.bias
        # Random upstream gradient, backprop through the fused layer.
        dy = torch.randn_like(y_tst).half()
        y_tst.backward(dy)
        # Reference gradients computed by hand from dy.
        dw_ref = torch.matmul(dy.t(), self.ref_inputs)
        dx_ref = torch.matmul(dy, self.dense.weight.clone())
        db_ref = dy.sum(0, False)


        # Inputs must be untouched; outputs and all gradients must agree with
        # the reference within fp16-appropriate tolerances.
        self.assertTrue(torch.allclose(self.ref_inputs, self.tst_inputs, atol=1e-5, rtol=1e-5))
        self.assertTrue(torch.allclose(y_ref, y_tst, atol=1e-3, rtol=1e-3, equal_nan=True))
        self.assertTrue(torch.allclose(dw_ref, self.dense.weight.grad, atol=1e-3, rtol=1e-3, equal_nan=True))
        self.assertTrue(torch.allclose(dx_ref, self.tst_inputs.grad, atol=1e-3, rtol=1e-3, equal_nan=True))
        self.assertTrue(torch.allclose(db_ref, self.dense.bias.grad, atol=1e-3, rtol=1e-3, equal_nan=True))
def _fast_layer_norm(x, weight, bias, epsilon):
    """Run the fused layer-norm Function with autocast-aware input casting."""
    casted = _cast_if_autocast_enabled(x, weight, bias, epsilon)
    # Autocast is disabled inside the Function: casting already happened above.
    with torch.cuda.amp.autocast(enabled=False):
        return FastLayerNormFN.apply(*casted)


class FastLayerNorm(torch.nn.Module):
    """Layer normalization backed by the fused ``fast_layer_norm`` CUDA kernel.

    Args:
        hidden_size: size of the trailing (normalized) dimension.
        eps: epsilon added for numerical stability.
    """

    def __init__(self, hidden_size, eps=1e-5):
        super().__init__()
        self.epsilon = eps
        # Affine parameters over the hidden dimension; initialized below.
        self.weight = torch.nn.Parameter(torch.Tensor(hidden_size))
        self.bias = torch.nn.Parameter(torch.Tensor(hidden_size))
        self.reset_parameters()

    def reset_parameters(self):
        """Unit scale, zero shift -- same defaults as ``nn.LayerNorm``."""
        init.ones_(self.weight)
        init.zeros_(self.bias)

    def forward(self, x):
        return _fast_layer_norm(x, self.weight, self.bias, self.epsilon)
def _fast_layer_norm(x, weight, bias, epsilon):
    """Dispatch to FastLayerNormFN with autocast resolved ahead of time."""
    casted = _cast_if_autocast_enabled(x, weight, bias, epsilon)
    with torch.cuda.amp.autocast(enabled=False):
        return FastLayerNormFN.apply(*casted)


class FastLayerNorm(torch.nn.Module):
    """Layer normalization with learnable per-feature scale and shift,
    backed by the fused ``fast_layer_norm`` kernel."""

    def __init__(self, hidden_size, eps=1e-5):
        super().__init__()
        self.epsilon = eps
        # Parameters start as uninitialized storage; reset_parameters()
        # assigns their actual initial values.
        self.weight = torch.nn.Parameter(torch.Tensor(hidden_size))
        self.bias = torch.nn.Parameter(torch.Tensor(hidden_size))
        self.reset_parameters()

    def reset_parameters(self):
        """Set weight to all ones and bias to all zeros."""
        init.ones_(self.weight)
        init.zeros_(self.bias)

    def forward(self, x):
        gamma, beta = self.weight, self.bias
        return _fast_layer_norm(x, gamma, beta, self.epsilon)
TEST_ROOT = os.path.dirname(os.path.abspath(__file__))
# Every known test directory under tests/L0.
TEST_DIRS = [
    "run_amp",
    "run_fp16util",
    "run_optimizers",
    "run_fused_layer_norm",
    "run_mlp",
    "run_transformer",
]
# Subset run when --include is not given (run_amp and run_fp16util are
# excluded by default).
DEFAULT_TEST_DIRS = [
    "run_optimizers",
    "run_fused_layer_norm",
    "run_mlp",
    "run_transformer",
]


def parse_args():
    """Parse command-line options for the L0 test runner.

    Returns:
        argparse.Namespace: has one attribute, ``include`` — the list of
        test directory names (each a member of TEST_DIRS) to run.
    """
    parser = argparse.ArgumentParser(
        description="L0 test runner",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "--include",
        nargs="+",
        choices=TEST_DIRS,
        default=DEFAULT_TEST_DIRS,
        # Bug fix: the help text claimed the default was ALL tests, but the
        # actual default is DEFAULT_TEST_DIRS, a strict subset of TEST_DIRS.
        help="select a set of tests to run (defaults to a standard subset of tests).",
    )
    # parse_known_args so flags belonging to other runners are ignored.
    args, _ = parser.parse_known_args()
    return args


def main(args):
    """Discover and run unittest suites in each selected test directory.

    Exits the process with status 1 if any suite failed, 0 otherwise.
    """
    runner = unittest.TextTestRunner(verbosity=2)
    errcode = 0
    for test_dir in args.include:
        test_dir = os.path.join(TEST_ROOT, test_dir)
        print(test_dir)
        suite = unittest.TestLoader().discover(test_dir)

        print("\nExecuting tests from " + test_dir)

        result = runner.run(suite)

        if not result.wasSuccessful():
            errcode = 1

    sys.exit(errcode)


if __name__ == '__main__':
    args = parse_args()
    main(args)
    def test_fused_softmax(self) :
        """Compare fast_mask_softmax_dropout_func against a reference masked
        softmax built from plain PyTorch ops (dropout effectively disabled)."""
        grads = torch.randn_like(self.tst_inputs)
        # Reference path: expose the head dimension so the
        # (sequences, seq_length) padding mask can broadcast over heads and
        # query positions, then fold it back for the softmax.
        y_ref = self.ref_inputs.view(self.sequences, self.heads, self.seq_length, self.seq_length)
        y_ref = y_ref + self.mask.unsqueeze(1).unsqueeze(2)
        y_ref = y_ref.view(self.sequences*self.heads, self.seq_length, self.seq_length)
        y_ref = F.softmax(y_ref, dim=-1)
        # keep-prob 1.0 => dropout is a no-op; _fused_dropout returns an
        # (output, mask) tuple, hence the y_ref[0] indexing below.
        y_ref = torch._fused_dropout(y_ref, 1.0)

        # Fused path under test; dropout prob 0.0 to match the reference.
        # NOTE(review): the two bare boolean flags are positional — presumably
        # is_training and a mask-related switch; confirm against the
        # fast_mask_softmax_dropout_func signature.
        y_tst = fast_mask_softmax_dropout_func(True, self.heads, self.tst_inputs, self.mask, True, 0.0)
        y_ref[0].backward(grads)
        y_tst.backward(grads)

        # Inputs must be untouched; outputs and input gradients must agree to
        # fp16-friendly tolerances.
        self.assertTrue(torch.allclose(self.ref_inputs, self.tst_inputs, atol=1e-5, rtol=1e-5))
        self.assertTrue(torch.allclose(y_ref[0], y_tst, atol=1e-3, rtol=1e-3))
        self.assertTrue(torch.allclose(self.ref_inputs.grad, self.tst_inputs.grad, atol=1e-3, rtol=1e-3))
# Associative array mapping image name -> build/test exit code.
declare -A exit_codes
# Bug fix: `for image in images` iterated over the literal word "images",
# so none of the real image entries were ever initialized to "None".
# Expand the array properly instead.
for image in "${images[@]}"
do
  exit_codes[$image]="None"
done
def print_log(msg, logger=None, level=logging.INFO):
    """Print a log message.

    Args:
        msg (str): The message to be logged.
        logger (logging.Logger | str | None): The logger to be used. Some
            special loggers are:
            - "root": the root logger obtained with `get_root_logger()`.
            - "silent": no message will be printed.
            - None: The `print()` method will be used to print log messages.
        level (int): Logging level. Only available when `logger` is a Logger
            object or "root".
    """
    # Dispatch in order: real Logger instance, plain print, the "root"
    # shorthand, then "silent" (drop); anything else is a usage error.
    if isinstance(logger, logging.Logger):
        logger.log(level, msg)
    elif logger is None:
        print(msg)
    elif logger == 'root':
        get_root_logger().log(level, msg)
    elif logger != 'silent':
        raise TypeError(
            'logger should be either a logging.Logger object, "root", '
            '"silent" or None, but got {}'.format(logger))
def resize(input,
           size=None,
           scale_factor=None,
           mode='nearest',
           align_corners=None,
           warning=True):
    """Resize an NCHW tensor via ``F.interpolate``.

    When upsampling with ``align_corners`` set, warns if the sizes are not of
    the `x+1` / `nx+1` form for which align_corners produces well-aligned
    output.

    Args:
        input (Tensor): input of shape (N, C, H, W).
        size (tuple | torch.Size | None): target (H, W).
        scale_factor (float | tuple | None): multiplier for spatial size.
        mode (str): interpolation mode passed to ``F.interpolate``.
        align_corners (bool | None): passed to ``F.interpolate``.
        warning (bool): whether to emit the alignment warning.

    Returns:
        Tensor: the resized tensor.
    """
    if warning:
        if size is not None and align_corners:
            input_h, input_w = tuple(int(x) for x in input.shape[2:])
            output_h, output_w = tuple(int(x) for x in size)
            # Bug fix: the width test compared output_w against output_h
            # instead of input_w, so the warning was decided by the output's
            # aspect ratio rather than by whether the width is upsampled.
            if output_h > input_h or output_w > input_w:
                if ((output_h > 1 and output_w > 1 and input_h > 1
                     and input_w > 1) and (output_h - 1) % (input_h - 1)
                        and (output_w - 1) % (input_w - 1)):
                    warnings.warn(
                        f'When align_corners={align_corners}, '
                        'the output would more aligned if '
                        f'input size {(input_h, input_w)} is `x+1` and '
                        f'out size {(output_h, output_w)} is `nx+1`')
    # F.interpolate rejects torch.Size; convert to a plain tuple of ints.
    if isinstance(size, torch.Size):
        size = tuple(int(x) for x in size)
    return F.interpolate(input, size, scale_factor, mode, align_corners)
For example, any 12 | `-devel` image for Pytorch 1.0 and later from the 13 | [official Pytorch Dockerhub](https://hub.docker.com/r/pytorch/pytorch) may be used: 14 | ``` 15 | docker build --build-arg BASE_IMAGE=1.3-cuda10.1-cudnn7-devel -t new_image_with_apex . 16 | ``` 17 | 18 | If you want to rebuild your image, and force the latest Apex to be cloned and installed, make any small change to the `SHA` variable in **Dockerfile**. 19 | 20 | **Warning:** 21 | Currently, the non-`-devel` images on Pytorch Dockerhub do not contain the Cuda compiler `nvcc`. Therefore, 22 | images whose name does not contain `-devel` are not eligible candidates for `BASE_IMAGE`. 23 | 24 | ### Running your Apex container 25 | 26 | Like any Cuda-enabled Pytorch container, a container with Apex should be run via [nvidia-docker](https://github.com/NVIDIA/nvidia-docker), for example: 27 | ``` 28 | docker run --runtime=nvidia -it --rm --ipc=host new_image_with_apex 29 | ``` 30 | 31 | ## Option 2: Install Apex in a running container 32 | 33 | Instead of building a new container, it is also a viable option to `git clone https://github.com/NVIDIA/apex.git` on bare metal, mount the Apex repo into your container at launch by running, for example, 34 | ``` 35 | docker run --runtime=nvidia -it --rm --ipc=host -v /bare/metal/apex:/apex/in/container 36 | ``` 37 | then go to /apex/in/container within the running container and 38 | ``` 39 | pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" . 40 | ``` 41 | -------------------------------------------------------------------------------- /SurgNetCode/apex/apex/amp/rnn_compat.py: -------------------------------------------------------------------------------- 1 | from . 
_VF = torch._C._VariableFunctions
# Base names of the RNN functions; each also exists with a '_cell' suffix.
RNN_NAMES = ['rnn_relu', 'rnn_tanh', 'gru', 'lstm']


def _gen_VF_wrapper(name):
    """Return a function forwarding *args/**kwargs to ``_VF.<name>``."""
    def wrapper(*args, **kwargs):
        return getattr(_VF, name)(*args, **kwargs)
    return wrapper


# Some python magic to generate an object that has the rnn cell functions
# defined on it, all of which call into corresponding _VF version.
# Intended to patch torch.nn.modules.rnn._VF (aka, the ref named "_VF"
# imported at module scope within torch.nn.modules.rnn). This should
# not affect third-party importers of _VF.py.
class VariableFunctionsShim(object):
    def __init__(self):
        # Expose e.g. .lstm and .lstm_cell for every entry in RNN_NAMES.
        for name in RNN_NAMES:
            for suffix in ['', '_cell']:
                fn_name = name + suffix
                setattr(self, fn_name, _gen_VF_wrapper(fn_name))


def has_old_rnns():
    """Return True if this torch build still ships the legacy THNN RNN backend.

    Probes the ``torch.nn.backends.thnn`` attribute chain; on modern PyTorch
    the chain is absent and the access raises AttributeError.
    """
    try:
        torch.nn.backends.thnn.backend.LSTMCell
        return True
    # Bug fix: was a bare `except:`, which also swallowed exceptions such as
    # KeyboardInterrupt/SystemExit; only the expected AttributeError is
    # caught now.
    except AttributeError:
        return False


def whitelist_rnn_cells(handle, verbose):
    """Insert fp16 casts on the RNN cell functions for amp.

    Args:
        handle: amp handle whose cast cache is used by ``wrap.cached_cast``.
        verbose (bool): forwarded to ``wrap.cached_cast`` for logging.
    """
    # Different module + function names in old/new RNN cases
    if has_old_rnns():
        fn_names = ['RNNReLUCell', 'RNNTanhCell', 'LSTMCell', 'GRUCell']
        mod = torch.nn.backends.thnn.backend
    else:
        fn_names = [x + '_cell' for x in RNN_NAMES]
        mod = torch.nn.modules.rnn._VF
        # The shim is expected to have been patched in before this runs.
        assert isinstance(mod, VariableFunctionsShim)

    # Insert casts on cell functions
    for fn in fn_names:
        wrap.cached_cast(mod, fn, utils.maybe_half, handle,
                         try_caching=True, verbose=verbose)

    if has_old_rnns():
        # Special handling of `backward` for fused gru / lstm:
        # The `backward` method calls Tensor.sum() (blacklist) internally,
        # and then the resulting grad_input has the wrong type.
        # TODO: where else is this a problem?
        for rnn_type in ['GRUFused', 'LSTMFused']:
            mod = getattr(torch.nn._functions.thnn.rnnFusedPointwise, rnn_type)
            wrap.disable_casts(mod, 'backward', handle)