├── up
├── utils
│ ├── env
│ │ ├── __init__.py
│ │ └── analysis_utils.py
│ ├── model
│ │ ├── optim
│ │ │ └── __init__.py
│ │ ├── __init__.py
│ │ └── act_fn.py
│ ├── __init__.py
│ └── general
│ │ ├── __init__.py
│ │ ├── fake_linklink.py
│ │ ├── context.py
│ │ ├── global_flag.py
│ │ ├── checkpoint.py
│ │ └── registry_factory.py
├── tasks
│ ├── nas
│ │ ├── bignas
│ │ │ ├── utils
│ │ │ │ ├── __init__.py
│ │ │ │ └── registry_factory.py
│ │ │ ├── README.md
│ │ │ ├── controller
│ │ │ │ ├── utils
│ │ │ │ │ └── __init__.py
│ │ │ │ └── __init__.py
│ │ │ ├── runner
│ │ │ │ └── __init__.py
│ │ │ ├── models
│ │ │ │ ├── ops
│ │ │ │ │ └── __init__.py
│ │ │ │ ├── __init__.py
│ │ │ │ ├── search_space
│ │ │ │ │ └── __init__.py
│ │ │ │ ├── necks
│ │ │ │ │ └── __init__.py
│ │ │ │ ├── backbone
│ │ │ │ │ └── __init__.py
│ │ │ │ └── heads
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── big_clshead.py
│ │ │ └── __init__.py
│ │ ├── metax
│ │ │ ├── commands
│ │ │ │ └── __init__.py
│ │ │ ├── models
│ │ │ │ ├── decoder
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── deeplab.py
│ │ │ │ ├── __init__.py
│ │ │ │ └── backbones
│ │ │ │ │ ├── ssds
│ │ │ │ │ │ ├── __init__.py
│ │ │ │ │ │ ├── base_ssd.py
│ │ │ │ │ │ └── xmnet_ssd.py
│ │ │ │ │ └── __init__.py
│ │ │ └── __init__.py
│ │ └── __init__.py
│ ├── cls
│ │ ├── models
│ │ │ ├── heads
│ │ │ │ └── __init__.py
│ │ │ ├── losses
│ │ │ │ └── __init__.py
│ │ │ ├── postprocess
│ │ │ │ └── __init__.py
│ │ │ └── __init__.py
│ │ ├── __init__.py
│ │ └── data
│ │ │ ├── __init__.py
│ │ │ ├── cls_dataloader.py
│ │ │ └── data_utils.py
│ ├── det
│ │ ├── models
│ │ │ ├── necks
│ │ │ │ └── __init__.py
│ │ │ ├── heads
│ │ │ │ ├── bbox_head
│ │ │ │ │ └── __init__.py
│ │ │ │ ├── roi_head
│ │ │ │ │ └── __init__.py
│ │ │ │ └── __init__.py
│ │ │ ├── utils
│ │ │ │ ├── __init__.py
│ │ │ │ ├── nms_wrapper.py
│ │ │ │ └── assigner.py
│ │ │ ├── __init__.py
│ │ │ ├── postprocess
│ │ │ │ └── __init__.py
│ │ │ └── losses
│ │ │ │ ├── __init__.py
│ │ │ │ └── smooth_l1_loss.py
│ │ ├── plugins
│ │ │ ├── cascade
│ │ │ │ ├── __init__.py
│ │ │ │ └── models
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── head
│ │ │ │ │ │ └── __init__.py
│ │ │ ├── fcos
│ │ │ │ ├── __init__.py
│ │ │ │ └── models
│ │ │ │ │ ├── head
│ │ │ │ │ │ └── __init__.py
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── postprocess
│ │ │ │ │ │ └── __init__.py
│ │ │ ├── yolov6
│ │ │ │ ├── __init__.py
│ │ │ │ └── models
│ │ │ │ │ ├── neck
│ │ │ │ │ │ └── __init__.py
│ │ │ │ │ ├── backbone
│ │ │ │ │ │ └── __init__.py
│ │ │ │ │ ├── head
│ │ │ │ │ │ └── __init__.py
│ │ │ │ │ └── __init__.py
│ │ │ ├── condinst
│ │ │ │ ├── __init__.py
│ │ │ │ └── models
│ │ │ │ │ ├── head
│ │ │ │ │ │ └── __init__.py
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── postprocess
│ │ │ │ │ │ └── __init__.py
│ │ │ ├── efl
│ │ │ │ ├── models
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── losses
│ │ │ │ │ │ └── __init__.py
│ │ │ │ ├── utils
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── optimizer_helper.py
│ │ │ │ └── __init__.py
│ │ │ ├── yolov5
│ │ │ │ ├── data
│ │ │ │ │ └── __init__.py
│ │ │ │ ├── models
│ │ │ │ │ ├── neck
│ │ │ │ │ │ └── __init__.py
│ │ │ │ │ ├── backbone
│ │ │ │ │ │ └── __init__.py
│ │ │ │ │ ├── head
│ │ │ │ │ │ └── __init__.py
│ │ │ │ │ ├── postprocess
│ │ │ │ │ │ └── __init__.py
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── initializer.py
│ │ │ │ ├── utils
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── optimizer_helper.py
│ │ │ │ └── __init__.py
│ │ │ ├── yolox
│ │ │ │ ├── data
│ │ │ │ │ └── __init__.py
│ │ │ │ ├── models
│ │ │ │ │ ├── neck
│ │ │ │ │ │ └── __init__.py
│ │ │ │ │ ├── head
│ │ │ │ │ │ └── __init__.py
│ │ │ │ │ ├── backbone
│ │ │ │ │ │ └── __init__.py
│ │ │ │ │ ├── preprocess
│ │ │ │ │ │ └── __init__.py
│ │ │ │ │ ├── postprocess
│ │ │ │ │ │ └── __init__.py
│ │ │ │ │ └── __init__.py
│ │ │ │ ├── __init__.py
│ │ │ │ └── utils
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── optimizer_helper.py
│ │ │ │ │ └── hook_helper.py
│ │ │ ├── onenet
│ │ │ │ ├── data
│ │ │ │ │ └── __init__.py
│ │ │ │ ├── models
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── postprocess
│ │ │ │ │ │ ├── __init__.py
│ │ │ │ │ │ └── onenet_predictor.py
│ │ │ │ └── __init__.py
│ │ │ └── __init__.py
│ │ ├── data
│ │ │ ├── __init__.py
│ │ │ ├── metrics
│ │ │ │ └── __init__.py
│ │ │ └── datasets
│ │ │ │ └── __init__.py
│ │ └── __init__.py
│ ├── quant
│ │ ├── models
│ │ │ └── __init__.py
│ │ ├── __init__.py
│ │ ├── runner
│ │ │ └── __init__.py
│ │ └── deploy
│ │ │ ├── __init__.py
│ │ │ └── quant_deploy.py
│ ├── seg
│ │ ├── models
│ │ │ ├── losses
│ │ │ │ └── __init__.py
│ │ │ ├── decoder
│ │ │ │ └── __init__.py
│ │ │ ├── encoder
│ │ │ │ └── __init__.py
│ │ │ └── __init__.py
│ │ ├── __init__.py
│ │ ├── utils
│ │ │ ├── __init__.py
│ │ │ ├── shape_convert.py
│ │ │ └── optimizer_helper.py
│ │ └── data
│ │ │ ├── __init__.py
│ │ │ └── seg_dataloader.py
│ ├── ssl
│ │ ├── models
│ │ │ ├── losses
│ │ │ │ └── __init__.py
│ │ │ ├── postprocess
│ │ │ │ ├── __init__.py
│ │ │ │ └── ssl_postprocess.py
│ │ │ └── __init__.py
│ │ ├── data
│ │ │ ├── __init__.py
│ │ │ └── ssl_dataset.py
│ │ ├── __init__.py
│ │ └── wrapper
│ │ │ ├── __init__.py
│ │ │ ├── mae.py
│ │ │ └── simclr.py
│ ├── sparse
│ │ ├── models
│ │ │ ├── heads
│ │ │ │ ├── __init__.py
│ │ │ │ └── cls_head.py
│ │ │ └── __init__.py
│ │ ├── __init__.py
│ │ └── runner
│ │ │ └── __init__.py
│ ├── det_3d
│ │ ├── models
│ │ │ ├── losses
│ │ │ │ └── __init__.py
│ │ │ ├── backbones_2d
│ │ │ │ └── __init__.py
│ │ │ ├── heads
│ │ │ │ └── __init__.py
│ │ │ ├── backbones_3d
│ │ │ │ ├── vfe
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── mean_vfe.py
│ │ │ │ ├── __init__.py
│ │ │ │ └── map_to_bev.py
│ │ │ ├── postprocess
│ │ │ │ ├── __init__.py
│ │ │ │ ├── anchor_head_postprocess
│ │ │ │ │ └── __init__.py
│ │ │ │ └── center_head_postprocess
│ │ │ │ │ └── __init__.py
│ │ │ ├── __init__.py
│ │ │ └── utils
│ │ │ │ └── __init__.py
│ │ ├── runner
│ │ │ ├── __init__.py
│ │ │ └── point_runner.py
│ │ ├── data
│ │ │ ├── dataset
│ │ │ │ └── __init__.py
│ │ │ ├── metrics
│ │ │ │ ├── __init__.py
│ │ │ │ └── kitti_object_eval_python
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── evaluate.py
│ │ │ │ │ └── README.md
│ │ │ ├── __init__.py
│ │ │ ├── data_loader.py
│ │ │ └── data_utils.py
│ │ └── __init__.py
│ ├── distill
│ │ ├── runner
│ │ │ └── __init__.py
│ │ ├── models
│ │ │ └── __init__.py
│ │ ├── losses
│ │ │ ├── __init__.py
│ │ │ ├── l2_loss.py
│ │ │ └── ce_loss.py
│ │ └── __init__.py
│ ├── multitask
│ │ ├── runner
│ │ │ └── __init__.py
│ │ ├── models
│ │ │ ├── wrappers
│ │ │ │ ├── __init__.py
│ │ │ │ └── wrapper_utils.py
│ │ │ ├── __init__.py
│ │ │ └── union_heads
│ │ │ │ ├── __init__.py
│ │ │ │ └── utils.py
│ │ └── __init__.py
│ └── __init__.py
├── apis
│ └── __init__.py
├── runner
│ └── __init__.py
├── data
│ ├── metrics
│ │ └── __init__.py
│ ├── datasets
│ │ └── __init__.py
│ ├── samplers
│ │ └── __init__.py
│ └── __init__.py
├── models
│ ├── __init__.py
│ └── losses
│ │ ├── __init__.py
│ │ └── loss.py
├── extensions
│ ├── ext.py
│ ├── csrc
│ │ ├── iou3d_nms
│ │ │ ├── iou3d_cpu.h
│ │ │ ├── iou3d_nms_api.cpp
│ │ │ └── iou3d_nms.h
│ │ ├── roi_align
│ │ │ ├── roi_align.cpp
│ │ │ ├── roi_align_cuda.cpp
│ │ │ └── roi_align.h
│ │ ├── psroi_pooling
│ │ │ ├── psroi_pooling.cpp
│ │ │ └── psroi_pooling.h
│ │ ├── deform_conv
│ │ │ └── deform_conv.cpp
│ │ ├── roipoint_pool3d
│ │ │ ├── roipoint_pool3d.h
│ │ │ └── roipoint_pool3d.cpp
│ │ ├── nms
│ │ │ └── nms.h
│ │ ├── iou_overlap
│ │ │ ├── iou_overlap.h
│ │ │ └── iou_overlap.cpp
│ │ ├── roiaware_pool3d
│ │ │ └── roiaware_pool3d.h
│ │ ├── softer_nms
│ │ │ └── softer_nms.h
│ │ └── cross_focal_loss
│ │ │ ├── cross_focal_loss_cuda.cpp
│ │ │ └── cross_focal_loss.h
│ └── python
│ │ ├── iou_overlap.py
│ │ └── nms.py
├── commands
│ ├── __init__.py
│ ├── subcommand.py
│ ├── eval.py
│ └── quant_deploy.py
├── __main__.py
└── __init__.py
├── up-logo.png
├── docs
├── source
│ ├── logo.png
│ ├── Chinese
│ │ ├── logo.png
│ │ ├── installation
│ │ │ └── index.rst
│ │ ├── usefultools
│ │ │ ├── index.rst
│ │ │ └── visualization
│ │ │ │ └── index.rst
│ │ ├── tasks
│ │ │ ├── index.rst
│ │ │ └── tasks
│ │ │ │ ├── 3ddet.rst
│ │ │ │ ├── quant.rst
│ │ │ │ ├── seg.rst
│ │ │ │ ├── sparse.rst
│ │ │ │ ├── multitask.rst
│ │ │ │ └── ssl.rst
│ │ ├── tutorials
│ │ │ ├── index.rst
│ │ │ └── guide
│ │ │ │ ├── loss.rst
│ │ │ │ ├── saver.rst
│ │ │ │ ├── fp16.rst
│ │ │ │ ├── normalization.rst
│ │ │ │ ├── trainer.rst
│ │ │ │ ├── environment.rst
│ │ │ │ └── augmentations.rst
│ │ ├── index.rst
│ │ └── benchmark
│ │ │ └── index.rst
│ ├── English
│ │ ├── logo.png
│ │ ├── installation
│ │ │ └── index.rst
│ │ ├── usefultools
│ │ │ ├── index.rst
│ │ │ └── visualization
│ │ │ │ └── index.rst
│ │ ├── tasks
│ │ │ ├── index.rst
│ │ │ └── tasks
│ │ │ │ ├── 3ddet.rst
│ │ │ │ ├── quant.rst
│ │ │ │ ├── seg.rst
│ │ │ │ ├── sparse.rst
│ │ │ │ ├── multitask.rst
│ │ │ │ └── ssl.rst
│ │ ├── tutorials
│ │ │ ├── index.rst
│ │ │ └── guide
│ │ │ │ ├── loss.rst
│ │ │ │ ├── saver.rst
│ │ │ │ ├── normalization.rst
│ │ │ │ ├── fp16.rst
│ │ │ │ ├── trainer.rst
│ │ │ │ └── environment.rst
│ │ ├── index.rst
│ │ └── benchmark
│ │ │ └── index.rst
│ └── index.rst
├── Makefile
└── make.bat
├── README.md
├── scripts
├── dist_inference.sh
├── dist_train.sh
├── dist_test.sh
├── to_onnx.sh
├── flops.sh
├── qat_deploy_dist_pytorch.sh
├── train.sh
├── test.sh
├── inference.sh
├── train_sparse.sh
├── train_qat_dist_pytorch.sh
├── qat_deploy.sh
├── train_qat.sh
└── train_ptq.sh
├── .gitignore
├── .flake8
├── easy_setup.sh
├── requirements.txt
├── configs
├── det
│ ├── yolov5
│ │ └── anchors.json
│ └── custom
│ │ ├── custom_dataset.yaml
│ │ └── rank_dataset.yaml
├── det_3d
│ ├── second
│ │ └── anchors.json
│ └── pointpillar
│ │ └── anchors.json
└── ssl
│ └── mocov3
│ └── moco_v3.yaml
└── benchmark
├── multitask_benchmark.md
├── quant_benchmark.md
├── 3d_detection_benchmark.md
└── distillation.md
/up/utils/env/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/up/tasks/nas/bignas/utils/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/up/tasks/nas/bignas/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
--------------------------------------------------------------------------------
/up/tasks/nas/bignas/controller/utils/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/up/apis/__init__.py:
--------------------------------------------------------------------------------
1 | from .inference import * # noqa
--------------------------------------------------------------------------------
/up/runner/__init__.py:
--------------------------------------------------------------------------------
1 | from .base_runner import BaseRunner # noqa
2 |
--------------------------------------------------------------------------------
/up/tasks/cls/models/heads/__init__.py:
--------------------------------------------------------------------------------
1 | from .cls_head import * # noqa
--------------------------------------------------------------------------------
/up/tasks/cls/models/losses/__init__.py:
--------------------------------------------------------------------------------
1 | from .cls_loss import * # noqa
--------------------------------------------------------------------------------
/up/tasks/det/models/necks/__init__.py:
--------------------------------------------------------------------------------
1 | from .fpn import FPN # noqa
--------------------------------------------------------------------------------
/up/tasks/quant/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .model_helper import * # noqa
--------------------------------------------------------------------------------
/up/tasks/seg/models/losses/__init__.py:
--------------------------------------------------------------------------------
1 | from .seg_loss import * # noqa
--------------------------------------------------------------------------------
/up/tasks/ssl/models/losses/__init__.py:
--------------------------------------------------------------------------------
1 | from .ssl_loss import * # noqa
--------------------------------------------------------------------------------
/up/tasks/det/plugins/cascade/__init__.py:
--------------------------------------------------------------------------------
1 | from .models import * # noqa
2 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/fcos/__init__.py:
--------------------------------------------------------------------------------
1 | from .models import * # noqa
2 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/yolov6/__init__.py:
--------------------------------------------------------------------------------
1 | from .models import * # noqa
2 |
--------------------------------------------------------------------------------
/up/tasks/nas/metax/commands/__init__.py:
--------------------------------------------------------------------------------
1 | from .search import * # noqa
2 |
--------------------------------------------------------------------------------
/up/tasks/sparse/models/heads/__init__.py:
--------------------------------------------------------------------------------
1 | from .cls_head import * # noqa
--------------------------------------------------------------------------------
/up/tasks/det/plugins/cascade/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .head import * # noqa
--------------------------------------------------------------------------------
/up/tasks/det/plugins/condinst/__init__.py:
--------------------------------------------------------------------------------
1 | from .models import * # noqa
2 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/efl/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .losses import * # noqa
2 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/yolov5/data/__init__.py:
--------------------------------------------------------------------------------
1 | from .transforms import * # noqa
--------------------------------------------------------------------------------
/up/tasks/det/plugins/yolox/data/__init__.py:
--------------------------------------------------------------------------------
1 | from .transforms import * # noqa
--------------------------------------------------------------------------------
/up/tasks/det_3d/models/losses/__init__.py:
--------------------------------------------------------------------------------
1 | from .center_loss import * # noqa
--------------------------------------------------------------------------------
/up/tasks/det_3d/runner/__init__.py:
--------------------------------------------------------------------------------
1 | from .point_runner import PointRunner # noqa
--------------------------------------------------------------------------------
/up/tasks/distill/runner/__init__.py:
--------------------------------------------------------------------------------
1 | from .kd_runner import KDRunner # noqa
2 |
--------------------------------------------------------------------------------
/up/tasks/multitask/runner/__init__.py:
--------------------------------------------------------------------------------
1 | from . import multitask_runner # noqa
--------------------------------------------------------------------------------
/up/tasks/nas/bignas/runner/__init__.py:
--------------------------------------------------------------------------------
1 | from .bignas_runner import * # noqa
2 |
--------------------------------------------------------------------------------
/up-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ModelTC/United-Perception/HEAD/up-logo.png
--------------------------------------------------------------------------------
/up/tasks/cls/models/postprocess/__init__.py:
--------------------------------------------------------------------------------
1 | from .cls_postprocess import * # noqa
--------------------------------------------------------------------------------
/up/tasks/det/models/heads/bbox_head/__init__.py:
--------------------------------------------------------------------------------
1 | from .bbox_head import * # noqa
2 |
--------------------------------------------------------------------------------
/up/tasks/det/models/heads/roi_head/__init__.py:
--------------------------------------------------------------------------------
1 | from .retina_head import * # noqa
--------------------------------------------------------------------------------
/up/tasks/det/plugins/fcos/models/head/__init__.py:
--------------------------------------------------------------------------------
1 | from .fcos_head import * # noqa
--------------------------------------------------------------------------------
/up/tasks/det/plugins/onenet/data/__init__.py:
--------------------------------------------------------------------------------
1 | from .transforms import * # noqa
2 |
--------------------------------------------------------------------------------
/up/tasks/nas/metax/models/decoder/__init__.py:
--------------------------------------------------------------------------------
1 | from .deeplab import * # noqa
2 |
--------------------------------------------------------------------------------
/up/tasks/ssl/models/postprocess/__init__.py:
--------------------------------------------------------------------------------
1 | from .ssl_postprocess import * # noqa
--------------------------------------------------------------------------------
/up/data/metrics/__init__.py:
--------------------------------------------------------------------------------
1 | from .base_evaluator import Evaluator, Metric # noqa
2 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/condinst/models/head/__init__.py:
--------------------------------------------------------------------------------
1 | from .condinst_head import * # noqa
--------------------------------------------------------------------------------
/up/tasks/det/plugins/efl/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .optimizer_helper import * # noqa
2 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/onenet/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .postprocess import * # noqa
2 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/yolox/models/neck/__init__.py:
--------------------------------------------------------------------------------
1 | from .pafpn import YoloxPAFPN # noqa
--------------------------------------------------------------------------------
/up/tasks/multitask/models/wrappers/__init__.py:
--------------------------------------------------------------------------------
1 | from .multitask_wrapper import * # noqa
--------------------------------------------------------------------------------
/up/tasks/cls/__init__.py:
--------------------------------------------------------------------------------
1 | from .data import * # noqa
2 | from .models import * # noqa
3 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/yolov5/models/neck/__init__.py:
--------------------------------------------------------------------------------
1 | from .yolov5_pan import * # noqa F401
2 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/yolox/models/head/__init__.py:
--------------------------------------------------------------------------------
1 | from .yolox_head import YoloXHead # noqa
--------------------------------------------------------------------------------
/up/tasks/det_3d/models/backbones_2d/__init__.py:
--------------------------------------------------------------------------------
1 | from .base_bev_backbone import * # noqa
2 |
--------------------------------------------------------------------------------
/up/tasks/nas/__init__.py:
--------------------------------------------------------------------------------
1 | from .bignas import * # noqa
2 | from .metax import * # noqa
3 |
--------------------------------------------------------------------------------
/up/tasks/sparse/__init__.py:
--------------------------------------------------------------------------------
1 | from .runner import * # noqa
2 | from .models import * # noqa
--------------------------------------------------------------------------------
/up/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .backbones import * # noqa
2 | from .model_helper import * # noqa
3 |
--------------------------------------------------------------------------------
/up/tasks/det/data/__init__.py:
--------------------------------------------------------------------------------
1 | from .datasets import * # noqa
2 | from .metrics import * # noqa
3 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/yolov5/models/backbone/__init__.py:
--------------------------------------------------------------------------------
1 | from .darknetv5 import * # noqa F401
2 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/yolov5/models/head/__init__.py:
--------------------------------------------------------------------------------
1 | from .yolov5_head import * # noqa F401
2 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/yolov6/models/neck/__init__.py:
--------------------------------------------------------------------------------
1 | from .reppan import RepPANNeck # noqa F401
2 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/yolox/models/backbone/__init__.py:
--------------------------------------------------------------------------------
1 | from .cspdarknet import * # noqa F401
2 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/yolox/models/preprocess/__init__.py:
--------------------------------------------------------------------------------
1 | from .multiscale import Multiscale # noqa
--------------------------------------------------------------------------------
/up/tasks/multitask/__init__.py:
--------------------------------------------------------------------------------
1 | from .runner import * # noqa
2 | from .models import * # noqa
3 |
--------------------------------------------------------------------------------
/up/tasks/nas/metax/__init__.py:
--------------------------------------------------------------------------------
1 | from .models import * # noqa
2 | from .commands import * # noqa
3 |
--------------------------------------------------------------------------------
/up/tasks/ssl/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .losses import * # noqa
2 | from .postprocess import * # noqa
--------------------------------------------------------------------------------
/up/utils/model/optim/__init__.py:
--------------------------------------------------------------------------------
1 | from .lars import LARS # noqa
2 | from .lamb import LAMB # noqa
--------------------------------------------------------------------------------
/docs/source/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ModelTC/United-Perception/HEAD/docs/source/logo.png
--------------------------------------------------------------------------------
/up/tasks/det/plugins/efl/__init__.py:
--------------------------------------------------------------------------------
1 | from .models import * # noqa
2 | from .utils import * # noqa
--------------------------------------------------------------------------------
/up/tasks/det/plugins/onenet/__init__.py:
--------------------------------------------------------------------------------
1 | from .data import * # noqa
2 | from .models import * # noqa
3 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/yolov6/models/backbone/__init__.py:
--------------------------------------------------------------------------------
1 | from .efficientrep import * # noqa F401
2 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/yolov6/models/head/__init__.py:
--------------------------------------------------------------------------------
1 | from .effidehead import Effidehead # noqa F401
2 |
--------------------------------------------------------------------------------
/up/tasks/ssl/data/__init__.py:
--------------------------------------------------------------------------------
1 | from .ssl_transforms import * # noqa
2 | from .ssl_dataset import * # noqa
--------------------------------------------------------------------------------
/up/tasks/multitask/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .wrappers import * # noqa
2 | from .union_heads import * # noqa
--------------------------------------------------------------------------------
/up/tasks/nas/metax/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .backbones import * # noqa
2 | from .decoder import * # noqa
3 |
--------------------------------------------------------------------------------
/up/tasks/sparse/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .heads import * # noqa
2 | from .model_helper import * # noqa
3 |
--------------------------------------------------------------------------------
/up/tasks/det/models/heads/__init__.py:
--------------------------------------------------------------------------------
1 | from .roi_head import * # noqa
2 | from .bbox_head import * # noqa
3 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/efl/models/losses/__init__.py:
--------------------------------------------------------------------------------
1 | from .efl import * # noqa
2 | from .eqfl import * # noqa
3 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/fcos/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .head import * # noqa
2 | from .postprocess import * # noqa
--------------------------------------------------------------------------------
/up/tasks/det_3d/models/heads/__init__.py:
--------------------------------------------------------------------------------
1 | from .anchor_head import * # noqa
2 | from .center_head import * # noqa
--------------------------------------------------------------------------------
/docs/source/Chinese/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ModelTC/United-Perception/HEAD/docs/source/Chinese/logo.png
--------------------------------------------------------------------------------
/docs/source/English/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ModelTC/United-Perception/HEAD/docs/source/English/logo.png
--------------------------------------------------------------------------------
/up/data/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | from .base_dataset import BaseDataset # noqa
2 | from .transforms import * # noqa
3 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/cascade/models/head/__init__.py:
--------------------------------------------------------------------------------
1 | from .bbox_head import * # noqa
2 | from .bbox import * # noqa
--------------------------------------------------------------------------------
/up/tasks/det/plugins/condinst/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .head import * # noqa
2 | from .postprocess import * # noqa
--------------------------------------------------------------------------------
/up/tasks/det_3d/data/dataset/__init__.py:
--------------------------------------------------------------------------------
1 | from .kitti_dataset import * # noqa
2 | from .transforms import * # noqa
3 |
--------------------------------------------------------------------------------
/up/tasks/distill/models/__init__.py:
--------------------------------------------------------------------------------
1 |
2 | from .mimic_adapt import * # noqa
3 | from .utils import * # noqa
4 |
--------------------------------------------------------------------------------
/up/tasks/seg/models/decoder/__init__.py:
--------------------------------------------------------------------------------
1 | from .segformer_decoder import * # noqa
2 | from .ocrnet import * # noqa
3 |
--------------------------------------------------------------------------------
/up/tasks/seg/models/encoder/__init__.py:
--------------------------------------------------------------------------------
1 | from .segformer_encoder import * # noqa
2 | from .hrnet import * # noqa
3 |
--------------------------------------------------------------------------------
/up/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .env import * # noqa
2 | from .model import * # noqa
3 | from .general import * # noqa
4 |
--------------------------------------------------------------------------------
/up/tasks/det/__init__.py:
--------------------------------------------------------------------------------
1 | from .data import * # noqa
2 | from .models import * # noqa
3 | from .plugins import * # noqa
4 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/yolov5/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .optimizer_helper import * # noqa
2 | from .lr_helper import * # noqa
--------------------------------------------------------------------------------
/up/tasks/det_3d/__init__.py:
--------------------------------------------------------------------------------
1 | from .data import * # noqa
2 | from .models import * # noqa
3 | from .runner import * # noqa
--------------------------------------------------------------------------------
/up/tasks/det_3d/models/backbones_3d/vfe/__init__.py:
--------------------------------------------------------------------------------
1 | from .mean_vfe import * # noqa
2 | from .pillar_vfe import * # noqa
3 |
--------------------------------------------------------------------------------
/up/tasks/seg/__init__.py:
--------------------------------------------------------------------------------
1 | from .data import * # noqa
2 | from .models import * # noqa
3 | from .utils import * # noqa
4 |
--------------------------------------------------------------------------------
/up/tasks/ssl/__init__.py:
--------------------------------------------------------------------------------
1 | from .wrapper import * # noqa
2 | from .data import * # noqa
3 | from .models import * # noqa
4 |
--------------------------------------------------------------------------------
/up/tasks/multitask/models/union_heads/__init__.py:
--------------------------------------------------------------------------------
1 | from .union_retina_cls import * # noqa
2 | from .union_fc_cls import * # noqa
--------------------------------------------------------------------------------
/up/tasks/quant/__init__.py:
--------------------------------------------------------------------------------
1 | from .deploy import * # noqa
2 | from .runner import * # noqa
3 | from .models import * # noqa
4 |
--------------------------------------------------------------------------------
/up/tasks/seg/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .encoder import * # noqa
2 | from .decoder import * # noqa
3 | from .losses import * # noqa
--------------------------------------------------------------------------------
/up/tasks/cls/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .heads import * # noqa
2 | from .losses import * # noqa
3 | from .postprocess import * # noqa
--------------------------------------------------------------------------------
/up/tasks/det/plugins/yolov5/__init__.py:
--------------------------------------------------------------------------------
1 | from .data import * # noqa
2 | from .models import * # noqa
3 | from .utils import * # noqa
4 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/yolov6/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .backbone import * # noqa
2 | from .head import * # noqa
3 | from .neck import * # noqa
--------------------------------------------------------------------------------
/up/tasks/det/plugins/yolox/__init__.py:
--------------------------------------------------------------------------------
1 | from .data import * # noqa
2 | from .models import * # noqa
3 | from .utils import * # noqa
4 |
--------------------------------------------------------------------------------
/up/tasks/quant/runner/__init__.py:
--------------------------------------------------------------------------------
# Optional dependency: quantization support needs extra packages (e.g. MQBench);
# skip QuantRunner registration when the import chain fails.
try:  # noqa
    from .quant_runner import QuantRunner  # noqa
except Exception:  # noqa -- narrowed from bare `except` so KeyboardInterrupt/SystemExit still propagate
    pass  # noqa
--------------------------------------------------------------------------------
/up/tasks/det_3d/models/postprocess/__init__.py:
--------------------------------------------------------------------------------
1 | from .anchor_head_postprocess import * # noqa
2 | from .center_head_postprocess import * # noqa
--------------------------------------------------------------------------------
/up/tasks/distill/losses/__init__.py:
--------------------------------------------------------------------------------
1 | from .ce_loss import * # noqa
2 | from .kl_loss import * # noqa
3 | from .l2_loss import * # noqa
4 |
--------------------------------------------------------------------------------
/up/tasks/quant/deploy/__init__.py:
--------------------------------------------------------------------------------
# Optional dependency: the quantization deploy path needs extra packages;
# degrade silently when they are absent.
try:  # noqa
    from .quant_deploy import QuantDeploy  # noqa
except Exception:  # noqa -- narrowed from bare `except` so KeyboardInterrupt/SystemExit still propagate
    pass  # noqa
5 |
--------------------------------------------------------------------------------
/up/tasks/seg/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .optimizer_helper import * # noqa
2 | from .embed import * # noqa
3 | from .shape_convert import * # noqa
--------------------------------------------------------------------------------
/up/tasks/sparse/runner/__init__.py:
--------------------------------------------------------------------------------
# Optional dependency: sparse-training support may not be installed;
# skip SparseRunner registration in that case.
try:  # noqa
    from .sparse_runner import SparseRunner  # noqa
except Exception:  # noqa -- narrowed from bare `except` so KeyboardInterrupt/SystemExit still propagate
    pass  # noqa
--------------------------------------------------------------------------------
/up/tasks/det/models/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .anchor_generator import * # noqa
2 | from .box_sampler import * # noqa
3 | from .matcher import * # noqa
--------------------------------------------------------------------------------
/up/tasks/det_3d/models/backbones_3d/__init__.py:
--------------------------------------------------------------------------------
1 | from .vfe import * # noqa
2 | from .spconv_backbone import * # noqa
3 | from .map_to_bev import * # noqa
--------------------------------------------------------------------------------
/up/tasks/ssl/wrapper/__init__.py:
--------------------------------------------------------------------------------
1 | from .moco import * # noqa
2 | from .simclr import * # noqa
3 | from .simsiam import * # noqa
4 | from .mae import * # noqa
--------------------------------------------------------------------------------
/up/tasks/det/plugins/yolox/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .hook_helper import * # noqa
2 | from .lr_helper import * # noqa
3 | from .optimizer_helper import * # noqa
4 |
--------------------------------------------------------------------------------
/up/tasks/distill/__init__.py:
--------------------------------------------------------------------------------
1 | from .mimicker import * # noqa
2 | from .models import * # noqa
3 | from .losses import * # noqa
4 | from .runner import * # noqa
--------------------------------------------------------------------------------
/up/tasks/nas/bignas/models/ops/__init__.py:
--------------------------------------------------------------------------------
1 | from .dynamic_blocks import * # noqa
2 | from .dynamic_ops import * # noqa
3 | from .dynamic_utils import * # noqa
4 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## United Perception
2 |
3 |
4 |
5 |
6 | ## 文档
7 |
8 | [up 官方文档](https://modeltc-up.readthedocs.io/en/latest/index.html)
9 |
--------------------------------------------------------------------------------
/up/tasks/det_3d/data/metrics/__init__.py:
--------------------------------------------------------------------------------
1 | from .kitti_evaluator import * # noqa
2 | from .kitti_object_eval_python import * # noqa
3 | from .product_evaluator import * # noqa
4 |
--------------------------------------------------------------------------------
/up/tasks/nas/bignas/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .search_space import * # noqa
2 | from .necks import * # noqa
3 | from .backbone import * # noqa
4 | from .heads import * # noqa
5 |
--------------------------------------------------------------------------------
/up/models/losses/__init__.py:
--------------------------------------------------------------------------------
1 | from up.utils.general.registry_factory import LOSSES_REGISTRY
2 |
3 |
def build_loss(loss_cfg):
    """Instantiate a loss module described by *loss_cfg* via the global loss registry."""
    registry = LOSSES_REGISTRY
    return registry.build(loss_cfg)
6 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/fcos/models/postprocess/__init__.py:
--------------------------------------------------------------------------------
1 | from .fcos_predictor import * # noqa
2 | from .fcos_supervisor import * # noqa
3 | from .fcos_postprocess import * # noqa
4 |
--------------------------------------------------------------------------------
/up/tasks/nas/bignas/__init__.py:
--------------------------------------------------------------------------------
1 | from .controller import * # noqa
2 | from .models import * # noqa
3 | from .runner import * # noqa
4 | from .utils.saver_helper import * # noqa
5 |
--------------------------------------------------------------------------------
/up/extensions/ext.py:
--------------------------------------------------------------------------------
# Load the compiled C++/CUDA extension ops, preferring the in-tree build
# (`._C`) and falling back to an installed `up_extensions` wheel; silently
# degrade when neither is available (e.g. CPU-only environments).
try:
    from ._C import *  # noqa
except Exception:  # noqa -- narrowed from bare `except` so KeyboardInterrupt still propagates
    try:
        from up_extensions._C import *  # noqa
    except Exception:  # noqa
        pass
8 |
--------------------------------------------------------------------------------
/up/tasks/cls/data/__init__.py:
--------------------------------------------------------------------------------
1 | from .cls_dataloader import * # noqa
2 | from .cls_dataset import * # noqa
3 | from .cls_evaluator import * # noqa
4 | from .cls_transforms import * # noqa
--------------------------------------------------------------------------------
/up/tasks/seg/data/__init__.py:
--------------------------------------------------------------------------------
1 | from .seg_dataset import * # noqa
2 | from .seg_evaluator import * # noqa
3 | from .seg_transfomer import * # noqa
4 | from .seg_dataloader import * # noqa
--------------------------------------------------------------------------------
/up/tasks/det/plugins/condinst/models/postprocess/__init__.py:
--------------------------------------------------------------------------------
1 | from .condinst_postprocess import * # noqa
2 | from .condinst_predictor import * # noqa
3 | from .condinst_supervisor import * # noqa
--------------------------------------------------------------------------------
/up/tasks/det/plugins/onenet/models/postprocess/__init__.py:
--------------------------------------------------------------------------------
1 | from .onenet_postprocess import * # noqa
2 | from .onenet_supervisor import * # noqa
3 | from .onenet_predictor import * # noqa
4 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/yolov5/models/postprocess/__init__.py:
--------------------------------------------------------------------------------
1 | from .yolov5_post_process import * # noqa
2 | from .roi_predictor import * # noqa F401
3 | from .roi_supervisor import * # noqa F401
--------------------------------------------------------------------------------
/up/tasks/det/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .heads import * # noqa
2 | from .necks import * # noqa
3 | from .postprocess import * # noqa
4 | from .losses import * # noqa
5 | from .utils import * # noqa
6 |
--------------------------------------------------------------------------------
/up/tasks/multitask/models/union_heads/utils.py:
--------------------------------------------------------------------------------
def get_inplane(inplanes, idx):
    """Return the input-channel count for branch *idx*.

    *inplanes* is either a list with one entry per branch or a single
    value shared by every branch.
    """
    if not isinstance(inplanes, list):
        return inplanes
    return inplanes[idx]
6 |
--------------------------------------------------------------------------------
/up/data/samplers/__init__.py:
--------------------------------------------------------------------------------
1 | from .batch_sampler import BaseBatchSampler, AspectRatioGroupedBatchSampler # noqa
2 | from .sampler import DistributedSampler, TestDistributedSampler, LocalSampler # noqa
--------------------------------------------------------------------------------
/up/tasks/det/plugins/yolox/models/postprocess/__init__.py:
--------------------------------------------------------------------------------
1 | from .yolox_postprocess import YoloxPostProcess # noqa
2 | from .roi_predictor import * # noqa F401
3 | from .roi_supervisor import * # noqa F401
--------------------------------------------------------------------------------
/up/tasks/det_3d/data/metrics/kitti_object_eval_python/__init__.py:
--------------------------------------------------------------------------------
1 | from .eval import * # noqa
2 | from .evaluate import * # noqa
3 | from .kitti_common import * # noqa
4 | from .rotate_iou import * # noqa
5 |
--------------------------------------------------------------------------------
/docs/source/Chinese/installation/index.rst:
--------------------------------------------------------------------------------
1 | 安装说明/Installation
2 | =====================
3 |
4 | 使用脚本编译,我们提供了easy_setup.sh脚本方便直接编译代码:
5 |
6 | .. code-block:: bash
7 |
8 | ./easy_setup.sh
9 |
10 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/yolox/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .backbone import * # noqa
2 | from .head import * # noqa
3 | from .neck import * # noqa
4 | from .preprocess import * # noqa
5 | from .postprocess import * # noqa
--------------------------------------------------------------------------------
/up/tasks/det_3d/models/postprocess/anchor_head_postprocess/__init__.py:
--------------------------------------------------------------------------------
1 | from .anchor_head_post_process import * # noqa
2 | from .anchor_head_predictor import * # noqa
3 | from .anchor_head_supervisor import * # noqa
--------------------------------------------------------------------------------
/up/tasks/det_3d/models/postprocess/center_head_postprocess/__init__.py:
--------------------------------------------------------------------------------
1 | from .center_head_post_process import * # noqa
2 | from .center_head_predictor import * # noqa
3 | from .center_head_supervisior import * # noqa
--------------------------------------------------------------------------------
/up/tasks/nas/metax/models/backbones/ssds/__init__.py:
--------------------------------------------------------------------------------
1 | from .base_ssd import BaseSSD # noqa
2 | from .xmnet_ssd import xmnetSSD # noqa
3 |
4 |
def ssd_entry(ssd_type):
    """Look up the SSD class named *ssd_type* in this module's namespace and instantiate it.

    Raises KeyError if no such name exists at module level.
    """
    ssd_cls = globals()[ssd_type]
    return ssd_cls()
7 |
--------------------------------------------------------------------------------
/docs/source/English/installation/index.rst:
--------------------------------------------------------------------------------
1 | Installation
2 | ============
3 |
4 | We support easy_setup.sh for directly compiling by scripts:
5 |
6 | .. code-block:: bash
7 |
8 | ./easy_setup.sh
9 |
10 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/yolov5/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .backbone import * # noqa
2 | from .head import * # noqa
3 | from .neck import * # noqa
4 | from .initializer import init_yolov5 # noqa
5 | from .postprocess import * # noqa
--------------------------------------------------------------------------------
/up/tasks/det_3d/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .backbones_2d import * # noqa
2 | from .backbones_3d import * # noqa
3 | from .heads import * # noqa
4 | from .postprocess import * # noqa
5 | from .utils import * # noqa
6 | from .losses import * # noqa
--------------------------------------------------------------------------------
/up/tasks/det_3d/models/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .anchor_generator import * # noqa
2 | from .anchor_generator_eagle import * # noqa
3 | from .model_nms_utils import * # noqa
4 | from .voxel_generator import * # noqa
5 | from .center_utils import * # noqa
6 |
--------------------------------------------------------------------------------
/up/tasks/nas/bignas/utils/registry_factory.py:
--------------------------------------------------------------------------------
1 | from up.utils.general.registry import Registry
2 |
3 | DYNAMIC_OPS_REGISTRY = Registry()
4 | DYNAMIC_BLOCKS_REGISTRY = Registry()
5 | STATIC_BLOCKS_REGISTRY = Registry()
6 | CONTROLLER_REGISTRY = Registry()
7 |
--------------------------------------------------------------------------------
/up/tasks/det/data/metrics/__init__.py:
--------------------------------------------------------------------------------
1 | from .coco_evaluator import CocoEvaluator # noqa
2 | from .lvis_evaluator import LvisEvaluator # noqa
3 | from .custom_evaluator import CustomEvaluator # noqa
4 | from .group_evaluator import GroupEvaluator # noqa
5 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/__init__.py:
--------------------------------------------------------------------------------
1 | from .yolox import * # noqa
2 | from .yolov5 import * # noqa
3 | from .yolov6 import * # noqa
4 | from .efl import * # noqa
5 | from .fcos import * # noqa
6 | from .condinst import * # noqa
7 | from .cascade import * # noqa
8 |
--------------------------------------------------------------------------------
/up/utils/general/__init__.py:
--------------------------------------------------------------------------------
1 | from .hook_helper import * # noqa
2 | from .saver_helper import * # noqa
3 | from .vis_helper import * # noqa
4 | from .checkpoint import * # noqa
5 | from .toonnx_helper import * # noqa
6 | from .model_wrapper import * # noqa
7 |
--------------------------------------------------------------------------------
/up/tasks/det/data/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | from .coco_dataset import CocoDataset # noqa
2 | from .lvis_dataset import LvisDataset, LvisV1Dataset # noqa
3 | from .custom_dataset import CustomDataset, RankCustomDataset # noqa
4 | from .det_transforms import * # noqa
5 |
--------------------------------------------------------------------------------
/docs/source/Chinese/usefultools/index.rst:
--------------------------------------------------------------------------------
1 | 有用工具/UsefulTools
2 | =====================
3 |
4 | 本教程将介绍 UP 中的 Hook 与可视化工具。
5 |
6 |
7 | .. toctree::
8 | :maxdepth: 1
9 | :caption: Table of Contents
10 |
11 | hook/index
12 | visualization/index
--------------------------------------------------------------------------------
/scripts/dist_inference.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Multi-GPU inference via the PyTorch distributed launcher.
# Usage: ./dist_inference.sh <num_gpus> <config.yaml>

ROOT=../
T=`date +%m%d%H%M`
export ROOT=$ROOT
cfg=$2
# Keep import resolution consistent with the sibling dist_* scripts.
export PYTHONPATH=$ROOT:$PYTHONPATH

# FIX: `--ng=$1` was missing its trailing backslash, so the remaining
# options were parsed as separate shell commands instead of arguments.
python -m up inference \
--ng=$1 \
--launch=pytorch \
--config=$cfg \
2>&1 | tee log.inference.$T.$(basename $cfg)
--------------------------------------------------------------------------------
/up/data/__init__.py:
--------------------------------------------------------------------------------
1 | from .image_reader import ImageReader # noqa
2 | from .data_loader import BaseDataLoader # noqa
3 | from .data_builder import BaseDataLoaderBuilder # noqa
4 | from .datasets import * # noqa
5 | from .metrics import * # noqa
6 | from .samplers import * # noqa
--------------------------------------------------------------------------------
/up/tasks/det/models/utils/nms_wrapper.py:
--------------------------------------------------------------------------------
1 | from torchvision.ops import nms as t_nms
2 |
3 |
def nms(cls_rois, nms_cfg=None):
    """Run torchvision NMS on a set of per-class ROIs.

    Args:
        cls_rois: tensor of shape (N, >=5); columns 0-3 are boxes
            (x1, y1, x2, y2) and column 4 is the score.
        nms_cfg: optional dict; reads 'nms_iou_thresh' (default 0.5).

    Returns:
        (kept_rois, keep_idx): surviving rows and their indices into cls_rois.
    """
    # Avoid the shared mutable-default-argument pitfall of `nms_cfg={}`.
    if nms_cfg is None:
        nms_cfg = {}
    thresh = nms_cfg.get('nms_iou_thresh', 0.5)
    idx = t_nms(cls_rois[:, :4], cls_rois[:, 4], thresh)
    return cls_rois[idx], idx
8 |
--------------------------------------------------------------------------------
/up/tasks/det_3d/data/__init__.py:
--------------------------------------------------------------------------------
1 | from .dataset import * # noqa
2 | from .metrics import * # noqa
3 | from .box_coder_utils import * # noqa
4 | from .box_utils import * # noqa
5 | from .data_loader import * # noqa
6 | from .data_reader import * # noqa
7 | from .data_utils import * # noqa
--------------------------------------------------------------------------------
/up/tasks/det/models/postprocess/__init__.py:
--------------------------------------------------------------------------------
1 | from .retina_post_process import * # noqa
2 | from .roi_supervisor import * # noqa
3 | from .roi_predictor import * # noqa
4 | from .bbox_post_process import * # noqa
5 | from .bbox_predictor import * # noqa
6 | from .bbox_supervisor import * # noqa
7 |
--------------------------------------------------------------------------------
/up/utils/model/__init__.py:
--------------------------------------------------------------------------------
1 | from .ema_helper import EMA # noqa
2 | from .lr_helper import BaseLRScheduler # noqa
3 | from .optimizer_helper import * # noqa
4 | from .act_fn import * # noqa
5 | from .initializer import * # noqa
6 | from .normalize import * # noqa
7 | from .pos_embed import * # noqa
--------------------------------------------------------------------------------
/docs/source/English/usefultools/index.rst:
--------------------------------------------------------------------------------
1 | UsefulTools
2 | ===========
3 |
4 | This section introduces Hook and visualization tools in UP.
5 |
6 |
7 | .. toctree::
8 | :maxdepth: 1
9 | :caption: Table of Contents
10 |
11 | hook/index
12 | visualization/index
13 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pth
2 | *.pk
3 | *.model
4 | *.zip
5 | *.tar
6 | *.pyc
7 | *.log
8 | *.o
9 | *.so
10 | *.a
11 | *.exe
12 | *.out
13 | .idea
14 | **.DS_Store**
15 | **/__pycache__/**
16 | examples
17 | experiments
18 | datasets/pycocotools
19 | **.swp
20 | build
21 | **/build
22 |
23 | .vscode/
24 | *_build
25 | .env
--------------------------------------------------------------------------------
/scripts/dist_train.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Multi-GPU training via the PyTorch distributed launcher.
# Usage: ./dist_train.sh <num_gpus> <config.yaml>
# Output is mirrored to a timestamped log file named after the config.

ROOT=../
T=`date +%m%d%H%M`
export ROOT=$ROOT
cfg=$2
export PYTHONPATH=$ROOT:$PYTHONPATH
python -m up train \
--ng=$1 \
--launch=pytorch \
--config=$cfg \
--display=10 \
2>&1 | tee log.train.$T.$(basename $cfg)
--------------------------------------------------------------------------------
/up/commands/__init__.py:
--------------------------------------------------------------------------------
1 | from .train import Train # noqa
2 | from .inference import Inference # noqa
3 | from .eval import Eval # noqa
4 | from .quant_deploy import QuantDeploy # noqa
5 | from .flops import Flops # noqa
6 | from .inference_video import Inference_video # noqa
7 | from .to_onnx import ToOnnx # noqa
8 |
--------------------------------------------------------------------------------
/up/tasks/det/models/losses/__init__.py:
--------------------------------------------------------------------------------
1 | from .smooth_l1_loss import SmoothL1Loss # noqa
2 | from .focal_loss import SigmoidFocalLoss # noqa
3 | from .entropy_loss import SigmoidCrossEntropyLoss # noqa
4 | from .l1_loss import L1Loss # noqa
5 | from .iou_loss import IOULoss # noqa
6 | from .ohem import ohem_loss # noqa
7 |
--------------------------------------------------------------------------------
/scripts/dist_test.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Multi-GPU evaluation via the PyTorch distributed launcher.
# Usage: ./dist_test.sh <num_gpus> <config.yaml>
# `train -e` switches the trainer into evaluate-only mode.

ROOT=../
T=`date +%m%d%H%M`
export ROOT=$ROOT
cfg=$2
export PYTHONPATH=$ROOT:$PYTHONPATH

# FIX: `--ng=$1` was missing its trailing backslash, so the remaining
# options were parsed as separate shell commands instead of arguments.
python -m up train \
-e \
--ng=$1 \
--launch=pytorch \
--config=$cfg \
--display=10 \
2>&1 | tee log.test.$T.$(basename $cfg)
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | SPHINXOPTS ?=
2 | SPHINXBUILD ?= sphinx-build
3 | SOURCEDIR = source
4 | BUILDDIR = build
5 |
6 | help:
7 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
8 |
9 | .PHONY: help Makefile
10 |
11 | %: Makefile
12 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
13 |
--------------------------------------------------------------------------------
/up/extensions/csrc/iou3d_nms/iou3d_cpu.h:
--------------------------------------------------------------------------------
1 | #ifndef IOU3D_CPU_H
2 | #define IOU3D_CPU_H
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 |
9 | int boxes_iou_bev_cpu(at::Tensor boxes_a_tensor, at::Tensor boxes_b_tensor, at::Tensor ans_iou_tensor);
10 |
11 | #endif
12 |
--------------------------------------------------------------------------------
/.flake8:
--------------------------------------------------------------------------------
1 | # This is an example .flake8 config, used when developing *Black* itself.
2 | # Keep in sync with setup.cfg which is used for source packages.
3 |
4 | [flake8]
5 | # W606: reserved keywords
6 | ignore = E203, E266, W503, F405, F403, W606, E731, C901, E701
7 | max-line-length = 120
8 | max-complexity = 18
9 | select = B,C,E,F,W,T4,B9
10 |
--------------------------------------------------------------------------------
/up/tasks/nas/bignas/controller/__init__.py:
--------------------------------------------------------------------------------
1 | from .base_controller import BaseController # noqa
2 |
3 | from up.tasks.nas.bignas.utils.registry_factory import CONTROLLER_REGISTRY
4 |
# Snapshot the module namespace so registration does not mutate it mid-iteration.
imported_vars = list(globals().items())

# Auto-register every callable imported above (controller classes and
# functions) into the controller registry under its own name.
for var_name, var in imported_vars:
    if callable(var):
        CONTROLLER_REGISTRY.register(var_name, var)
10 |
--------------------------------------------------------------------------------
/up/extensions/csrc/roi_align/roi_align.cpp:
--------------------------------------------------------------------------------
1 | #include "roi_align/roi_align.h"
2 |
3 | using at::Tensor;
4 |
5 | int roi_align_forward(bool aligned, int aligned_height, int aligned_width,
6 | float spatial_scale, int sampling_ratio,
7 | Tensor features, Tensor rois, Tensor output) {
8 | // ONNX requires cpu forward support
9 | return 0;
10 | }
11 |
--------------------------------------------------------------------------------
/up/tasks/nas/bignas/models/search_space/__init__.py:
--------------------------------------------------------------------------------
1 |
2 | from .base_bignas_searchspace import BignasSearchSpace # noqa
3 |
4 | from up.utils.general.registry_factory import MODULE_ZOO_REGISTRY
5 |
# Snapshot the module namespace so registration does not mutate it mid-iteration.
imported_vars = list(globals().items())

# Auto-register every callable imported above (search-space classes) into the
# module zoo under its own name.
for var_name, var in imported_vars:
    if callable(var):
        MODULE_ZOO_REGISTRY.register(var_name, var)
11 |
--------------------------------------------------------------------------------
/easy_setup.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# One-shot build script: point the toolchain env vars at your local
# gcc/gmp/mpfr/mpc installs, pick the CUDA architectures to compile for,
# install the Python requirements, then build the C++/CUDA extensions
# in place. Edit the /your/path/to/... placeholders before running.

export PATH=/your/path/to/gcc-5.3.0/bin/:$PATH # gcc path
export LD_LIBRARY_PATH=/your/path/to/gmp-4.3.2/lib/:/your/path/to/mpfr-2.4.2/lib/:/your/path/to/mpc-0.8.1/lib/:$LD_LIBRARY_PATH # lib path
export TORCH_CUDA_ARCH_LIST='3.5;5.0+PTX;6.0;7.0' # cuda list

pip install --user -r requirements.txt

python setup.py build_ext -i
--------------------------------------------------------------------------------
/scripts/to_onnx.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Export a trained model to ONNX with a fixed 3x256x256 input.
# Usage: ./to_onnx.sh <num_gpus> <config.yaml>

ROOT=..
T=`date +%m%d%H%M`
export ROOT=$ROOT
cfg=$2
export PYTHONPATH=$ROOT:$PYTHONPATH
CPUS_PER_TASK=${CPUS_PER_TASK:-2}

python -m up to_onnx \
--ng=$1 \
--launch=pytorch \
--config=$cfg \
--save_prefix=toonnx \
--input_size=3x256x256 \
2>&1 | tee log.deploy.$(basename $cfg)
--------------------------------------------------------------------------------
/up/tasks/nas/bignas/models/necks/__init__.py:
--------------------------------------------------------------------------------
1 | # Import from local
2 | from .big_fpn import ( # noqa: F401
3 | big_fpn, bignas_fpn
4 | )
5 |
6 |
7 | from up.utils.general.registry_factory import MODULE_ZOO_REGISTRY
8 |
# Snapshot the module namespace so registration does not mutate it mid-iteration.
imported_vars = list(globals().items())

# Auto-register every callable imported above (neck builders) into the
# module zoo under its own name.
for var_name, var in imported_vars:
    if callable(var):
        MODULE_ZOO_REGISTRY.register(var_name, var)
14 |
--------------------------------------------------------------------------------
/up/utils/general/fake_linklink.py:
--------------------------------------------------------------------------------
# Minimal stand-in for the proprietary `linklink` distributed-training
# library: exposes only the attribute paths the codebase touches
# (link.nn.SyncBatchNorm2d, link.syncbnVarMode_t.L2) so imports succeed
# when the real package is not installed.
class link(object):

    class nn(object):

        class SyncBatchNorm2d(object):
            ...

    class syncbnVarMode_t(object):

        class L2(object):
            ...
12 |
13 |
class linklink(object):
    """Stub mirroring the `linklink` package layout for import compatibility."""

    class nn(object):
        """Placeholder namespace standing in for `linklink.nn`."""

        class SyncBatchNorm2d(object):
            """Placeholder type; the real implementation lives in the linklink library."""
20 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | onnx<=1.6.0 # nart 0.2.4 incompatible with onnx 1.7.0
2 | prettytable
3 | scipy==1.1.0
4 | coverage==5.0.3
5 | pycocotools-fix
6 | pandas>=0.25.1
7 | easydict>=1.9
8 | opencv-python>=4.1.0
9 | Cython>=0.29.2,<0.29.15
10 | tensorboardX==1.8.0
11 | matplotlib==3.3.3
12 | pillow==6.2.1
13 | lvis
14 | SharedArray
15 | numba==0.48.0
16 | psutil==5.9.0
17 | scikit-learn==0.21.3
18 | open3d==0.9.0
19 |
--------------------------------------------------------------------------------
/scripts/flops.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | ROOT=../
4 | T=`date +%m%d%H%M`
5 | export ROOT=$ROOT
6 | cfg=$2
7 | export PYTHONPATH=$ROOT:$PYTHONPATH
8 | CPUS_PER_TASK=${CPUS_PER_TASK:-4}
9 | jobname=flops
10 |
11 | srun -N$1 --gres=gpu:$2 -p $3 --job-name=$jobname --cpus-per-task=2 \
12 | python -m up flops \
13 | --config=$cfg \
14 | --input_size=3,224,224 \
15 | 2>&1 | tee log.flops.$T.$(basename, $cfg)
16 |
--------------------------------------------------------------------------------
/docs/source/Chinese/tasks/index.rst:
--------------------------------------------------------------------------------
1 | .. _tasks:
2 |
3 | 子任务/Tasks
4 | ========================
5 |
6 | 本教程将介绍UP支持的子任务类型与相关特殊模块
7 |
8 |
9 | .. toctree::
10 | :maxdepth: 1
11 | :caption: Table of Contents
12 |
13 | tasks/cls
14 | tasks/det
15 | tasks/distill
16 | tasks/ssl
17 | tasks/seg
18 | tasks/multitask
19 | tasks/sparse
20 | tasks/3ddet
21 | tasks/quant
22 | tasks/bignas
23 |
--------------------------------------------------------------------------------
/up/tasks/nas/metax/models/backbones/__init__.py:
--------------------------------------------------------------------------------
1 | from .xmnet import xmnet # noqa
2 | from .xmnet_search import xmnet_search # noqa
3 | from .seg_xmnet import seg_xmnet # noqa
4 | from up.utils.general.registry_factory import MODULE_ZOO_REGISTRY # noqa
5 |
# Snapshot the module namespace so registration does not mutate it mid-iteration.
imported_vars = list(globals().items())

# Auto-register every callable imported above (backbone builders) into the
# module zoo under its own name.
for var_name, var in imported_vars:
    if callable(var):
        MODULE_ZOO_REGISTRY.register(var_name, var)
11 |
--------------------------------------------------------------------------------
/scripts/qat_deploy_dist_pytorch.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Deploy a quantization-aware-trained (QAT) model in the background.

# cmd example: sh qat_deploy_dist_pytorch.sh

# NOTE(review): these are placeholder paths -- point UP at the repo root and
# MQB at the MQBench checkout before running.
UP=/path to up
MQB=/path to mqbench

cfg=$UP/configs/quant/det/faster_rcnn/faster_rcnn_r50_fpn_improve_quant_trt_qat_deploy.yaml


export PYTHONPATH=$UP:$PYTHONPATH
export PYTHONPATH=$MQB:$PYTHONPATH

# Detached run; all output is captured in deploy_log.txt.
nohup python -m up quant_deploy \
--config=$cfg > deploy_log.txt 2>&1 &
--------------------------------------------------------------------------------
/scripts/train.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Slurm training launcher.
# Usage: ./train.sh <num_nodes> <num_gpus> <partition>
# NOTE(review): $2 is consumed both as the gpu count (--gres=gpu:$2, --ng=$2)
# and as the config path (cfg=$2) -- confirm the intended argument layout.

ROOT=../
T=`date +%m%d%H%M`
export ROOT=$ROOT
cfg=$2
export PYTHONPATH=$ROOT:$PYTHONPATH
CPUS_PER_TASK=${CPUS_PER_TASK:-4}
jobname=train

srun -N$1 --gres=gpu:$2 -p $3 --job-name=$jobname --cpus-per-task=2 \
python -m up train \
--ng=$2 \
--launch=pytorch \
--config=$cfg \
--display=10 \
2>&1 | tee log.train.$T.$(basename $cfg)
--------------------------------------------------------------------------------
/docs/source/Chinese/tutorials/index.rst:
--------------------------------------------------------------------------------
1 | 教程/Tutorials
2 | ==============
3 |
4 | 本教程将介绍用户如何配置使用UP各模块以及如何实现自己的算法与模型
5 |
6 |
7 | .. toctree::
8 | :maxdepth: 1
9 | :caption: Table of Contents
10 |
11 | guide/configs
12 | guide/model_flow
13 | guide/datasets
14 | guide/augmentations
15 | guide/trainer
16 | guide/saver
17 | guide/normalization
18 | guide/loss
19 | guide/fp16
20 | guide/quant
21 | guide/environment
22 |
--------------------------------------------------------------------------------
/docs/source/English/tasks/index.rst:
--------------------------------------------------------------------------------
1 | .. _tasksEn:
2 |
3 | Tasks
4 | =====
5 |
6 | This section introduces the supported sub-tasks and related special modules of UP.
7 |
8 |
9 | .. toctree::
10 | :maxdepth: 1
11 | :caption: Table of Contents
12 |
13 | tasks/cls
14 | tasks/det
15 | tasks/distill
16 | tasks/ssl
17 | tasks/seg
18 | tasks/multitask
19 | tasks/sparse
20 | tasks/3ddet
21 | tasks/quant
22 | tasks/bignas
23 |
--------------------------------------------------------------------------------
/scripts/test.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Usage: sh test.sh <num_nodes> <num_gpus> <partition> <config.yaml>
ROOT=../
T=`date +%m%d%H%M`
export ROOT=$ROOT
# Bug fix: the config used to be read from $2, but $2 is the GPU count
# consumed by --gres=gpu:$2 and --ng=$2 below; the config is argument 4.
cfg=$4
export PYTHONPATH=$ROOT:$PYTHONPATH
CPUS_PER_TASK=${CPUS_PER_TASK:-4}
jobname=test


# Bug fix: honor CPUS_PER_TASK instead of the hard-coded value 2.
srun -N$1 --gres=gpu:$2 -p $3 --job-name=$jobname --cpus-per-task=$CPUS_PER_TASK \
python -m up train \
  --ng=$2 \
  --launch=pytorch \
  -e \
  --config=$cfg \
  --display=10 \
  2>&1 | tee log.test.$T.$(basename $cfg)
--------------------------------------------------------------------------------
/scripts/inference.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Usage: sh inference.sh <num_nodes> <num_gpus> <partition> <config.yaml>
ROOT=../
T=`date +%m%d%H%M`
export ROOT=$ROOT
# Bug fix: the config used to be read from $1, but $1 is the node count
# consumed by -N$1 below; the config is argument 4.
cfg=$4
export PYTHONPATH=$ROOT:$PYTHONPATH
CPUS_PER_TASK=${CPUS_PER_TASK:-4}
jobname=inference

# Bug fix: honor CPUS_PER_TASK instead of the hard-coded value 2.
srun -N$1 --gres=gpu:$2 -p $3 --job-name=$jobname --cpus-per-task=$CPUS_PER_TASK \
python -m up inference \
  --ng=$2 \
  --launch=pytorch \
  --config=$cfg \
  -i=imgs \
  -c=ckpt \
  2>&1 | tee log.inference.$T.$(basename $cfg)
--------------------------------------------------------------------------------
/scripts/train_sparse.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Usage: sh train_sparse.sh <num_nodes> <num_gpus> <partition> <config.yaml>
ROOT=../
T=`date +%m%d%H%M`
export ROOT=$ROOT
# Bug fix: the config used to be read from $2, but $2 is the GPU count
# consumed by --gres=gpu:$2 and --ng=$2 below; the config is argument 4.
cfg=$4
export PYTHONPATH=$ROOT:$PYTHONPATH
CPUS_PER_TASK=${CPUS_PER_TASK:-4}
# Bug fix: the unquoted `/path to msbench` was parsed as MSB=/path followed
# by running a command `to`; placeholder paths must not contain spaces.
MSB=/path/to/msbench
export PYTHONPATH=$MSB:$PYTHONPATH
jobname=train_sparse

# Bug fix: honor CPUS_PER_TASK instead of the hard-coded value 2.
srun -N$1 --gres=gpu:$2 -p $3 --job-name=$jobname --cpus-per-task=$CPUS_PER_TASK \
python -m up train \
  --ng=$2 \
  --launch=pytorch \
  --config=$cfg \
  --display=100 \
  2>&1 | tee ./logs/log.train.$T.$(basename $cfg)
--------------------------------------------------------------------------------
/up/tasks/nas/bignas/models/backbone/__init__.py:
--------------------------------------------------------------------------------
from .big_resnet_basic import (big_resnet_basic,  # noqa: F401
                               bignas_resnet_basic)
from .big_regnet import (big_regnet,  # noqa: F401
                         bignas_regnet)

from up.utils.general.registry_factory import MODULE_ZOO_REGISTRY

# Snapshot the namespace populated by the imports above so every backbone
# builder can be auto-registered into the module zoo under its own name.
imported_vars = list(globals().items())

for var_name, var in imported_vars:
    if not callable(var):
        continue
    MODULE_ZOO_REGISTRY.register(var_name, var)
13 |
--------------------------------------------------------------------------------
/scripts/train_qat_dist_pytorch.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# cmd example: sh train_qat_dist_pytorch.sh 2
#
# QAT training with the pytorch launcher; $1 is the number of GPUs.

# Bug fix: `UP=/path to up` is parsed by the shell as the assignment
# `UP=/path` followed by running a command named `to` -- placeholder
# paths must not contain spaces.
UP=/path/to/up
MQB=/path/to/mqbench

cfg=$UP/configs/quant/det/retinanet/retinanet-r18-improve_quant_trt_qat.yaml


export PYTHONPATH=$UP:$PYTHONPATH
export PYTHONPATH=$MQB:$PYTHONPATH


nohup python -m up train \
  --ng=$1 \
  --launch=pytorch \
  --config=$cfg \
  --display=10 > train_log.txt 2>&1 &
--------------------------------------------------------------------------------
/docs/source/English/tutorials/index.rst:
--------------------------------------------------------------------------------
1 | Tutorials
2 | =========
3 |
4 | The tutorials guide users to configure and use UP modules, and implement their own algorithms and models
5 |
6 | .. toctree::
7 | :maxdepth: 1
8 | :caption: Table of Contents
9 |
10 | guide/configs
11 | guide/model_flow
12 | guide/datasets
13 | guide/augmentations
14 | guide/trainer
15 | guide/saver
16 | guide/normalization
17 | guide/loss
18 | guide/fp16
19 | guide/quant
20 | guide/environment
21 | guide/sparse
22 |
--------------------------------------------------------------------------------
/configs/det/yolov5/anchors.json:
--------------------------------------------------------------------------------
1 | [
2 | [
3 | 10,
4 | 13
5 | ],
6 | [
7 | 16,
8 | 30
9 | ],
10 | [
11 | 33,
12 | 23
13 | ],
14 | [
15 | 30,
16 | 61
17 | ],
18 | [
19 | 62,
20 | 45
21 | ],
22 | [
23 | 59,
24 | 119
25 | ],
26 | [
27 | 116,
28 | 90
29 | ],
30 | [
31 | 156,
32 | 198
33 | ],
34 | [
35 | 373,
36 | 326
37 | ]
38 | ]
--------------------------------------------------------------------------------
/scripts/qat_deploy.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# cmd example: sh qat_deploy.sh 1 1 ToolChain
# $1 = num nodes, $2 = num gpus, $3 = slurm partition.

# Bug fix: `UP=/path to up` is parsed by the shell as the assignment
# `UP=/path` followed by running a command named `to` -- placeholder
# paths must not contain spaces.
UP=/path/to/up
MQB=/path/to/mqbench

cfg=$UP/configs/quant/det/faster_rcnn/faster_rcnn_r50_fpn_improve_quant_trt_qat_deploy.yaml

jobname=qat_deploy


export PYTHONPATH=$UP:$PYTHONPATH
export PYTHONPATH=$MQB:$PYTHONPATH


srun -N$1 --gres=gpu:$2 -p $3 --job-name=$jobname --cpus-per-task=2 \
nohup python -u -m up quant_deploy \
  --config=$cfg \
  > qat_deploy.txt 2>&1 &
--------------------------------------------------------------------------------
/up/utils/general/context.py:
--------------------------------------------------------------------------------
1 | import builtins
2 | import contextlib
3 | import copy
4 |
5 |
@contextlib.contextmanager
def no_print():
    """Temporarily silence ``print`` for the duration of the ``with`` block.

    Fixes over the previous version:
    * the real ``print`` is restored in a ``finally`` clause, so an
      exception inside the block no longer leaves printing disabled;
    * the stand-in accepts keyword arguments (``end=``, ``flush=`` ...)
      and returns ``None``, matching the real ``print`` signature, so
      ``print("x", end="")`` no longer raises ``TypeError``.
    """
    print_ = builtins.print
    builtins.print = lambda *args, **kwargs: None
    try:
        yield
    finally:
        builtins.print = print_
12 |
13 |
@contextlib.contextmanager
def config(cfg, phase, allow_empty=True):
    """Yield ``cfg`` with the ``phase`` sub-mapping flattened into the top level.

    If ``phase`` is not a key of ``cfg``, the original mapping is yielded
    unchanged; otherwise a deep copy is yielded (with the ``phase`` entry
    popped and merged in), so the caller's config is never mutated.
    ``allow_empty`` is accepted for API compatibility but currently unused.
    """
    if phase in cfg:
        merged = copy.deepcopy(cfg)
        merged.update(merged.pop(phase))
        yield merged
    else:
        yield cfg
22 |
--------------------------------------------------------------------------------
/docs/source/Chinese/tutorials/guide/loss.rst:
--------------------------------------------------------------------------------
1 | 损失函数
2 | ========
3 |
4 | UP支持以下损失函数:
5 |
6 | entropy_loss
7 |
8 | * softmax_cross_entropy
9 |
10 | .. code-block:: yaml
11 |
12 | loss:
13 | type: softmax_cross_entropy
14 | kwargs:
15 | class_dim: -1
16 |
17 | * sigmoid_cross_entropy
18 |
19 | .. code-block:: yaml
20 |
21 | loss:
22 | type: sigmoid_cross_entropy
23 |
24 | l1_loss
25 |
26 | * l1_loss
27 |
28 | .. code-block:: yaml
29 |
30 | loss:
31 | type: l1_loss
32 |
--------------------------------------------------------------------------------
/scripts/train_qat.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# cmd example: sh train_qat.sh 1 8 ToolChain
# $1 = num nodes, $2 = num gpus, $3 = slurm partition.

# Bug fix: `UP=/path to up` is parsed by the shell as the assignment
# `UP=/path` followed by running a command named `to` -- placeholder
# paths must not contain spaces.
UP=/path/to/up
MQB=/path/to/mqbench

cfg=$UP/configs/quant/det/retinanet/retinanet-r18-improve_quant_trt_qat.yaml


jobname=quant_qat


export PYTHONPATH=$UP:$PYTHONPATH
export PYTHONPATH=$MQB:$PYTHONPATH


srun -N$1 --gres=gpu:$2 -p $3 --job-name=$jobname --cpus-per-task=2 \
nohup python -u -m up train \
  --ng=$2 \
  --launch=pytorch \
  --config=$cfg \
  --display=10 \
  > train_log_qat.txt 2>&1 &
--------------------------------------------------------------------------------
/up/extensions/csrc/psroi_pooling/psroi_pooling.cpp:
--------------------------------------------------------------------------------
1 | #include "psroi_pooling/psroi_pooling.h"
2 |
3 | using at::Tensor;
4 |
// CPU stub of the PSROI-pooling forward pass.
// ONNX export needs a CPU-dispatchable forward symbol to trace the graph;
// the actual pooling runs in the CUDA implementation, so this stub only
// satisfies the dispatcher/linker and reports success (0).
int psroi_pooling_forward(int pooled_height,
                          int pooled_width,
                          int output_dim,
                          float spatial_scale,
                          Tensor features,
                          Tensor rois,
                          Tensor output,
                          Tensor mapping_channel) {
    // ONNX requires cpu forward support
    return 0;
}
16 |
--------------------------------------------------------------------------------
/up/tasks/nas/bignas/models/heads/__init__.py:
--------------------------------------------------------------------------------
1 | from up.utils.general.registry_factory import MODULE_ZOO_REGISTRY
2 |
3 |
4 | from .big_retinanetwithbn import ( # noqa: F401
5 | BigRetinaHeadWithBN, BignasRetinaHeadWithBN
6 | )
7 |
8 | from .big_clshead import ( # noqa: F401
9 | BigClsHead
10 | )
11 |
12 | from .big_roi_head import ( # noqa: F401
13 | big_roi_head, bignas_roi_head
14 | )
15 |
16 | imported_vars = list(globals().items())
17 |
18 | for var_name, var in imported_vars:
19 | if callable(var):
20 | MODULE_ZOO_REGISTRY.register(var_name, var)
21 |
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | .. UP documentation master file, created by
2 | sphinx-quickstart on Fri February 29 09:56:02 2019.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Welcome to UP's documentation!
7 | ===============================
8 |
9 | .. image:: logo.png
10 | :width: 150px
11 |
12 |
13 | UP (united-perception)是一套基于Distributed PyTorch实现的通用感知任务框架,目标是兼顾研究和产品化支持。
14 |
15 | .. toctree::
16 | :maxdepth: 1
17 | :caption: Table of Contents
18 |
19 | Chinese/index
20 | English/index
21 |
--------------------------------------------------------------------------------
/docs/source/English/tutorials/guide/loss.rst:
--------------------------------------------------------------------------------
1 | Loss function
2 | =============
3 |
4 | UP supports the following loss functions.
5 |
6 | entropy_loss
7 |
8 | * softmax_cross_entropy
9 |
10 | .. code-block:: yaml
11 |
12 | loss:
13 | type: softmax_cross_entropy
14 | kwargs:
15 | class_dim: -1
16 |
17 | * sigmoid_cross_entropy
18 |
19 | .. code-block:: yaml
20 |
21 | loss:
22 | type: sigmoid_cross_entropy
23 |
24 | l1_loss
25 |
26 | * l1_loss
27 |
28 | .. code-block:: yaml
29 |
30 | loss:
31 | type: l1_loss
32 |
--------------------------------------------------------------------------------
/up/extensions/csrc/deform_conv/deform_conv.cpp:
--------------------------------------------------------------------------------
1 | #include "deform_conv/deformable_conv.h"
2 |
3 | #include
4 | using at::Tensor;
5 |
// CPU stub of the deformable-convolution forward pass.
// ONNX export needs a CPU-dispatchable forward symbol to trace the graph;
// the real computation lives in the CUDA implementation, so this stub only
// satisfies the dispatcher/linker and reports success (0).
int deform_conv_forward(Tensor input, Tensor weight,
                        Tensor offset, Tensor output,
                        Tensor columns, Tensor ones, int kH,
                        int kW, int dH, int dW, int padH, int padW,
                        int dilationH, int dilationW, int groups,
                        int deformable_group) {
    // ONNX requires operations support cpu forward
    return 0;
}
15 |
--------------------------------------------------------------------------------
/up/commands/subcommand.py:
--------------------------------------------------------------------------------
1 | """
2 | Base class for subcommands
3 | """
4 | # Standard Library
5 | import abc
6 |
7 |
class Subcommand(abc.ABC):
    """
    Abstract base class for ``up`` command-line subcommands.

    To add a custom command (for example a special ``evaluate`` variant),
    subclass ``Subcommand``, implement :meth:`add_subparser`, and pass the
    subclass as an override to :func:`~commands.main`.
    """
    @abc.abstractmethod
    def add_subparser(self, name, parser):
        # Implementations register an argparse sub-parser called ``name`` on
        # ``parser`` and attach their entry point to it.
        # pylint: disable=protected-access
        raise NotImplementedError
20 |
--------------------------------------------------------------------------------
/docs/source/Chinese/tutorials/guide/saver.rst:
--------------------------------------------------------------------------------
1 | 存储模块配置
2 | ============
3 |
4 | 模型,日志等结果保存设置
5 |
6 | .. warning::
7 |
8 | * auto_resume: 该字段为 True 时,自动从 save_dir 下 load 最新 checkpoint。
9 | 优先级 auto_resume > opts > resume_model > pretrain_model
10 |
11 | base
12 | ----
13 |
14 | 模型自动保存在save_dir目录下
15 |
16 | .. code-block:: yaml
17 |
18 | saver: # Required.
19 | save_dir: checkpoints # dir to save checkpoints
20 | pretrain_model: # pretrain_path
21 | results_dir: results_dir # dir to save detection results. i.e., bboxes, masks, keypoints
22 | auto_resume: True
23 |
24 |
--------------------------------------------------------------------------------
/scripts/train_ptq.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# cmd example: sh train_ptq.sh 1 8 ToolChain
# $1 = num nodes, $2 = num gpus, $3 = slurm partition.

# Bug fix: `UP=/path to up` is parsed by the shell as the assignment
# `UP=/path` followed by running a command named `to` -- placeholder
# paths must not contain spaces.
UP=/path/to/up
MQB=/path/to/mqbench  # github mqbench commit id after 6c222c40d1a176df78bcbf4d334698185f7cd8d8

cfg=$UP/configs/quant/det/faster_rcnn/faster_rcnn_r18_FPN_2x_quant_qdrop.yaml

jobname=quant_ptq

export PYTHONPATH=$UP:$PYTHONPATH
export PYTHONPATH=$MQB:$PYTHONPATH


srun -N$1 --gres=gpu:$2 -p $3 --job-name=$jobname --cpus-per-task=2 \
nohup python -u -m up train \
  --ng=$2 \
  --launch=pytorch \
  --config=$cfg \
  --display=10 \
  > train_log_ptq.txt 2>&1 &
--------------------------------------------------------------------------------
/up/extensions/csrc/roipoint_pool3d/roipoint_pool3d.h:
--------------------------------------------------------------------------------
1 | #ifndef ROIPOINTPOOL3D_H_
2 | #define ROIPOINTPOOL3D_H_
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 | using at::Tensor;
9 |
10 |
11 | #define CHECK_CUDA(x) AT_ASSERTM(x.is_cuda(), #x " must be a CUDA tensor")
12 | #define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
13 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
14 |
15 | int roipool3d_gpu(at::Tensor xyz, at::Tensor boxes3d, at::Tensor pts_feature, at::Tensor pooled_features, at::Tensor pooled_empty_flag);
16 |
17 |
18 |
19 |
20 | #endif
--------------------------------------------------------------------------------
/docs/source/Chinese/index.rst:
--------------------------------------------------------------------------------
1 | .. UP documentation master file, created by
2 | sphinx-quickstart on Fri February 29 09:56:02 2019.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | 欢迎阅读UP中文文档
7 | ==================
8 |
9 | .. image:: logo.png
10 | :width: 150px
11 |
12 |
13 | UP (united-perception)是一套基于Distributed PyTorch实现的通用感知任务框架,目标是兼顾研究和产品化支持。
14 |
15 | .. toctree::
16 | :maxdepth: 1
17 | :caption: Table of Contents
18 |
19 | installation/index
20 | get_started/index
21 | tutorials/index
22 | tasks/index
23 | benchmark/index
24 | usefultools/index
25 |
--------------------------------------------------------------------------------
/up/extensions/csrc/iou3d_nms/iou3d_nms_api.cpp:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 | #include
4 | #include
5 | #include
6 |
7 | #include "iou3d_cpu.h"
8 | #include "iou3d_nms.h"
9 |
10 | /*
11 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
12 | m.def("boxes_overlap_bev_gpu", &boxes_overlap_bev_gpu, "oriented boxes overlap");
13 | m.def("boxes_iou_bev_gpu", &boxes_iou_bev_gpu, "oriented boxes iou");
14 | m.def("nms_gpu", &nms_gpu, "oriented nms gpu");
15 | m.def("nms_normal_gpu", &nms_normal_gpu, "nms gpu");
16 | m.def("boxes_iou_bev_cpu", &boxes_iou_bev_cpu, "oriented boxes iou");
17 | }
18 | */
19 |
--------------------------------------------------------------------------------
/up/extensions/csrc/iou3d_nms/iou3d_nms.h:
--------------------------------------------------------------------------------
1 | #ifndef IOU3D_NMS_H
2 | #define IOU3D_NMS_H
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 |
9 | int boxes_overlap_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b, at::Tensor ans_overlap);
10 | int boxes_iou_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b, at::Tensor ans_iou);
11 | int nms_gpu(at::Tensor boxes, at::Tensor keep, float nms_overlap_thresh);
12 | int nms_normal_gpu(at::Tensor boxes, at::Tensor keep, float nms_overlap_thresh);
13 | int boxes_iou_bev_cpu(at::Tensor boxes_a_tensor, at::Tensor boxes_b_tensor, at::Tensor ans_iou_tensor);
14 |
15 | #endif
16 |
--------------------------------------------------------------------------------
/up/extensions/csrc/nms/nms.h:
--------------------------------------------------------------------------------
1 | #ifndef NMS_H_
2 | #define NMS_H_
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 |
9 | #define CHECK_CUDA(x) AT_ASSERTM(x.is_cuda(), #x " must be a CUDA tensor")
10 | #define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
11 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
12 |
13 | int gpu_nms(at::Tensor keep, at::Tensor num_out, at::Tensor boxes, float nms_overlap_thresh, int offset);
14 | int cpu_nms(at::Tensor keep_out, at::Tensor num_out, at::Tensor boxes, at::Tensor order,
15 | at::Tensor areas, float nms_overlap_thresh, int offset);
16 |
17 | #endif
18 |
--------------------------------------------------------------------------------
/docs/source/English/tutorials/guide/saver.rst:
--------------------------------------------------------------------------------
1 | Saving config
2 | =============
3 |
4 | The config of saving models and logs.
5 |
6 | .. warning::
7 |
    * auto_resume: automatically load the latest checkpoint from save_dir when auto_resume is True.
9 | * Priority: 'auto_resume' > 'opts' > 'resume_model' > 'pretrain_model'.
10 |
11 | base
12 | ----
13 |
14 | Models are automatically saved in the 'save_dir'.
15 |
16 | .. code-block:: yaml
17 |
18 | saver: # Required.
19 | save_dir: checkpoints # dir to save checkpoints
20 | pretrain_model: # pretrain_path
21 | results_dir: results_dir # dir to save detection results. i.e., bboxes, masks, keypoints
22 | auto_resume: True
23 |
--------------------------------------------------------------------------------
/up/__main__.py:
--------------------------------------------------------------------------------
1 | # Standard Library
2 | import argparse
3 |
4 | from up import __version__
5 | from up.utils.general.registry_factory import SUBCOMMAND_REGISTRY
6 |
7 |
def main():
    """CLI entry point: build the parser and dispatch to a subcommand."""
    parser = argparse.ArgumentParser(description="Run Easy Object Detector")
    parser.add_argument('--version', action='version', version=__version__)

    subparsers = parser.add_subparsers(title='subcommands')

    # Each registered subcommand installs its own sub-parser (and attaches
    # its entry point as the ``run`` default on that sub-parser).
    for subname, subcommand in SUBCOMMAND_REGISTRY.items():
        subcommand().add_subparser(subname, subparsers)

    args = parser.parse_args()

    run = getattr(args, 'run', None)
    if run is not None:
        run(args)
    else:
        # No subcommand given: show the available commands instead.
        parser.print_help()
24 |
25 |
# Module entry point: enables `python -m up` execution.
if __name__ == '__main__':
    main()
28 |
--------------------------------------------------------------------------------
/benchmark/multitask_benchmark.md:
--------------------------------------------------------------------------------
1 | # multitask benchmark (det baseline: r50-retina-atss-qfl)
2 | | task | backbone | AP | top1 | model |
3 | | ---------------------- | ------------------- | ----- | ----- | ----- |
4 | | [det](https://github.com/ModelTC/United-Perception/tree/main/configs/multitask/r50-retina-atss-qfl.yaml) | resnet50 | 39.20 | - | [ckpt](https://github.com/ModelTC/United-Perception/releases/download/0.2.0_github/det.pth) |
5 | | [det + cls](https://github.com/ModelTC/United-Perception/tree/main/configs/multitask/r50-retina-atss-qfl+cls.yaml) | resnet50 | 40.01 | 67.81 | [ckpt](https://github.com/ModelTC/United-Perception/releases/download/0.2.0_github/det+cls.pth) |
6 |
--------------------------------------------------------------------------------
/docs/source/English/index.rst:
--------------------------------------------------------------------------------
1 | .. UP documentation master file, created by
2 | sphinx-quickstart on Fri February 29 09:56:02 2019.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Welcome to read English version
7 | ===============================
8 |
9 | .. image:: logo.png
10 | :width: 150px
11 |
12 |
United-perception (UP) is a universal-perception task framework based on Distributed PyTorch, aiming at combining research and productization.
14 |
15 | .. toctree::
16 | :maxdepth: 1
17 | :caption: Table of Contents
18 |
19 | installation/index
20 | get_started/index
21 | tutorials/index
22 | tasks/index
23 | benchmark/index
24 | usefultools/index
25 |
--------------------------------------------------------------------------------
/up/extensions/python/iou_overlap.py:
--------------------------------------------------------------------------------
1 | # Import from local
2 | from ..ext import overlaps
3 |
4 |
def gpu_iou_overlap(b1, b2, mode='IoU'):
    """compute IoU/IoF/IoS between b1 and b2
    Args:
        b1: Tensor, [N, >=4]
        b2: Tensor, [M, >=4]
        mode: str, {IoU, IoF, IoS} Intersection over Union/First/Second area
    """
    # Nothing to compare: return an empty tensor on b1's device/dtype.
    if b1.numel() == 0 or b2.numel() == 0:
        return b1.new_zeros((0,))

    # Translate the textual mode into the flag the CUDA kernel expects
    # (raises KeyError on an unknown mode, as before).
    mode_to_flag = {'IoU': 0, 'IoF': 1, 'IoS': 2}
    flag = mode_to_flag[mode]

    assert b1.shape[1] >= 4 and b2.shape[1] >= 4
    assert b1.is_cuda and b2.is_cuda

    # Only the first four columns (x1, y1, x2, y2) feed the kernel.
    boxes1 = b1[:, :4].contiguous()
    boxes2 = b2[:, :4].contiguous()
    ious = boxes1.new_zeros((boxes1.shape[0], boxes2.shape[0]))
    overlaps.iou(boxes1, boxes2, ious, flag)
    return ious
25 |
--------------------------------------------------------------------------------
/docs/source/Chinese/tutorials/guide/fp16.rst:
--------------------------------------------------------------------------------
1 | FP16模式配置
2 | ============
3 |
4 | * 精度:使用fp16在mask-rcnn,keypoint-rcnn,retinanet,faster-rcnn等可以取得fp32同等精度。
5 | * 速度:仅在V100上有加速效果, R50-C4-Faster-RCNN 加速比 1.87 ( vs fp32)。
6 | * 显存:R50-C4-Faster-RCNN 显存节省 46.5%。
7 |
8 | .. note::
9 |
10 | 速度和显存优化和模型有关, 模型越大, 速度和显存提升越大.
11 |
12 | 原理 & 实现
13 |
14 | * fp16是指使用16位浮点数于参数的训练和保存,fp32是指的使用32位浮点数于参数的训练和保存,V100对fp16运算有特别的优化,所以使用fp16可以达到训练加速的效果。直接使用fp16会导致部分梯度置0,导致精度损失。实际过程中,使用fp32拷贝来保存模型,使用scale_factor来改变fp16的数值范围。
15 |
16 | * forward的过程,bn层、loss计算是使用fp32进行的,其余使用fp16进行计算;backward的过程,将fp16参数的grad拷贝到fp32参数的拷贝上,optimizer.step更新fp32参数,最后把fp32参数拷贝回fp16参数上。
17 |
* 具体细节参考 `Mixed Precision Training `_
19 |
20 | .. code-block:: yaml
21 |
22 | runtime:
23 | # dist
24 | fp16: True
25 |
--------------------------------------------------------------------------------
/up/extensions/csrc/iou_overlap/iou_overlap.h:
--------------------------------------------------------------------------------
1 | #ifndef IOUOVERLAP_H_
2 | #define IOUOVERLAP_H_
3 |
4 | #include
5 | #include
6 | #include
7 |
8 | #define CHECK_CUDA(x) AT_ASSERTM(x.is_cuda(), #x " must be a CUDA tensor")
9 | #define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
10 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
11 |
12 | int IOUOverlap(at::Tensor bboxes1_data,
13 | at::Tensor bboxes2_data,
14 | const int size_bbox,
15 | const int num_bbox1,
16 | const int num_bbox2,
17 | at::Tensor top_data,
18 | const int mode);
19 |
20 | void gpu_iou_overlaps(at::Tensor bboxes1, at::Tensor bboxes2, at::Tensor output, const int mode);
21 |
22 | #endif
23 |
--------------------------------------------------------------------------------
/benchmark/quant_benchmark.md:
--------------------------------------------------------------------------------
1 | # quant benchmark
2 |
Based on MQBench and UP, we provide an object detection benchmark on the COCO dataset.
4 |
5 | | Model | Backend | map\@fp32 | ptq\@int8 | qat\@int8 |
6 | |-----------------------|-----------|----------|-----------|----------|
7 | | retinanet-r18-improve | tensorrt | 40.7 | 40.5 | 40.7 |
8 | | retinanet-r18-improve | snpe | 40.7 | 39.7 | 40.2 |
9 | | retinanet-r18-improve | vitis | 40.7 | 39.0 | 40.1 |
10 | | yolox_s | tensorrt | 40.5 | 39.4 | 39.8 |
11 | | yolox_s | snpe | 40.5 | 38.1 | 39.8 |
12 | | yolox_s_lpcv | vitis | 29.3 | 25.3 | 27.4 |
13 | | faster-rcnn-improve | tensorrt | 43.6 | 43.1 | 44.8 |
14 |
--------------------------------------------------------------------------------
/docs/source/Chinese/tutorials/guide/normalization.rst:
--------------------------------------------------------------------------------
1 | 数据归一化配置
2 | ==============
3 |
4 | normalization支持五种模式
5 |
6 | * freeze_bn: 固定mean和var
7 |
8 | .. code-block:: yaml
9 |
10 | normalize:
11 | type: freeze_bn
12 |
13 | * solo_bn:单卡统计mean和var,不同步
14 |
15 | .. code-block:: yaml
16 |
17 | normalize:
18 | type: solo_bn
19 |
20 | * pt_sync_bn:pytorch 多卡同步mean和var
21 |
22 | .. code-block:: yaml
23 |
24 | normalize:
25 | type: pt_sync_bn
26 | kwargs:
27 | group_size: 8
28 |
29 | * gn: Group Normalization
30 |
31 | .. code-block:: yaml
32 |
33 | normalize:
34 | type: gn
35 | kwargs:
36 | num_groups: 32
37 |
38 | * caffe_freeze_bn: 使用从caffe预加载的frozen bn
39 |
40 | .. code-block:: yaml
41 |
42 | normalize:
43 | type: caffe_freeze_bn
44 |
--------------------------------------------------------------------------------
/up/tasks/__init__.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | import os
3 |
4 |
def is_package(dirname):
    """Return True when *dirname* is an importable package (contains __init__.py)."""
    init_file = os.path.join(dirname, '__init__.py')
    return os.path.exists(init_file)
7 |
8 |
# Directory that holds this package; each task is a sub-package of it.
pwd = os.path.dirname(os.path.realpath(__file__))
# DEFAULT_TASKS may be one name or a ':'-separated list; when unset, every
# entry under this directory is a candidate.
tasks_names = os.environ.get("DEFAULT_TASKS", os.listdir(pwd))
# EXCLUDE_TASKS is a ':'-separated blacklist ('' harmlessly yields ['']).
exclude_tasks = os.environ.get("EXCLUDE_TASKS", '').split(":")
if not isinstance(exclude_tasks, list):
    exclude_tasks = [exclude_tasks]
exclude_tasks = set(exclude_tasks)
# `in` works for both forms: substring test on a str, membership on a list.
if ':' in tasks_names:
    tasks_names = tasks_names.split(':')
if not isinstance(tasks_names, list):
    tasks_names = [tasks_names]
# Import every selected sub-package so its registry side effects execute,
# exposing each one as an attribute of this module.
for fp in tasks_names:
    if fp in exclude_tasks:
        continue
    realpath = os.path.join(pwd, fp)
    if os.path.isdir(realpath) and is_package(realpath):
        globals()[fp] = importlib.import_module('.' + fp, __package__)
25 |
--------------------------------------------------------------------------------
/up/extensions/csrc/iou_overlap/iou_overlap.cpp:
--------------------------------------------------------------------------------
1 | #include "iou_overlap/iou_overlap.h"
2 |
3 | using at::Tensor;
4 |
// Compute pairwise box overlaps on the GPU.
// bboxes1: [N, D], bboxes2: [M, D] with matching D; output: [N, M].
// `mode` selects the overlap flavour (IoU/IoF/IoS) consumed by the kernel.
void gpu_iou_overlaps(Tensor bboxes1, Tensor bboxes2, Tensor output, const int mode){
    // Guard the input tensors: must be CUDA and contiguous.
    CHECK_INPUT(bboxes1);
    CHECK_INPUT(bboxes2);
    CHECK_INPUT(output);

    // Number of boxes
    int num_bbox1 = bboxes1.size(0);
    int num_bbox2 = bboxes2.size(0);
    int size_bbox1 = bboxes1.size(1);
    int size_bbox2 = bboxes2.size(1);

    AT_ASSERTM(output.is_cuda(), "output must be cuda tensor");

    AT_ASSERTM(size_bbox1 == size_bbox2, "bbox1 dim must match bbox2");

    // Delegate to the CUDA launcher declared in iou_overlap.h.
    IOUOverlap(bboxes1,
               bboxes2,
               size_bbox1,
               num_bbox1,
               num_bbox2,
               output,
               mode);
}
29 |
30 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/yolov5/models/initializer.py:
--------------------------------------------------------------------------------
1 | from torch import nn
2 | from up.utils.general.registry_factory import INITIALIZER_REGISTRY
3 | from up.utils.general.log_helper import default_logger as logger
4 |
5 |
@INITIALIZER_REGISTRY.register("yolov5_init")
def init_yolov5(model):
    """YOLOv5-style initializer: adjust BatchNorm constants and unfreeze params."""
    for module in model.modules():
        cls = type(module)
        if cls is nn.Conv2d:
            pass  # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif cls is nn.BatchNorm2d:
            module.eps = 1e-3
            module.momentum = 0.03

    # Freeze
    freeze = []  # parameter names to freeze (full or partial)
    for name, param in model.named_parameters():
        param.requires_grad = True  # train all layers
        if any(token in name for token in freeze):
            logger.info('freezing %s' % name)
            param.requires_grad = False
23 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
@ECHO OFF

pushd %~dp0

REM Command file for Sphinx documentation

if "%SPHINXBUILD%" == "" (
	set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=source
set BUILDDIR=build

if "%1" == "" goto help

REM Probe that sphinx-build is on PATH; errorlevel 9009 means "not found".
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
	echo.
	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
	echo.installed, then set the SPHINXBUILD environment variable to point
	echo.to the full path of the 'sphinx-build' executable. Alternatively you
	echo.may add the Sphinx directory to PATH.
	echo.
	echo.If you don't have Sphinx installed, grab it from
	echo.http://sphinx-doc.org/
	exit /b 1
)

REM Forward the requested target (%1) to sphinx-build's make mode.
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
goto end

:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%

:end
popd
--------------------------------------------------------------------------------
/up/utils/general/global_flag.py:
--------------------------------------------------------------------------------
1 |
2 |
class QuantFlag:
    """Mutable global switch: True while quantization mode is active."""

    def __init__(self):
        # Off until a quantization pipeline flips it on.
        self.flag = False
6 |
7 |
class DeployFlag:
    """Mutable global switch: True while deployment (export) mode is active."""

    def __init__(self):
        # Off until a deploy pipeline flips it on.
        self.flag = False
11 |
12 |
class Aligned:
    """Box-coordinate alignment settings shared across the framework."""

    def __init__(self):
        # Whether aligned coordinates are used; offset applied when not.
        self.aligned = False
        self.offset = 1
17 |
18 |
class Fp16Flag:
    """Mutable global switch: True while mixed-precision (fp16) is enabled."""

    def __init__(self):
        self.fp16 = False
22 |
23 |
class IterBase:
    """Mutable global switch: True when iteration-based training is in use."""

    def __init__(self):
        self.flag = False
27 |
28 |
class Mosaic_p:
    """Mutable global switch for mosaic augmentation; enabled by default."""

    def __init__(self):
        self.flag = True
32 |
33 |
class DistBackend:
    """Holds the distributed backend name; 'dist' is the torch.distributed path."""

    def __init__(self):
        # self.backend = 'linklink'
        self.backend = 'dist'
38 |
39 |
# Module-level singletons shared across the framework.
ITER_BASE_FLAG = IterBase()
FP16_FLAG = Fp16Flag()
ALIGNED_FLAG = Aligned()
DIST_BACKEND = DistBackend()
QUANT_FLAG = QuantFlag()
# Bug fix: DEPLOY_FLAG was previously instantiated as QuantFlag(), leaving
# DeployFlag dead code; use the correctly-typed class (same attributes, so
# this stays backward-compatible).
DEPLOY_FLAG = DeployFlag()
46 |
--------------------------------------------------------------------------------
/docs/source/English/tutorials/guide/normalization.rst:
--------------------------------------------------------------------------------
1 | Data normalization
2 | ==================
3 |
4 | Normalization supports 5 modes.
5 |
6 | * freeze_bn: fixed mean and var.
7 |
8 | .. code-block:: yaml
9 |
10 | normalize:
11 | type: freeze_bn
12 |
* solo_bn: separately counting mean and var by the single GPU.
14 |
15 | .. code-block:: yaml
16 |
17 | normalize:
18 | type: solo_bn
19 |
20 | * pt_sync_bn: synchronizing mean and var by multiple GPUs by pytorch.
21 |
22 | .. code-block:: yaml
23 |
24 | normalize:
25 | type: pt_sync_bn
26 | kwargs:
27 | group_size: 8
28 |
29 | * gn: Group Normalization
30 |
31 | .. code-block:: yaml
32 |
33 | normalize:
34 | type: gn
35 | kwargs:
36 | num_groups: 32
37 |
38 | * caffe_freeze_bn: using the frozen bn preloaded from caffe.
39 |
40 | .. code-block:: yaml
41 |
42 | normalize:
43 | type: caffe_freeze_bn
44 |
--------------------------------------------------------------------------------
/docs/source/Chinese/usefultools/visualization/index.rst:
--------------------------------------------------------------------------------
1 | 可视化
2 | ======
3 |
4 | UP 支持两种可视化模式:
5 |
6 | * Inference
7 | * Hook
8 |
9 | Inference
10 | ---------
11 |
12 | 你可以将可视化设置加入推理配置中,如下所示:
13 |
14 | .. code-block:: yaml
15 |
16 | runtime:
17 | inferencer:
18 | type: base
19 | kwargs:
20 | visualizer:
21 | type: plt
22 | kwargs:
23 | class_names: ['__background__', 'person'] # class names
24 | thresh: 0.5
25 |
26 | Hook
27 | ----
28 |
29 | UP 支持可视化hook,你可以将可视化设置加入Hook配置中,这样可以在训练和评估时画出 gt 框和检测框。
30 |
31 | .. code-block:: yaml
32 |
33 | - type: visualize
34 | kwargs:
35 | vis_gt:
36 | type: plt
37 | kwargs:
38 | vis_dir: vis_gt
39 | thresh: 0.3
40 | vis_dt:
41 | type: plt
42 | kwargs:
43 | vis_dir: vis_dt
44 | thresh: 0.3
45 |
46 |
47 |
48 |
--------------------------------------------------------------------------------
/docs/source/Chinese/tasks/tasks/3ddet.rst:
--------------------------------------------------------------------------------
1 | 3D检测
2 | ======
3 |
4 | UP支持3D检测任务训练、推理的全部流程;
5 | `具体代码 `_
6 |
7 | 配置文件
8 | --------
9 |
10 | `代码仓库 `_
11 | 其中包括常用算法配置文件
12 |
13 | 数据集相关模块
14 | --------------
15 |
16 | 1. 数据集类型包括:
17 |
18 | * kitti
19 |
20 | 2. 数据集类型通过设置Dataset的type来选择,默认为kitti,配置文件示例如下:
21 |
22 | .. code-block:: yaml
23 |
24 | dataset:
25 | type: kitti
26 | kwargs:
27 | meta_file: kitti/kitti_infos/kitti_infos_train.pkl
28 | class_names: *class_names
29 | get_item_list: &get_item_list ['points']
30 | training: True
31 | transformer: [*point_sampling, *point_flip,*point_rotation,*point_scaling, *to_voxel_train]
32 | image_reader:
33 | type: kitti
34 | kwargs:
35 | image_dir: kitti/training/
36 | color_mode: None
37 |
--------------------------------------------------------------------------------
/up/tasks/seg/utils/shape_convert.py:
--------------------------------------------------------------------------------
def nlc_to_nchw(x, hw_shape):
    """Reshape a sequence-style tensor [N, L, C] into a feature map [N, C, H, W].

    Args:
        x (Tensor): Input of shape [N, L, C].
        hw_shape (Sequence[int]): Target (height, width); H * W must equal L.

    Returns:
        Tensor: Output tensor of shape [N, C, H, W].
    """
    H, W = hw_shape
    assert x.ndim == 3
    B, L, C = x.shape
    assert L == H * W, "The seq_len doesn't match H, W"
    # Move channels in front of the sequence axis, then unfold L into (H, W).
    return x.permute(0, 2, 1).reshape(B, C, H, W)
16 |
17 |
def nchw_to_nlc(x):
    """Flatten a feature map [N, C, H, W] into sequence form [N, L, C].

    Args:
        x (Tensor): Input of shape [N, C, H, W].

    Returns:
        Tensor: Contiguous output of shape [N, H*W, C].
    """
    assert x.ndim == 4
    flat = x.flatten(2)  # [N, C, H*W]
    return flat.transpose(1, 2).contiguous()
29 |
--------------------------------------------------------------------------------
/up/extensions/csrc/roiaware_pool3d/roiaware_pool3d.h:
--------------------------------------------------------------------------------
#ifndef ROIAWAREPOOL3D_H_
#define ROIAWAREPOOL3D_H_

// NOTE(review): the include targets appear to have been stripped in this
// copy (bare '#include' lines); presumably torch/ATen headers -- restore
// before compiling.
#include
#include
#include
#include
using at::Tensor;


// Validation helpers: every input tensor must be CUDA-resident and contiguous.
#define CHECK_CUDA(x) AT_ASSERTM(x.is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)

// Forward pass: pool point features into per-ROI voxel grids, recording the
// contributing point indices (pool_method semantics defined by the kernel).
int roiaware_pool3d_gpu(at::Tensor rois, at::Tensor pts, at::Tensor pts_feature, at::Tensor argmax,
at::Tensor pts_idx_of_voxels, at::Tensor pooled_features, int pool_method);
// Backward pass: scatter grad_out back to the input feature gradients.
int roiaware_pool3d_gpu_backward(at::Tensor pts_idx_of_voxels, at::Tensor argmax, at::Tensor grad_out, at::Tensor grad_in, int pool_method);
// Assign each point the index of the box containing it (GPU and CPU variants).
int points_in_boxes_gpu(at::Tensor boxes_tensor, at::Tensor pts_tensor, at::Tensor box_idx_of_points_tensor);
int points_in_boxes_cpu(at::Tensor boxes_tensor, at::Tensor pts_tensor, at::Tensor pts_indices_tensor);




#endif
--------------------------------------------------------------------------------
/up/tasks/multitask/models/wrappers/wrapper_utils.py:
--------------------------------------------------------------------------------
1 | # Standard library
2 | import importlib
3 | import types
4 |
5 | # Import from pod
6 | from up.utils.general.registry_factory import MODULE_ZOO_REGISTRY
7 |
8 |
def build_model(mtype, kwargs):
    """Instantiate a model from a dotted import path or the module zoo.

    Args:
        mtype: Either a fully-qualified 'package.module.Class' path, or a
            registry key known to MODULE_ZOO_REGISTRY.
        kwargs: Keyword arguments forwarded to the model constructor.
    """
    if '.' not in mtype:
        # Registered models are built through the registry config protocol.
        return MODULE_ZOO_REGISTRY.build({'type': mtype, 'kwargs': kwargs})
    # Traditional usage: resolve the class by importing its module.
    module_name, cls_name = mtype.rsplit('.', 1)
    cls = getattr(importlib.import_module(module_name), cls_name)
    return cls(**kwargs)
19 |
20 |
def instance_method(func, obj):
    """Bind *func* to *obj* so it can be called like a regular instance method."""
    bound = types.MethodType(func, obj)
    return bound
23 |
24 |
def recursive_set(model, key, module):
    """Attach *module* at the (possibly dotted) attribute path *key* of *model*.

    For 'a.b.c' this walks model.a.b and registers the module under the
    name 'c' via add_module.
    """
    def _walk(node, path):
        if '.' in path:
            head, rest = path.split('.', 1)
            _walk(getattr(node, head), rest)
        else:
            node.add_module(path, module)

    _walk(model, key)
34 |
--------------------------------------------------------------------------------
/up/commands/eval.py:
--------------------------------------------------------------------------------
1 | import json
2 | from .subcommand import Subcommand
3 | import functools
4 | from up.utils.general.registry_factory import EVALUATOR_REGISTRY, SUBCOMMAND_REGISTRY
5 | from up.utils.general.user_analysis_helper import send_info
6 |
7 | __all__ = ['Eval']
8 |
9 |
@SUBCOMMAND_REGISTRY.register('eval')
class Eval(Subcommand):
    """Subcommand that exposes one nested parser per registered evaluator."""

    def add_subparser(self, name, parser):
        """Register the 'eval' subcommand and wire every evaluator into it."""
        parser = parser.add_parser(name, help='sub-command for evaluation')
        subparsers = parser.add_subparsers(help='sub-command for evaluation')
        for eval_name, evaluator in EVALUATOR_REGISTRY.items():
            sub = evaluator.add_subparser(eval_name, subparsers)
            # Every evaluator is run through the shared _main entry point.
            sub.set_defaults(run=functools.partial(_main, evaluator))

        return subparsers
20 |
21 |
def _main(evaluator_cls, args):
    """Build an evaluator from parsed CLI args and print its metrics as JSON."""
    send_info(func='evaluate')
    print('building evaluator')
    evaluator = evaluator_cls.from_args(args)
    print('evaluator builded, start to evaluate')
    print(json.dumps(evaluator.eval(args.res_file), indent=2))
29 |
--------------------------------------------------------------------------------
/up/tasks/det_3d/models/backbones_3d/map_to_bev.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | from up.utils.general.registry_factory import MODULE_ZOO_REGISTRY
3 |
4 |
@MODULE_ZOO_REGISTRY.register('height_compression')
class HeightCompression(nn.Module):
    """Collapse the depth axis of a dense 3D voxel volume into channels (BEV)."""

    def __init__(self):
        super().__init__()

    def forward(self, feature_dict):
        """Densify 'encoded_spconv_tensor' and fold its D axis into C.

        Expects:
            feature_dict['encoded_spconv_tensor']: sparse tensor whose dense
                form is [N, C, D, H, W].
        Adds to feature_dict:
            'spatial_features': [N, C*D, H, W] BEV feature map.
            'spatial_features_stride': copied from 'encoded_spconv_tensor_stride'.
        """
        dense = feature_dict['encoded_spconv_tensor'].dense()
        n, c, d, h, w = dense.shape
        bev = dense.view(n, c * d, h, w)
        feature_dict.update({
            'spatial_features': bev,
            'spatial_features_stride': feature_dict['encoded_spconv_tensor_stride'],
        })
        return feature_dict
27 |
28 |
def build_map_to_bev(cfg_map_to_bev):
    """Build a map-to-BEV module from its registry config dict."""
    return MODULE_ZOO_REGISTRY.build(cfg_map_to_bev)
31 |
--------------------------------------------------------------------------------
/up/tasks/det_3d/data/metrics/kitti_object_eval_python/evaluate.py:
--------------------------------------------------------------------------------
1 | from up.tasks.det_3d.data.metrics.kitti_object_eval_python import kitti_common as kitti
2 | from .eval import get_coco_eval_result, get_official_eval_result
3 | from up.utils.general.petrel_helper import PetrelHelper
4 |
5 |
def _read_imageset_file(path):
    """Read an image-set split file and return its lines as integer ids."""
    with PetrelHelper.open(path) as f:
        return [int(line) for line in f]
12 |
13 |
def evaluate(label_path,
             result_path,
             label_split_file,
             current_class=0,
             coco=False,
             score_thresh=-1):
    """Run KITTI-style evaluation of detection results against ground truth.

    Args:
        label_path: Directory containing ground-truth label files.
        result_path: Directory containing predicted label files.
        label_split_file: Text file listing the image ids to evaluate.
        current_class: Class id (or ids) to evaluate.
        coco: If True use COCO-style metrics, otherwise the official KITTI ones.
        score_thresh: When > 0, drop detections scoring below this value.
    """
    dt_annos = kitti.get_label_annos(result_path)
    if score_thresh > 0:
        dt_annos = kitti.filter_annos_low_score(dt_annos, score_thresh)
    val_image_ids = _read_imageset_file(label_split_file)
    gt_annos = kitti.get_label_annos(label_path, val_image_ids)
    if coco:
        return get_coco_eval_result(gt_annos, dt_annos, current_class)
    return get_official_eval_result(gt_annos, dt_annos, current_class)
29 |
--------------------------------------------------------------------------------
/up/tasks/det_3d/models/backbones_3d/vfe/mean_vfe.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .pillar_vfe import VFETemplate
3 | from up.utils.general.registry_factory import MODULE_ZOO_REGISTRY
4 |
5 |
@MODULE_ZOO_REGISTRY.register('mean_vfe')
class MeanVFE(VFETemplate):
    """Voxel feature encoder that averages the raw points inside each voxel."""

    def __init__(self):
        super().__init__()

    def forward(self, batch_dict):
        """Compute the mean point feature per voxel.

        Expects:
            batch_dict['voxels']: (num_voxels, max_points_per_voxel, C)
            batch_dict['voxel_num_points']: (num_voxels,) valid-point counts

        Returns:
            (num_voxels, C) contiguous tensor of per-voxel mean features.
        """
        voxels = batch_dict['voxels']
        counts = batch_dict['voxel_num_points']
        summed = voxels.sum(dim=1, keepdim=False)
        # Clamp to 1 so empty voxels divide by one instead of zero.
        denom = torch.clamp_min(counts.view(-1, 1), min=1.0).type_as(voxels)
        return (summed / denom).contiguous()
28 |
29 |
def build_vfe(cfg_vfe):
    """Build a voxel-feature-encoder module from its registry config dict."""
    return MODULE_ZOO_REGISTRY.build(cfg_vfe)
32 |
--------------------------------------------------------------------------------
/docs/source/Chinese/tasks/tasks/quant.rst:
--------------------------------------------------------------------------------
1 | 量化
2 | ====
3 |
4 | UP支持量化任务训练、推理的全部流程;
5 | `具体代码 `_
6 |
7 | 配置文件
8 | --------
9 |
10 | `代码仓库 `_
11 | 其中包括常用算法配置文件
12 |
13 | 数据集相关模块
14 | --------------
15 |
16 | 1. 数据集类型包括:
17 |
18 | * coco
19 |
20 | 2. 数据集类型通过设置Dataset的type来选择,默认为coco,配置文件示例如下:
21 |
22 | .. code-block:: yaml
23 |
24 | dataset:
25 | type: coco
26 | kwargs:
27 | ...
28 |
29 | 量化设置
30 | --------
31 |
32 | .. code-block:: yaml
33 |
34 | quant:
35 | ptq_only: False
36 | deploy_backend: tensorrt
37 | cali_batch_size: 900
38 | prepare_args:
39 | extra_qconfig_dict:
40 | w_observer: MinMaxObserver
41 | a_observer: EMAMinMaxObserver
42 | w_fakequantize: FixedFakeQuantize
43 | a_fakequantize: FixedFakeQuantize
44 | leaf_module: [Space2Depth, FrozenBatchNorm2d]
45 | extra_quantizer_dict:
46 | additional_module_type: [ConvFreezebn2d, ConvFreezebnReLU2d]
47 |
48 |
49 |
--------------------------------------------------------------------------------
/docs/source/English/usefultools/visualization/index.rst:
--------------------------------------------------------------------------------
1 | Visualization
2 | =============
3 |
4 | UP supports two modes of visualization.
5 |
6 | * Inference
7 | * Hook
8 |
9 | Inference
10 | ---------
11 |
12 | You can add the visualization setting into the inference setting as followed.
13 |
14 | .. code-block:: yaml
15 |
16 | runtime:
17 | inferencer:
18 | type: base
19 | kwargs:
20 | visualizer:
21 | type: plt
22 | kwargs:
23 | class_names: ['__background__', 'person'] # class names
24 | thresh: 0.5
25 |
26 | Hook
27 | ----
28 |
UP supports a visualization hook. You can add the visualization setting into the hook setting for drawing gt and dt boxes during training and evaluation.
30 |
31 | .. code-block:: yaml
32 |
33 | - type: visualize
34 | kwargs:
35 | vis_gt:
36 | type: plt
37 | kwargs:
38 | vis_dir: vis_gt
39 | thresh: 0.3
40 | vis_dt:
41 | type: plt
42 | kwargs:
43 | vis_dir: vis_dt
44 | thresh: 0.3
45 |
46 |
47 |
48 |
--------------------------------------------------------------------------------
/up/tasks/seg/utils/optimizer_helper.py:
--------------------------------------------------------------------------------
1 | from up.utils.model.optimizer_helper import BaseOptimizer
2 | from up.utils.general.registry_factory import OPTIMIZER_REGISTRY
3 |
4 |
5 | __all__ = ['SegformerOptimizer']
6 |
7 |
@OPTIMIZER_REGISTRY.register('segformer')
class SegformerOptimizer(BaseOptimizer):
    """Optimizer helper that routes selected parameters into special groups."""

    def get_trainable_params(self, cfg_optim=None):
        """Partition trainable parameters into a default group plus one group
        per 'special_param_group' entry (matched by substring of the name).

        Falls back to param_group_all when an explicit 'pconfig' is given.
        """
        special_param_group = cfg_optim.get('special_param_group', [])
        pconfig = cfg_optim.get('pconfig', None)
        if pconfig is not None:
            return self.param_group_all(self.model, pconfig)
        groups = [{"params": []}]
        groups += [{"params": [], "lr": spg['lr'], "weight_decay": spg['weight_decay']}
                   for spg in special_param_group]
        for name, param in self.model.named_parameters():
            if not param.requires_grad:
                continue
            target = 0
            # First matching special group wins; otherwise the default group.
            for idx, spg in enumerate(special_param_group):
                if spg['key'] in name:
                    target = idx + 1
                    break
            groups[target]["params"].append(param)
        return groups
26 |
--------------------------------------------------------------------------------
/docs/source/English/tasks/tasks/3ddet.rst:
--------------------------------------------------------------------------------
1 | 3D detection
2 | ============
3 |
UP supports the whole pipeline of training and inference;
5 |
6 | `Codes `_
7 |
8 | Configs
9 | -------
10 |
11 | It contains the illustration of common configs.
12 |
13 | `Repos `_
14 |
15 | Dataset related modules
16 | -----------------------
17 |
18 | 1. Dataset types:
19 |
20 | * kitti
21 |
22 | 2. The type of datasets can be chosen by setting 'type' in Dataset (default is kitti). The config is as followed.
23 |
24 | .. code-block:: yaml
25 |
26 | dataset:
27 | type: kitti
28 | kwargs:
29 | meta_file: kitti/kitti_infos/kitti_infos_train.pkl
30 | class_names: *class_names
31 | get_item_list: &get_item_list ['points']
32 | training: True
33 | transformer: [*point_sampling, *point_flip,*point_rotation,*point_scaling, *to_voxel_train]
34 | image_reader:
35 | type: kitti
36 | kwargs:
37 | image_dir: kitti/training/
38 | color_mode: None
39 |
--------------------------------------------------------------------------------
/up/tasks/ssl/wrapper/mae.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | from up.utils.general.registry_factory import (
3 | MODULE_WRAPPER_REGISTRY
4 | )
5 |
6 |
@MODULE_WRAPPER_REGISTRY.register('MAE')
class MAE(nn.Module):
    """
    Wrapper that builds a MAE (masked autoencoder) model around an
    encoder-decoder backbone.
    """
    def __init__(self, model):
        """
        model: encoder-decoder model (default: vit-based model)
        """
        super().__init__()
        self.model = model

    def forward_loss(self, imgs, pred, mask):
        """
        Compute the MAE reconstruction loss over removed patches only.

        imgs: [N, 3, H, W] original images
        pred: [N, L, p*p*3] per-patch pixel predictions
        mask: [N, L], 0 is keep, 1 is remove,

        NOTE(review): `self.patchify` and `self.norm_pix_loss` are not
        defined on this wrapper class; presumably they live on the wrapped
        `self.model` (a ViT-based MAE). Confirm whether these should be
        `self.model.patchify` / `self.model.norm_pix_loss`.
        """
        target = self.patchify(imgs)
        if self.norm_pix_loss:
            # Normalize target pixels per patch before comparison.
            mean = target.mean(dim=-1, keepdim=True)
            var = target.var(dim=-1, keepdim=True)
            target = (target - mean) / (var + 1.e-6)**.5

        loss = (pred - target) ** 2
        loss = loss.mean(dim=-1)  # [N, L], mean loss per patch

        loss = (loss * mask).sum() / mask.sum()  # mean loss on removed patches
        return loss

    def forward(self, input):
        # Delegate straight to the wrapped encoder-decoder model.
        return self.model(input)
39 |
--------------------------------------------------------------------------------
/up/__init__.py:
--------------------------------------------------------------------------------
try:
    # Best-effort analytics hook: instrument torch model lineage when the
    # internal spring_analytics package is available; silently skip otherwise.
    from spring_analytics.instrument_agent import InstrumentAgent
    Agent = InstrumentAgent()
    Agent.instrument_torch_model_lineage()
except Exception:
    pass
# flake8: noqa F401
import onnx
import os
import sys
import importlib

"""Set matplotlib up."""
# Import from third library
import matplotlib

__version__ = "0.3.0" # Available for other modules to import

# import for register
# NOTE: these star-imports run module registration side effects.

from .commands import *
from .runner import *
from .data import *
from .models import *
from .utils import *
from .tasks import *
from .apis import *

# NOTE(review): the backend is switched only after the submodule imports
# above; if any of them already imported pyplot, 'Agg' may not take effect
# -- confirm.
matplotlib.use('Agg') # Use a non-interactive backend
30 |
31 |
def import_plugin():
    """Import user plugins listed in the PLUGINPATH environment variable.

    PLUGINPATH is a colon-separated list. An entry containing '/' is treated
    as '<base_dir>/<module>': the base dir is prepended to sys.path and the
    trailing component imported; otherwise the entry is imported as a plain
    module name.
    """
    plugin_paths = os.environ.get('PLUGINPATH')
    if plugin_paths is None:
        return
    for entry in plugin_paths.split(':'):
        if '/' in entry:
            base, module = entry.rsplit('/', 1)
            sys.path.insert(0, base)
            importlib.import_module(module)
        else:
            importlib.import_module(entry)
45 |
46 |
# Import other auxiliary (user-supplied) plugin packages declared via the
# PLUGINPATH environment variable; their registrations run at import time.
import_plugin()
49 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/yolox/utils/optimizer_helper.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | from up.utils.model.optimizer_helper import BaseOptimizer
3 | from up.utils.general.registry_factory import OPTIMIZER_REGISTRY
4 |
5 |
6 | __all__ = ['QatWeightsOptimizer']
7 |
8 |
@OPTIMIZER_REGISTRY.register('qat_weights')
class QatWeightsOptimizer(BaseOptimizer):
    """Optimizer helper for QAT: BN weights and biases get no weight decay,
    all other module weights use the configured decay."""

    def get_trainable_params(self, cfg_optim):
        """Return four param groups: [BN weights, other weights, biases, spare].

        Group 0: BatchNorm2d weights, weight_decay forced to 0.
        Group 1: all other module weights, decayed by the configured value.
        Group 2: biases, weight_decay forced to 0.
        Group 3: unused placeholder (kept for interface stability).
        """
        decay = cfg_optim['kwargs']['weight_decay']
        groups = [{"params": []} for _ in range(4)]
        for _, mod in self.model.named_modules():
            if hasattr(mod, 'bias') and isinstance(mod.bias, nn.Parameter):
                groups[2]["params"].append(mod.bias)  # biases
                groups[2]["weight_decay"] = 0.0
            if isinstance(mod, nn.BatchNorm2d):
                groups[0]["params"].append(mod.weight)  # no decay
                groups[0]["weight_decay"] = 0.0
            elif hasattr(mod, 'weight') and isinstance(mod.weight, nn.Parameter):
                groups[1]["params"].append(mod.weight)
                groups[1]["weight_decay"] = decay
        return groups
25 |
--------------------------------------------------------------------------------
/up/extensions/csrc/softer_nms/softer_nms.h:
--------------------------------------------------------------------------------
#ifndef SOFTER_NMS_H_
#define SOFTER_NMS_H_

// NOTE(review): the include targets appear to have been stripped in this
// copy (bare '#include' lines); presumably ATen/torch headers -- restore
// before compiling.
#include
#include
#include
#include

struct Proposal {
/*
* * Corresponding to 9-channel tensors
* * (x1, y1), (x2, y2): Coordinates of the top-left corner and the bottom-right corner
* * (vx1, vy1, vx2, vy2): Predicted log std variance of each coordinate
* * score: Classification score of the proposal
* */
float x1, y1, x2, y2, vx1, vy1, vx2, vy2, score;
};

struct Boxes {
/* Coordinates of the top-left corner and the bottom-right corner */
float x1, y1, x2, y2;
};

// Overlap-decay scheme applied to neighbouring boxes during soft-nms.
enum class IOUMethod : uint32_t
{
/* The methods used in soft-nms*/
LINEAR = 0,
GAUSSIAN,
HARD
};

// Box-refinement strategy used by softer-nms.
enum class Method : uint32_t
{
/* The methods used in softer-nms*/
VAR_VOTING = 0, //newest version: variance voting
SOFTER //deprecated version
};

// CPU softer-NMS entry point. `boxes` carries the 9-channel proposals and
// `inds` the index buffer; sigma / iou_thresh / iou_sigma tune the decay and
// matching behaviour. Return-value semantics are defined by the .cpp
// implementation (presumably the number of kept boxes -- confirm there).
int cpu_softer_nms(at::Tensor boxes, at::Tensor inds, float sigma, float iou_thresh,
IOUMethod iou_method, float iou_sigma, Method method);

#endif
43 |
44 |
--------------------------------------------------------------------------------
/up/extensions/csrc/cross_focal_loss/cross_focal_loss_cuda.cpp:
--------------------------------------------------------------------------------
1 | #include "cross_focal_loss/cross_focal_loss.h"
2 |
3 | using at::Tensor;
4 |
5 |
// Forward pass of the cross focal (sigmoid) loss.
// Validates that every tensor is CUDA-resident and contiguous, then launches
// the CUDA kernel that writes per-element loss values into `losses`.
// N: number of elements to process; weight_pos / gamma / alpha are the
// focal-loss parameters; neg_map semantics are defined by the kernel.
// Returns 1 unconditionally (failures surface via AT_ASSERTM).
int cross_focal_loss_sigmoid_forward_cuda(
    int N,
    Tensor logits,
    Tensor targets,
    float weight_pos,
    float gamma,
    float alpha,
    int num_classes,
    Tensor losses,
    Tensor neg_map)
{
    // Grab the input tensor
    CHECK_INPUT(logits);
    CHECK_INPUT(targets);
    CHECK_INPUT(losses);
    CHECK_INPUT(neg_map);

    CrossSigmoidFocalLossForwardLauncher(
        N, logits, targets, weight_pos,
        gamma, alpha, num_classes, losses, neg_map);

    return 1;
}
29 |
// Backward pass of the cross focal (sigmoid) loss.
// Validates inputs and launches the CUDA kernel that writes input gradients
// into `dX_data`. Parameters mirror the forward entry point.
// Returns 1 unconditionally (failures surface via AT_ASSERTM).
int cross_focal_loss_sigmoid_backward_cuda(
    int N,
    Tensor logits,
    Tensor targets,
    Tensor dX_data,
    float weight_pos,
    float gamma,
    float alpha,
    int num_classes,
    Tensor neg_map)
{
    // Grab the input tensor
    CHECK_INPUT(logits);
    CHECK_INPUT(targets);
    CHECK_INPUT(dX_data);
    CHECK_INPUT(neg_map);

    CrossSigmoidFocalLossBackwardLauncher(
        N, logits, targets, dX_data,
        weight_pos, gamma, alpha, num_classes, neg_map);

    return 1;
}
53 |
--------------------------------------------------------------------------------
/up/tasks/ssl/models/postprocess/ssl_postprocess.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | from up.utils.general.registry_factory import MODULE_ZOO_REGISTRY
3 | from up.models.losses import build_loss
4 |
5 | __all__ = ['BaseSslPostProcess']
6 |
7 |
@MODULE_ZOO_REGISTRY.register('base_ssl_postprocess')
class BaseSslPostProcess(nn.Module):
    """Post-process module computing self-supervised losses.

    Accepts either a single loss config or a list of configs; a list is
    assembled into an nn.Sequential container keyed by loss type.
    """

    def __init__(self, ssl_loss, prefix=None):
        super(BaseSslPostProcess, self).__init__()
        if isinstance(ssl_loss, list):
            self.ssl_loss = nn.Sequential()
            for _loss in ssl_loss:
                self.ssl_loss.add_module(_loss['type'], build_loss(_loss))
        else:
            self.ssl_loss = build_loss(ssl_loss)
        # Loss keys are namespaced by this prefix (class name by default).
        self.prefix = prefix if prefix is not None else self.__class__.__name__

    def get_loss(self, input):
        """Return a dict mapping '<prefix>[_<name>].loss' to each loss value."""
        loss_info = {}
        if isinstance(self.ssl_loss, nn.Sequential):
            # BUGFIX: iterate the child loss modules, not their parameters.
            # named_parameters() yields tensors, which are not callable,
            # so the previous code could never invoke the losses.
            for name, s_loss in self.ssl_loss.named_children():
                loss_info[f"{self.prefix}_{name}.loss"] = s_loss(input)
        else:
            loss_info[f"{self.prefix}.loss"] = self.ssl_loss(input)
        return loss_info

    def forward(self, input):
        return self.get_loss(input)
34 |
--------------------------------------------------------------------------------
/docs/source/English/tasks/tasks/quant.rst:
--------------------------------------------------------------------------------
1 | Quant
2 | =====
3 |
UP supports the whole pipeline of training and inference;
5 |
6 | `Codes `_
7 |
8 | Configs
9 | -------
10 |
11 | It contains the illustration of common configs.
12 |
13 | `Repos `_
14 |
15 | Dataset related modules
16 | -----------------------
17 |
18 | 1. Dataset types:
19 |
20 | * coco
21 |
22 | 2. The type of datasets can be chosen by setting 'type' in Dataset (default is coco). The config is as followed.
23 |
24 | .. code-block:: yaml
25 |
26 | dataset:
27 | type: coco
28 | kwargs:
29 | ...
30 |
31 | Quant setting
32 | -------------
33 |
34 | .. code-block:: yaml
35 |
36 | quant:
37 | ptq_only: False
38 | deploy_backend: tensorrt
39 | cali_batch_size: 900
40 | prepare_args:
41 | extra_qconfig_dict:
42 | w_observer: MinMaxObserver
43 | a_observer: EMAMinMaxObserver
44 | w_fakequantize: FixedFakeQuantize
45 | a_fakequantize: FixedFakeQuantize
46 | leaf_module: [Space2Depth, FrozenBatchNorm2d]
47 | extra_quantizer_dict:
48 | additional_module_type: [ConvFreezebn2d, ConvFreezebnReLU2d]
49 |
50 |
51 |
--------------------------------------------------------------------------------
/docs/source/English/tutorials/guide/fp16.rst:
--------------------------------------------------------------------------------
1 | FP16
2 | ====
3 |
4 | * Accuracy: Training with fp16 on mask-rcnn, keypoint-rcnn, retinanet, and faster-rcnn gives equal performance to training with fp32 on them.
5 | * Speed: speeding only on V100, e.g., R50-C4-Faster-RCNN using fp16 runs 1.87 times faster compared with that using fp32.
6 | * Memory: R50-C4-Faster-RCNN with fp16 only occupies 46.5% memory compared with that with fp32.
7 |
8 | .. note::
9 |
10 | The optimization of speed and memory are in direct proportion to the size of models.
11 |
Theory & Implementation
13 |
* fp16 means using Float16 for the training and saving of parameters, while fp32 means using Float32 for them. V100 has specific hardware support for fp16, which speeds up training. Directly using fp16 zeros out some gradients, decreasing accuracy. In practice, we keep an fp32 copy of the model, and use a scale_factor to change the range of fp16 values.
15 |
16 | * In the process of forwarding, bn layers and losses are computed with fp32, and others are computed with fp16; in the process of backwarding, the gradient with fp16 are copied by fp32 for updating in the optimizer, and the updated gradient will be copied back with fp16.
17 |
* The details can be referenced in `Mixed Precision Training `_
19 |
20 | .. code-block:: yaml
21 |
22 | runtime:
23 | # dist
24 | fp16: True
25 |
--------------------------------------------------------------------------------
/up/tasks/det_3d/runner/point_runner.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.optim
3 | import numpy as np
4 | from up.utils.env.gene_env import to_device
5 | from up.runner.base_runner import BaseRunner
6 | from up.utils.general.registry_factory import RUNNER_REGISTRY
7 |
8 |
9 | __all__ = ['PointRunner']
10 |
11 |
@RUNNER_REGISTRY.register("point")
class PointRunner(BaseRunner):
    """Runner for point-cloud (3D detection) models."""

    def __init__(self, config, work_dir='./', training=True):
        super(PointRunner, self).__init__(config, work_dir, training)

    def batch2device(self, batch):
        """Move numpy entries of a batch onto the GPU.

        Metadata-like keys are left untouched, 'image_shape' is cast to int,
        every other ndarray to float; finally the whole batch is aligned
        with the model dtype/device when the points tensor is off.
        """
        # Metadata entries stay on the host.
        skip_keys = ['frame_id', 'metadata', 'calib', 'voxel_infos',
                     'class_names', 'velodyne_path', 'image_meta']
        model_dtype = torch.float32
        if self.fp16 and self.backend == 'linklink':
            model_dtype = self.model.dtype
        for key, val in batch.items():
            if not isinstance(val, np.ndarray) or key in skip_keys:
                continue
            if key in ['image_shape']:
                batch[key] = torch.from_numpy(val).int().cuda()
            else:
                batch[key] = torch.from_numpy(val).float().cuda()

        points = batch['points']
        if points.device != torch.device('cuda') or points.dtype != model_dtype:
            batch = to_device(batch, device=torch.device('cuda'), dtype=model_dtype)
        return batch
34 |
--------------------------------------------------------------------------------
/configs/det_3d/second/anchors.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "class_name": "Car",
4 | "anchor_sizes": [
5 | [
6 | 3.9,
7 | 1.6,
8 | 1.56
9 | ]
10 | ],
11 | "anchor_rotations": [
12 | 0,
13 | 1.57
14 | ],
15 | "anchor_bottom_heights": [
16 | -1.78
17 | ],
18 | "align_center": false,
19 | "feature_map_stride": 8,
20 | "matched_threshold": 0.6,
21 | "unmatched_threshold": 0.45
22 | },
23 | {
24 | "class_name": "Pedestrian",
25 | "anchor_sizes": [
26 | [
27 | 0.8,
28 | 0.6,
29 | 1.73
30 | ]
31 | ],
32 | "anchor_rotations": [
33 | 0,
34 | 1.57
35 | ],
36 | "anchor_bottom_heights": [
37 | -0.6
38 | ],
39 | "align_center": false,
40 | "feature_map_stride": 8,
41 | "matched_threshold": 0.5,
42 | "unmatched_threshold": 0.35
43 | },
44 | {
45 | "class_name": "Cyclist",
46 | "anchor_sizes": [
47 | [
48 | 1.76,
49 | 0.6,
50 | 1.73
51 | ]
52 | ],
53 | "anchor_rotations": [
54 | 0,
55 | 1.57
56 | ],
57 | "anchor_bottom_heights": [
58 | -0.6
59 | ],
60 | "align_center": false,
61 | "feature_map_stride": 8,
62 | "matched_threshold": 0.5,
63 | "unmatched_threshold": 0.35
64 | }
65 | ]
--------------------------------------------------------------------------------
/configs/det_3d/pointpillar/anchors.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "class_name": "Car",
4 | "anchor_sizes": [
5 | [
6 | 3.9,
7 | 1.6,
8 | 1.56
9 | ]
10 | ],
11 | "anchor_rotations": [
12 | 0,
13 | 1.57
14 | ],
15 | "anchor_bottom_heights": [
16 | -1.78
17 | ],
18 | "align_center": false,
19 | "feature_map_stride": 2,
20 | "matched_threshold": 0.6,
21 | "unmatched_threshold": 0.45
22 | },
23 | {
24 | "class_name": "Pedestrian",
25 | "anchor_sizes": [
26 | [
27 | 0.8,
28 | 0.6,
29 | 1.73
30 | ]
31 | ],
32 | "anchor_rotations": [
33 | 0,
34 | 1.57
35 | ],
36 | "anchor_bottom_heights": [
37 | -0.6
38 | ],
39 | "align_center": false,
40 | "feature_map_stride": 2,
41 | "matched_threshold": 0.5,
42 | "unmatched_threshold": 0.35
43 | },
44 | {
45 | "class_name": "Cyclist",
46 | "anchor_sizes": [
47 | [
48 | 1.76,
49 | 0.6,
50 | 1.73
51 | ]
52 | ],
53 | "anchor_rotations": [
54 | 0,
55 | 1.57
56 | ],
57 | "anchor_bottom_heights": [
58 | -0.6
59 | ],
60 | "align_center": false,
61 | "feature_map_stride": 2,
62 | "matched_threshold": 0.5,
63 | "unmatched_threshold": 0.35
64 | }
65 | ]
--------------------------------------------------------------------------------
/docs/source/Chinese/tasks/tasks/seg.rst:
--------------------------------------------------------------------------------
1 | 分割
2 | ====
3 |
4 | UP支持分割任务训练、部署、推理的全部流程;
5 | `具体代码 `_
6 |
7 | 配置文件
8 | --------
9 |
10 | `代码仓库 `_
11 | 其中包括常用算法配置文件
12 |
13 | 数据集相关模块
14 | --------------
15 |
16 | 1. 数据集类型包括:
17 |
18 | * cityscapes
19 |
20 | 2. 数据集类型通过设置SegDataset的seg_type来选择,默认为cityscapes,配置文件示例如下:
21 |
22 | .. code-block:: yaml
23 |
24 | dataset:
25 | type: cls
26 | kwargs:
27 | seg_type: cityscapes # 默认为cityscapes,选项包括: [cityscapes]
28 | meta_file: cityscapes/fine_train.txt
29 | image_reader:
30 | type: fs_opencv
31 | kwargs:
32 | image_dir: cityscapes
33 | color_mode: RGB
34 | seg_label_reader:
35 | type: fs_opencv
36 | kwargs:
37 | image_dir: cityscapes
38 | color_mode: GRAY
39 | transformer: [*seg_rand_resize, *flip, *seg_crop_train, *to_tensor, *normalize]
40 | num_classes: *num_classes
41 | ignore_label: 255
42 |
43 | 部署模块
44 | --------
45 |
46 | 转换kestrel模型时,需要设置具体配置如下:
47 |
48 | .. code-block:: yaml
49 |
50 | to_kestrel:
51 | toks_type: seg
52 | plugin: psyche
53 | model_name: model # tar模型文件名的前缀以及meta.json中的model_name
54 | version: 1.0.0
55 | resize_hw: 640x1024
56 |
--------------------------------------------------------------------------------
/up/tasks/nas/metax/models/backbones/ssds/base_ssd.py:
--------------------------------------------------------------------------------
1 | import random
2 | from abc import ABCMeta, abstractmethod
3 |
4 |
5 | __all__ = ["BaseSSD"]
6 |
7 |
class BaseSSD(metaclass=ABCMeta):
    """Abstract base class for a searchable architecture descriptor (SSD).

    Subclasses must provide the per-cell choice lists (get_SSD) and may
    additionally expose global choices and configuration-resolution hooks.
    """

    @abstractmethod
    def __init__(self):
        pass

    @abstractmethod
    def get_SSD(self):
        """Return the per-cell list of choice lists (the base config)."""
        pass

    def get_global_SSD(self):
        """Return the list of global choice lists; empty by default."""
        global_SSD_list = []
        return global_SSD_list

    def resolve_SSD(self, cell_config, **kwargs):
        """Translate a sampled cell configuration into a concrete config.

        Raises:
            NotImplementedError: subclasses must override this hook.
        """
        # Idiomatic marker for an abstract hook (still an Exception subclass,
        # so existing broad handlers keep working); message spelling fixed.
        raise NotImplementedError('resolve_SSD needs to be overridden')

    def check_global_var(self):
        """Return True when the SSD declares a non-empty 'global_var'."""
        if hasattr(self.SSD, 'global_var') and len(self.SSD.global_var) > 0:
            return True
        else:
            return False

    def resolve_global_var(self, cell_config):
        """Translate sampled global-variable choices into concrete values.

        Raises:
            NotImplementedError: subclasses must override this hook.
        """
        raise NotImplementedError('resolve_global_var needs to be overridden')

    def get_random_samples(self):
        """Draw one random choice index per global choice list, then per
        cell-level choice list for each of the self.cell_num cells.

        NOTE(review): relies on subclasses defining `self.cell_num`.
        """
        global_ssd_list = self.get_global_SSD()
        ssd_list = self.get_SSD()
        sample = [random.randint(0, len(choices) - 1) for choices in global_ssd_list]
        for _ in range(self.cell_num):
            sample.extend(random.randint(0, len(choices) - 1) for choices in ssd_list)
        return sample
49 |
--------------------------------------------------------------------------------
/up/extensions/csrc/cross_focal_loss/cross_focal_loss.h:
--------------------------------------------------------------------------------
1 | #ifndef CROSS_FOCAL_LOSS_H_
2 | #define CROSS_FOCAL_LOSS_H_
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 | using at::Tensor;
9 |
10 | #define CHECK_CUDA(x) AT_ASSERTM(x.is_cuda(), #x " must be a CUDA tensor")
11 | #define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
12 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
13 |
14 | int cross_focal_loss_sigmoid_forward_cuda(
15 | int N,
16 | Tensor logits,
17 | Tensor targets,
18 | float weight_pos,
19 | float gamma,
20 | float alpha,
21 | int num_classes,
22 | Tensor losses,
23 | Tensor neg_map);
24 |
25 | int cross_focal_loss_sigmoid_backward_cuda(
26 | int N,
27 | Tensor logits,
28 | Tensor targets,
29 | Tensor dX_data,
30 | float weight_pos,
31 | float gamma,
32 | float alpha,
33 | int num_classes,
34 | Tensor neg_map);
35 |
36 | int CrossSigmoidFocalLossForwardLauncher(
37 | const int N, Tensor logits,
38 | Tensor targets, const float weight_pos,
39 | const float gamma, const float alpha,
40 | const int num_classes, Tensor losses, Tensor neg_map);
41 |
42 | int CrossSigmoidFocalLossBackwardLauncher(
43 | const int N, Tensor logits,
44 | Tensor targets, Tensor dX_data, const float weight_pos,
45 | const float gamma, const float alpha, const int num_classes,
46 | Tensor neg_map);
47 |
48 | #endif
49 |
--------------------------------------------------------------------------------
/benchmark/3d_detection_benchmark.md:
--------------------------------------------------------------------------------
1 | ## baseline
2 | | model | backbone | anchor | dataset | bs | epoch | 3d_AP Car/Pedestrian/Cyclist| model |
3 | | ----------------------- | --------------- | --- | ----- | -- | --- | ----------------------------- | ----- |
4 | | [pointpillar](https://github.com/ModelTC/United-Perception/tree/main/configs/det_3d/pointpillar/pointpillar.yaml) | PillarVFE | yes | kitti | 32 | 80 | 76.99/49.40/63.35 | [ckpt](https://github.com/ModelTC/United-Perception/releases/download/0.2.0_github/pointpillar.pth) |
5 | | [second](https://github.com/ModelTC/United-Perception/tree/main/configs/det_3d/second/second.yaml)| VoxelBackBone8x | yes | kitti | 32 | 80 | 78.65/53.76/64.23 | [ckpt](https://github.com/ModelTC/United-Perception/releases/download/0.2.0_github/second.pth) |
6 | | [centerpoint_pillar](https://github.com/ModelTC/United-Perception/tree/main/configs/det_3d/centerpoint/centerpoint_pillar.yaml)| PillarVFE | no | kitti | 32 | 80 | 75.00/51.10/60.12 | [ckpt](https://github.com/ModelTC/United-Perception/releases/download/0.2.0_github/centerpoint_pillar.pth) |
7 | | [centerpoint_second](https://github.com/ModelTC/United-Perception/tree/main/configs/det_3d/centerpoint/centerpoint_second.yaml) | VoxelBackBone8x | no | kitti | 32 | 80 | 77.28/54.31/68.20 | [ckpt](https://github.com/ModelTC/United-Perception/releases/download/0.2.0_github/centerpoint_second.pth) |
8 |
--------------------------------------------------------------------------------
/docs/source/Chinese/tutorials/guide/trainer.rst:
--------------------------------------------------------------------------------
1 | 训练器配置
2 | ==========
3 |
4 | 该部分用于控制训练过程,包括warmup策略,优化算法,学习率调整等
5 |
6 | .. note::
7 |
8 | * warmup 为逐步放大学习率以平稳训练过程的算法, UP提供了多种warmup方式:"exp", "linear", "no_scale_lr"
9 | * 如果使用 warmup, warmup 初始学习率等于 base_lr * warmup_ratio, warmup 过程结束后的学习率为 base_lr * total_batch_size, total_batch_size 等于 batch size * gpu 的数量
10 | * 配置文件 config 中的 lr 为 batch 中单张图像的学习率,如lr=0.00125,使用 8 张卡(每张 batch size 为 2)的情况下,最终学习率等于0.00125*16=0.02
11 | * 在配置文件 config 中设置 warmup_epochs 或者 warmup_iter 以开启 warmup 过程,warmup_epochs 等同于 warmup_iter,warmup_epochs 会自动转换为 warmup_iter
12 | * only_save_latest参数支持仅保存上一阶段的模型且使用时会使save_freq失效
13 |
14 | .. code-block:: yaml
15 |
16 | trainer: # Required.
17 | max_epoch: 14 # total epochs for the training
18 | test_freq: 14 # test every 14 epochs (当大于max_epoch,则只在训练结束时进行测试)
19 | save_freq: 1 # 模型保存的epoch间隔
20 | # only_save_latest: False # 如果是True,仅保存上一阶段的模型且save_freq失效
21 | optimizer:
22 | type: SGD
23 | kwargs:
24 | lr: 0.00125
25 | momentum: 0.9
26 | weight_decay: 0.0001
 27 |         lr_scheduler: # lr_scheduler = MultiStepLR(optimizer, milestones=[9,12], gamma=0.1)
28 | warmup_epochs: 1 # set to be 0 to disable warmup.
29 | # warmup_type: exp
30 | type: MultiStepLR
31 | kwargs:
32 | milestones: [9,12] # epochs to decay lr
33 | gamma: 0.1 # decay rate
34 |
--------------------------------------------------------------------------------
/up/utils/env/analysis_utils.py:
--------------------------------------------------------------------------------
1 | import psutil
2 | from .dist_helper import env
3 |
4 |
def get_sigle_node_memory_info(get_total=False, node=0):
    """Collect RAM and swap usage of the local node as a flat dict.

    Sizes are reported in GiB rounded to 3 decimals; percentages come straight
    from psutil.  Totals are included only when ``get_total`` is True.
    NOTE(review): the ``node`` argument is currently unused -- key names carry
    a plain 'node' prefix without the node index; confirm intent with callers.
    """
    gib = 1024. ** 3
    prefix = 'node'
    info = {}

    vmem = psutil.virtual_memory()
    if get_total:
        info[f'{prefix}_mem_total'] = round(vmem.total / gib, 3)
    info[f'{prefix}_mem_used'] = round(vmem.used / gib, 3)
    info[f'{prefix}_mem_used_percent'] = vmem.percent

    smem = psutil.swap_memory()
    if get_total:
        info[f'{prefix}_swap_mem_total'] = round(smem.total / gib, 3)
    info[f'{prefix}_swap_mem_used_percent'] = smem.percent
    return info
23 |
24 |
def get_memory_info(get_total=False, gpu_per_node=8):
    """Gather host-memory stats on each node's representative rank.

    Returns ``(memory_info, node_list)``: ``node_list`` holds the ranks that
    report (one per node, from :func:`split_node`); ``memory_info`` is filled
    only on those ranks and stays empty everywhere else.
    """
    node_list = split_node(gpu_per_node)
    if env.rank not in node_list:
        return {}, node_list
    info = get_sigle_node_memory_info(get_total, env.rank // gpu_per_node)
    return dict(info), node_list
32 |
33 |
def split_node(gpu_per_node=8):
    """Return the ranks (first rank of every node) that act as reporters.

    When the world fits on a single node only rank 0 reports; otherwise the
    world size must divide evenly into nodes of ``gpu_per_node`` ranks.
    """
    world_size = env.world_size
    if world_size <= gpu_per_node:
        return [0]
    assert world_size % gpu_per_node == 0
    num_nodes = world_size // gpu_per_node
    return [node_idx * gpu_per_node for node_idx in range(num_nodes)]
41 |
--------------------------------------------------------------------------------
/docs/source/Chinese/tasks/tasks/sparse.rst:
--------------------------------------------------------------------------------
1 | 稀疏训练
2 | ========
3 |
4 | UP支持稀疏训练;
5 | `具体代码 `_
6 |
7 | 配置文件
8 | --------
9 |
10 | `代码仓库 `_
11 | 其中包括常用算法配置文件
12 |
13 | 数据集相关模块
14 | --------------
15 |
16 | 1. 数据集类型包括:
17 |
18 | * imagenet
19 | * custom_cls
20 | * coco
21 |
22 | 2. 数据集类型通过设置Dataset的type来选择,配置文件示例如下:
23 |
24 | .. code-block:: yaml
25 |
26 | dataset:
27 | type: cls * 或者 coco
28 | kwargs:
29 | ...
30 |
31 | 3. 后续数据集写法同选择类型。
32 |
33 | 稀疏训练设置
34 | ------------
35 |
36 | .. code-block:: yaml
37 |
38 | runtime:
39 | runner:
40 | type: sparse
41 |
42 | sparsity:
43 | mask_generator:
44 | type: NormalMaskGenerator
45 | fake_sparse:
46 | type: FakeSparse
47 | scheduler:
48 | type: AmbaLevelPruneScheduler
49 | kwargs:
50 | total_iters: None
51 | sparsity_table: [30,40,50,60]
52 | no_prune_keyword: ''
53 | no_prune_layer: ''
54 | prun_algo: 1
55 | prun_algo_tuning: 0.5
56 | dw_no_prune: False
57 | do_sparse_analysis: False
58 | output_dir: path_to/amba/faster_rcnn_r50_fpn_improve_amba_sparse_30_to_90/sparse_analysis
59 | save_dir: path_to/amba/faster_rcnn_r50_fpn_improve_amba_sparse_30_to_90/sparse_ckpts
60 | leaf_module: [Space2Depth, FrozenBatchNorm2d]
61 |
62 |
63 |
--------------------------------------------------------------------------------
/docs/source/Chinese/benchmark/index.rst:
--------------------------------------------------------------------------------
1 | 基准/Benchmark
2 | ==============
3 |
4 | .. toctree::
5 | :maxdepth: 2
6 |
7 | UP 各版本性能基准参见以下链接
8 |
9 | Det
10 | ---
11 |
12 | v0.3.0 基准 `detection benchmark v0.3.0 `_
13 |
14 | 3DDet
15 | -----
16 |
17 | v0.3.0 基准 `3D detection benchmark v0.3.0 `_
18 |
19 | Cls
20 | ---
21 |
22 | v0.3.0 基准 `classification benchmark v0.3.0 `_
23 |
24 | Seg
25 | ---
26 |
27 | v0.3.0 基准 `segmentation v0.3.0 `_
28 |
29 | Multitask
30 | ---------
31 |
32 | v0.3.0 基准 `multitask v0.3.0 `_
33 |
34 | Distillation
35 | ------------
36 |
37 | v0.3.0 基准 `distillation v0.3.0 `_
38 |
39 | SSL
40 | ------------
41 |
42 | v0.3.0 基准 `self-supervised learning v0.3.0 `_
43 |
44 | Quant
45 | -----
46 |
47 | v0.3.0 基准 `quant v0.3.0 `_
48 |
49 | Sparse
50 | ------
51 |
52 | v0.3.0 基准 `sparse training v0.3.0 `_
53 |
--------------------------------------------------------------------------------
/benchmark/distillation.md:
--------------------------------------------------------------------------------
1 | # Distillation
2 |
3 | ## Detection
4 |
5 | Results on COCO dataset. Teacher and Student performance:
6 |
7 | |config | scheduler | AP | AP50 | AP75 | APs | APm | APl |
8 | |------|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
9 | | Student(Res50) | 12e | 38.0 | 59.1 | 41.1 | 23.0 | 41.9 | 48.3 |
10 | | Teacher(Res152) | 12e | 42.3 | 63.4 | 46.2 | 26.3 | 46.5 | 54.0 |
11 |
12 | Mimic methods performance:
13 |
14 | |config | scheduler | AP | AP50 | AP75 | APs | APm | APl |
15 | |------|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
16 | | [baseline(neck_mimic)](https://github.com/ModelTC/United-Perception/blob/main/configs/distiller/det/faster_rcnn/faster_rcnn_r152_50_1x_feature_mimic.yaml) | 12e | 39.5 | 60.5 | 43.2 | 24.2 | 42.9 | 51.5 |
17 | | [SampleFeature](https://github.com/ModelTC/United-Perception/blob/main/configs/distiller/det/faster_rcnn/faster_rcnn_r152_50_1x_sample_feature_mimic.yaml) | 12e | 39.8 | 60.7 | 43.2 | 24.1 | 43.5 | 50.7 |
18 | | [FRS](https://github.com/ModelTC/United-Perception/blob/main/configs/distiller/det/faster_rcnn/faster_rcnn_r152_50_1x_frs.yaml) | 12e | 40.9 | 61.5 | 44.4 | 24.5 | 45.0 | 52.7 |
19 | | [DeFeat](https://github.com/ModelTC/United-Perception/blob/main/configs/distiller/det/faster_rcnn/faster_rcnn_r152_50_1x_decouple_feature_mimic.yaml) | 12e | 41.0 | 61.8 | 44.8 | 24.0 | 45.4 | 53.3 |
20 |
21 | An advanced usage of the distillation is to mimic one student with multi teachers by multi methods like [link](https://github.com/ModelTC/United-Perception/blob/main/configs/distiller/det/faster_rcnn/faster_rcnn_r152_50_1x_multi_jobs_multi_teacheres.yaml)
22 |
23 |
--------------------------------------------------------------------------------
/up/tasks/sparse/models/heads/cls_head.py:
--------------------------------------------------------------------------------
1 | from up.utils.general.registry_factory import MODULE_ZOO_REGISTRY
2 | from up.tasks.cls.models.heads import BaseClsHead, ConvNeXtHead
3 |
4 | __all__ = ['SparseBaseClsHead', 'SparseConvNeXtHead']
5 |
6 |
@MODULE_ZOO_REGISTRY.register('sparse_base_cls_head')
class SparseBaseClsHead(BaseClsHead):
    """Classification head for sparse training, reusing BaseClsHead layers."""

    def __init__(self, num_classes, in_plane, input_feature_idx=-1, use_pool=True, dropout=None):
        # Bug fix: the original forwarded hard-coded defaults
        # (input_feature_idx=-1, use_pool=True, dropout=None) to the parent,
        # silently ignoring whatever the caller configured.
        super(SparseBaseClsHead, self).__init__(num_classes, in_plane,
                                                input_feature_idx=input_feature_idx,
                                                use_pool=use_pool, dropout=dropout)

    def forward_net(self, x):
        """Pool, apply dropout, and classify the selected feature map.

        Expects ``x['features']`` to be an indexable collection of feature
        tensors; returns ``{'logits': ...}``.
        """
        x = x['features'][self.input_feature_idx]
        x = self.get_pool_output(x)
        x = self.get_dropout(x)
        logits = self.get_logits(x)
        return {'logits': logits}
19 |
20 |
@MODULE_ZOO_REGISTRY.register('sparse_convnext_head')
class SparseConvNeXtHead(ConvNeXtHead):
    """ConvNeXt classification head for sparse training."""

    def __init__(self,
                 num_classes,
                 in_plane,
                 input_feature_idx=-1,
                 head_init_scale=1.,
                 use_pool=True,
                 dropout=None):
        # Bug fix: the original passed (input_feature_idx, use_pool, dropout)
        # positionally and dropped head_init_scale entirely; forward everything
        # by keyword so each value lands on the matching parent parameter.
        # NOTE(review): assumes ConvNeXtHead accepts these keyword names
        # (its signature is defined elsewhere) -- confirm.
        super(SparseConvNeXtHead, self).__init__(num_classes, in_plane,
                                                 input_feature_idx=input_feature_idx,
                                                 head_init_scale=head_init_scale,
                                                 use_pool=use_pool,
                                                 dropout=dropout)

    def forward_net(self, x):
        """Pool, layer-norm, apply dropout, and classify the selected feature map."""
        x = x['features'][self.input_feature_idx]
        x = self.get_pool_output(x)
        x = self.layer_norm(x)
        x = self.get_dropout(x)
        logits = self.get_logits(x)
        return {'logits': logits}
39 |
--------------------------------------------------------------------------------
/docs/source/Chinese/tasks/tasks/multitask.rst:
--------------------------------------------------------------------------------
1 | 多任务联合训练
2 | ==============
3 |
4 | UP支持多任务的联合训练,推理; 训练流程为每个task分支网络处理对应的训练数据,计算loss,之后将各任务loss计算总和并反向传播更新梯度
5 | `具体代码 `_
6 |
7 | 配置文件
8 | --------
9 |
10 | `配置文件 `_
11 | 其中包括示例配置文件
12 |
13 | 配置文件示例
14 | ------------
15 |
16 | 具体配置细节可以参考上述配置文件仓库,以下为必要设置示例:
17 |
18 | .. code-block:: yaml
19 |
20 | # runner: 修改runner
21 | runtime:
22 | runner:
23 | type: multitask
24 | task_names: &task_names [det, cls]
25 | ...
26 |
27 | # dataset部分需要写出所需task的train/test,必须包含train字段(作为主task,epoch等按照主task来计算)
28 | dataset:
29 | train: ...
30 | test: ...
31 | train_cls: ...
32 | test_cls: ...
33 |
34 | # multitask cfg
35 | multitask_cfg:
36 | notions:
37 | det: &det 0 # 后续用引用代替数字,更稳妥
38 | cls: &cls 1
39 | task_names: *task_names # task名称
40 | datasets: # 指定每个task的训练/测试数据集
41 | train: [train, train_cls]
42 | test: [test, test_cls]
43 | debug: &multitask_debug false
44 |
45 | # net: 需要对每个head做一些特殊的操作,方便控制流程
46 | - name: roi_head
47 | prev: neck
48 | type: RetinaHeadWithBN
49 | kwargs:
50 | ...
51 | wrappers:
52 | - &det_wrapper
53 | type: multitask_wrapper
54 | kwargs:
55 | cfg:
56 | idxs: [ *det ] # 主要通过这个idxs控制,此处表明这个head只会在det数据时forward
57 | debug: *multitask_debug
58 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/yolov5/utils/optimizer_helper.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | from up.utils.model.optimizer_helper import BaseOptimizer
3 | from up.utils.general.registry_factory import OPTIMIZER_REGISTRY
4 |
5 |
6 | __all__ = ['Yolov5Optimizer']
7 |
8 |
@OPTIMIZER_REGISTRY.register('yolov5')
class Yolov5Optimizer(BaseOptimizer):
    """Optimizer builder producing YOLOv5-style parameter groups.

    Group 0: batch-norm weights (no weight decay); group 1: other module
    weights (decayed); group 2: biases (no weight decay); group 3: any
    leftover parameters not collected by the module walk.
    """

    def get_trainable_params(self, cfg_optim):
        """Split model parameters into the four groups described above."""
        decay = cfg_optim['kwargs']['weight_decay']
        groups = [{"params": []} for _ in range(4)]
        collected = []

        for name, module in self.model.named_modules():
            bias = getattr(module, 'bias', None)
            if isinstance(bias, nn.Parameter):
                groups[2]["params"].append(bias)  # biases, never decayed
                collected.append(name + ".bias")
                groups[2]["weight_decay"] = 0.0
            if isinstance(module, nn.BatchNorm2d):
                groups[0]["params"].append(module.weight)  # BN weights, no decay
                collected.append(name + ".weight")
                groups[0]["weight_decay"] = 0.0
            else:
                weight = getattr(module, 'weight', None)
                if isinstance(weight, nn.Parameter):
                    groups[1]["params"].append(weight)
                    collected.append(name + ".weight")
                    groups[1]["weight_decay"] = decay

        # Anything the module walk missed still needs to be optimized.
        for param_name, param in self.model.named_parameters():
            if param_name not in collected:
                groups[-1]["params"].append(param)

        return groups
35 |
--------------------------------------------------------------------------------
/up/tasks/ssl/data/ssl_dataset.py:
--------------------------------------------------------------------------------
1 | import json
2 | from up.utils.general.petrel_helper import PetrelHelper
3 | from up.tasks.cls.data.cls_dataset import BaseParser, CLS_PARSER_REGISTRY
4 |
5 |
@CLS_PARSER_REGISTRY.register('moco_imagenet')
class MoCoParser(BaseParser):
    """Parse imagenet-style 'filename label' meta files for MoCo pretraining.

    The label column is deliberately discarded: self-supervised training
    needs no class labels, so every record is stored with label 0.
    """

    def parse(self, meta_file, idx, metas, start_index=0, rank_indices=None):
        with PetrelHelper.open(meta_file) as f:
            for line_no, line in enumerate(f):
                # When sharding by rank, skip lines outside this rank's slice.
                if rank_indices is not None and (start_index + line_no) not in rank_indices:
                    continue
                filename, _ = line.strip().split()
                metas.append({
                    'filename': filename,
                    'label': 0,
                    'image_source': idx,
                })
        return metas
21 |
22 |
@CLS_PARSER_REGISTRY.register('moco_custom')
class CustomMoCoParser(BaseParser):
    """Parse JSON-lines meta files for MoCo pretraining.

    Each line is a JSON object with at least a 'filename' field; labels are
    forced to 0 since self-supervised training ignores them.
    """

    def parse(self, meta_file, idx, metas, start_index=0, rank_indices=None):
        with PetrelHelper.open(meta_file) as f:
            for line_no, line in enumerate(f):
                # When sharding by rank, skip lines outside this rank's slice.
                if rank_indices is not None and (start_index + line_no) not in rank_indices:
                    continue
                record = json.loads(line.strip())
                metas.append({
                    'label': 0,
                    'filename': record['filename'],
                    'image_source': idx,
                })
        return metas
39 |
--------------------------------------------------------------------------------
/docs/source/English/benchmark/index.rst:
--------------------------------------------------------------------------------
1 | Benchmark
2 | =========
3 |
4 | .. toctree::
5 | :maxdepth: 2
6 |
7 | See the following links for performance benchmarks for each version of UP.
8 |
9 | Det
10 | ---
11 |
12 | v0.3.0 baseline `detection benchmark v0.3.0 `_
13 |
14 | 3DDet
15 | -----
16 | v0.3.0 baseline `3D detection benchmark v0.3.0 `_
17 |
18 | Cls
19 | ---
20 |
21 | v0.3.0 baseline `classification benchmark v0.3.0 `_
22 |
23 | Seg
24 | ---
25 |
26 | v0.3.0 baseline `segmentation v0.3.0 `_
27 |
28 | Multitask
29 | ---------
30 |
31 | v0.3.0 baseline `multitask v0.3.0 `_
32 |
33 | Distillation
34 | ------------
35 |
36 | v0.3.0 baseline `distillation v0.3.0 `_
37 |
38 | SSL
39 | ------------
40 |
41 | v0.3.0 baseline `self-supervised learning v0.3.0 `_
42 |
43 | Quant
44 | -----
45 |
46 | v0.3.0 baseline `quant v0.3.0 `_
47 |
48 | Sparse
49 | ------
50 |
51 | v0.3.0 baseline `sparse training v0.3.0 `_
52 |
--------------------------------------------------------------------------------
/docs/source/English/tasks/tasks/seg.rst:
--------------------------------------------------------------------------------
1 | Segmentation
2 | ============
3 |
 4 | UP supports the whole pipeline of training, deployment, and inference;
5 |
6 | `Codes `_
7 |
8 | Configs
9 | -------
10 |
11 | It contains the illustration of common configs.
12 |
13 | `Repos `_
14 |
15 | Dataset related modules
16 | -----------------------
17 |
18 | 1. Dataset types:
19 |
20 | * cityscapes
21 |
22 | 2. The type of datasets can be chosen by setting 'seg_type' in SegDataset (default is cityscapes). The config is as followed.
23 |
24 | .. code-block:: yaml
25 |
26 | dataset:
27 | type: cls
28 | kwargs:
29 | seg_type: cityscapes # Default is cityscapes. Options: [cityscapes]
30 | meta_file: cityscapes/fine_train.txt
31 | image_reader:
32 | type: fs_opencv
33 | kwargs:
34 | image_dir: cityscapes
35 | color_mode: RGB
36 | seg_label_reader:
37 | type: fs_opencv
38 | kwargs:
39 | image_dir: cityscapes
40 | color_mode: GRAY
41 | transformer: [*seg_rand_resize, *flip, *seg_crop_train, *to_tensor, *normalize]
42 | num_classes: *num_classes
43 | ignore_label: 255
44 |
45 | Deploying model
46 | ---------------
47 |
48 | Kestrel config needs to be set while converting models:
49 |
50 | .. code-block:: yaml
51 |
52 | to_kestrel:
53 | toks_type: seg
54 | plugin: psyche
55 | model_name: model # prefix of tar-model filename and model_name in meta.json
56 | version: 1.0.0
57 | resize_hw: 640x1024
58 |
--------------------------------------------------------------------------------
/up/utils/model/act_fn.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | import torch.nn as nn
4 | import torch.nn.functional as F
5 |
6 |
class SiLU(nn.Module):
    """Export-friendly SiLU (swish) for torch versions lacking nn.SiLU."""

    @staticmethod
    def forward(x):
        # sigmoid(x) * x  ==  x * sigmoid(x); written stateless for export.
        return torch.sigmoid(x) * x
11 |
12 |
class Hardswish(nn.Module):  # export-friendly version of nn.Hardswish()
    """Hardswish built from hardtanh so it exports to torchscript/CoreML/ONNX."""

    @staticmethod
    def forward(x):
        # x * relu6(x + 3) / 6, with relu6 expressed as hardtanh(., 0, 6)
        return F.hardtanh(x + 3, 0., 6.) * x / 6.
18 |
19 |
class GELU(nn.Module):
    """Tanh-approximated GELU for torch versions lacking nn.GELU.

    Implements 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).
    """

    @staticmethod
    def forward(x):
        # torch.tanh replaces the deprecated F.tanh alias (same math).
        inner = np.sqrt(2 / np.pi) * (x + 0.044715 * torch.pow(x, 3))
        return 0.5 * x * (1 + torch.tanh(inner))
25 |
26 |
# Maps a config 'type' string to (name-abbreviation, activation class).
# Each entry prefers the native torch.nn implementation and falls back to the
# local export-friendly class when the installed torch version lacks it.
_act_cfg = {
    'Hardswish': ('act', torch.nn.Hardswish if hasattr(torch.nn, 'Hardswish') else Hardswish),
    'LeakyReLU': ('act', torch.nn.LeakyReLU),
    'ReLU': ('act', torch.nn.ReLU),
    'Identity': ('act', torch.nn.Identity),
    'Silu': ('act', torch.nn.SiLU if hasattr(torch.nn, 'SiLU') else SiLU),
    'GELU': ('act', torch.nn.GELU if hasattr(torch.nn, 'GELU') else GELU)
}
35 |
36 |
def build_act_fn(cfg, postfix=''):
    """Instantiate an activation layer from a config dict.

    Args:
        cfg: dict with a 'type' key (looked up in ``_act_cfg``) and an
            optional 'kwargs' dict forwarded to the layer constructor.
        postfix: int or str appended to the returned layer name.

    Returns:
        (name, layer): the layer name ('act' + postfix) and the module.

    Raises:
        KeyError: if cfg['type'] is not a known activation type.
        NotImplementedError: if the registered class is None.
    """
    assert isinstance(cfg, dict) and 'type' in cfg
    cfg = cfg.copy()
    act_type = cfg.pop('type')
    act_kwargs = cfg.get('kwargs', {})

    if act_type not in _act_cfg:
        raise KeyError('Unrecognized act type {}'.format(act_type))
    abbr, act_cls = _act_cfg[act_type]
    if act_cls is None:
        raise NotImplementedError

    assert isinstance(postfix, (int, str))
    return abbr + str(postfix), act_cls(**act_kwargs)
55 |
--------------------------------------------------------------------------------
/up/tasks/distill/losses/l2_loss.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 | # from up.utils.general.log_helper import default_logger as logger
5 | from up.utils.general.registry_factory import MIMIC_LOSS_REGISTRY
6 |
7 |
@MIMIC_LOSS_REGISTRY.register('l2_loss')
class L2Loss(nn.Module):
    """Masked L2 (squared-error) mimic loss between student and teacher features.

    Args:
        feat_norm: if True, flatten each feature map to (N, -1) and L2-normalize
            it before the difference is taken (masks are flattened to match).
        batch_mean: if True, divide each level's loss by the batch size;
            otherwise divide by the mask sum (when it is non-zero).
        loss_weight: scalar multiplier applied to the summed loss.
    """

    def __init__(self, feat_norm=False, batch_mean=False, loss_weight=1.0):
        super().__init__()
        self.loss_weight = loss_weight
        self.feat_norm = feat_norm
        self.batch_mean = batch_mean

    def forward(self, s_features, t_features, masks=None):
        """Return the weighted sum of per-level masked L2 losses.

        Args:
            s_features: list of student feature tensors.
            t_features: list of teacher feature tensors (matching shapes).
            masks: optional list of per-level masks; defaults to all-ones.
        """
        assert isinstance(s_features, list) and isinstance(t_features, list), 'features must be list!'
        if masks is not None:
            assert isinstance(masks, list) and len(masks) == len(s_features), 'masks must be consistent with features!'
        else:
            masks = [s.new_ones(s.shape).float().detach() for s in s_features]
        total_loss = 0
        for idx, (s, t) in enumerate(zip(s_features, t_features)):
            if self.feat_norm:
                s = self.normalize_feature(s)
                t = self.normalize_feature(t)
                masks[idx] = masks[idx].view(s.size(0), -1)
            # Bug fix: torch.add(s, -1, t) relied on the removed positional-
            # alpha overload; (s - t) is the equivalent supported form.
            loss = torch.sum(torch.pow(s - t, 2) * masks[idx])
            if self.batch_mean:
                loss = loss / s.size(0)
            elif masks[idx].sum() != 0:
                loss = loss / masks[idx].sum()
            total_loss += loss
        return total_loss * self.loss_weight

    def normalize_feature(self, x, mult=1.0):
        """Flatten to (N, -1) and scale each row to L2 norm ``mult``."""
        x = x.view(x.size(0), -1)
        return x / x.norm(2, dim=1, keepdim=True) * mult
42 |
--------------------------------------------------------------------------------
/docs/source/English/tasks/tasks/sparse.rst:
--------------------------------------------------------------------------------
1 | Sparse training
2 | ===============
3 |
4 | UP supports sparse training;
5 | `Codes `_
6 |
7 | Configs
8 | -------
9 |
10 | It contains the illustration of common configs.
11 |
12 | `Repos `_
13 |
14 | Dataset related modules
15 | -----------------------
16 |
17 | 1. Dataset types:
18 |
19 | * imagenet
20 | * custom_cls
21 | * coco
22 |
23 | 2. The type of datasets can be chosen by setting 'type' in Dataset. The config is as followed.
24 |
25 | .. code-block:: yaml
26 |
27 | dataset:
28 | type: cls * or coco
29 | kwargs:
30 | ...
31 |
32 | 3. The following written should refer to the chosen dataset.
33 |
34 | Sparse training setting
35 | -----------------------
36 |
37 | .. code-block:: yaml
38 |
39 | runtime:
40 | runner:
41 | type: sparse
42 |
43 | sparsity:
44 | mask_generator:
45 | type: NormalMaskGenerator
46 | fake_sparse:
47 | type: FakeSparse
48 | scheduler:
49 | type: AmbaLevelPruneScheduler
50 | kwargs:
51 | total_iters: None
52 | sparsity_table: [30,40,50,60]
53 | no_prune_keyword: ''
54 | no_prune_layer: ''
55 | prun_algo: 1
56 | prun_algo_tuning: 0.5
57 | dw_no_prune: False
58 | do_sparse_analysis: False
59 | output_dir: path_to/amba/faster_rcnn_r50_fpn_improve_amba_sparse_30_to_90/sparse_analysis
60 | save_dir: path_to/amba/faster_rcnn_r50_fpn_improve_amba_sparse_30_to_90/sparse_ckpts
61 | leaf_module: [Space2Depth, FrozenBatchNorm2d]
62 |
63 |
64 |
--------------------------------------------------------------------------------
/up/tasks/nas/metax/models/backbones/ssds/xmnet_ssd.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 | from .base_ssd import BaseSSD
3 |
4 |
5 | __all__ = ['xmnetSSD']
6 |
7 |
class xmnetSSD(BaseSSD):
    """Search-space descriptor for the xmnet backbone family.

    Defines five per-cell choice lists (block type, kernel size, repeat delta,
    channel ratio, expansion) over 5 cells.
    """

    def __init__(self):
        super(xmnetSSD, self).__init__()
        self.SSD = edict()
        self.SSD.block_type = ['mbconv', 'fuseconv']
        self.SSD.kernel_size_choice = [3, 5, 7]
        self.SSD.repeat_choice = [-2, -1, 0, 1, 2]
        self.SSD.channel_choice = [0.5, 0.75, 1.0, 1.25, 1.5]
        self.SSD.expansion_choice = [2, 3, 4, 5, 6]
        self.cell_num = 5

    def get_SSD(self):
        """Return the per-cell choice lists in sampling order."""
        ssd = self.SSD
        return [
            ssd.block_type,
            ssd.kernel_size_choice,
            ssd.repeat_choice,
            ssd.channel_choice,
            ssd.expansion_choice,
        ]

    def resolve_SSD(self, cell_config, BlockArgs, **kwargs):
        """Map one cell's sampled indices onto a concrete BlockArgs instance."""
        (block_idx, kernel_idx, repeat_idx,
         channel_idx, expansion_idx) = cell_config
        return BlockArgs(
            block=self.SSD.block_type[block_idx],
            kernel_size=self.SSD.kernel_size_choice[kernel_idx],
            channel=self.SSD.channel_choice[channel_idx],
            repeat=self.SSD.repeat_choice[repeat_idx],
            expansion=self.SSD.expansion_choice[expansion_idx],
        )
47 |
--------------------------------------------------------------------------------
/up/utils/general/checkpoint.py:
--------------------------------------------------------------------------------
1 | # Standard Library
2 | import warnings
3 |
4 | # Import from third library
5 | import torch
6 | from torch.utils.checkpoint import checkpoint
7 |
8 |
def check_backward_validity(inputs):
    """Return True iff at least one tensor input requires grad; warn otherwise."""
    tensor_grads = (inp.requires_grad for inp in inputs if isinstance(inp, torch.Tensor))
    if any(tensor_grads):
        return True
    warnings.warn("None of the inputs have requires_grad=True. Gradients will be None")
    return False
14 |
15 |
def fully_checkpoint_sequential(functions, segments, *inputs):
    r"""Modified version of torch.utils.checkpoint.checkpoint_sequential for memory efficiency.
    It is assumed that at least one of the inputs have requires_grad=True, so we can checkpoint
    all of the segments at ease.
    Please refer to https://pytorch.org/docs/stable/checkpoint.html#torch.utils.checkpoint.checkpoint_sequential
    for more details.
    """
    assert check_backward_validity(inputs), "At least one of the inputs needs requires_grad=True"

    def run_function(start, end, functions):
        # Build a closure that runs functions[start..end] sequentially,
        # feeding each output into the next function.
        def forward(*inputs):
            input = inputs[0]
            for j in range(start, end + 1):
                input = functions[j](input)
            return input
        return forward

    if isinstance(functions, torch.nn.Sequential):
        # Accept an nn.Sequential by unrolling it into its child modules.
        functions = list(functions.children())

    # Checkpoint every segment, including the last one (the stock
    # checkpoint_sequential runs the final segment without checkpointing).
    segment_size = len(functions) // segments
    end = -1  # stays -1 when segments == 1, so the final call covers everything
    for start in range(0, segment_size * (segments - 1), segment_size):
        end = start + segment_size - 1
        inputs = checkpoint(run_function(start, end, functions), *inputs)
        if not isinstance(inputs, tuple):
            # checkpoint returns a bare tensor for single outputs; re-wrap so
            # the next *inputs unpacking keeps working.
            inputs = (inputs,)
    # Final (checkpointed) segment also absorbs any remainder functions.
    return checkpoint(run_function(end + 1, len(functions) - 1, functions), *inputs)
44 |
--------------------------------------------------------------------------------
/up/tasks/det_3d/data/data_loader.py:
--------------------------------------------------------------------------------
1 | from __future__ import division
2 |
3 | # Import from third library
4 | from torch.utils.data import DataLoader
5 |
6 | from up.utils.general.registry_factory import DATALOADER_REGISTRY, BATCHING_REGISTRY
7 | from up.data.samplers.batch_sampler import InfiniteBatchSampler
8 |
9 |
10 | __all__ = ['PointDataLoader']
11 |
12 |
@DATALOADER_REGISTRY.register('point')
class PointDataLoader(DataLoader):
    """DataLoader for point-cloud datasets.

    Thin wrapper over torch's DataLoader that uses the dataset's own
    ``collate_batch`` as collate_fn and attaches a batch padder looked up
    from BATCHING_REGISTRY.
    """

    def __init__(self,
                 dataset,
                 alignment=1,
                 batch_size=1,
                 shuffle=False,
                 sampler=None,
                 batch_sampler=None,
                 num_workers=0,
                 pin_memory=False,
                 drop_last=False,
                 pad_value=0,
                 pad_type='batch_pad',
                 worker_init=False):
        # alignment / pad_value / pad_type configure the padder; worker_init
        # reseeds each worker; all other args pass through to DataLoader.
        worker_init_fn = None
        if worker_init:
            # Local import so the env helper is only needed when enabled.
            from up.utils.env.gene_env import worker_init_reset_seed
            worker_init_fn = worker_init_reset_seed
        # NOTE: positional pass-through relies on DataLoader's argument order;
        # dataset.collate_batch lands on collate_fn.
        super(
            PointDataLoader,
            self).__init__(
            dataset,
            batch_size,
            shuffle,
            sampler,
            batch_sampler,
            num_workers,
            dataset.collate_batch,
            pin_memory,
            drop_last,
            worker_init_fn=worker_init_fn)
        self.pad = BATCHING_REGISTRY.get(pad_type)(alignment, pad_value)

    def get_data_size(self):
        """Alias of get_epoch_size()."""
        return self.get_epoch_size()

    def get_epoch_size(self):
        """Return the number of batches per epoch, unwrapping InfiniteBatchSampler."""
        if isinstance(self.batch_sampler, InfiniteBatchSampler):
            return len(self.batch_sampler.batch_sampler)  # training
        return len(self.batch_sampler)
54 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/efl/utils/optimizer_helper.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | from up.utils.model.optimizer_helper import BaseOptimizer
3 | from up.utils.general.registry_factory import OPTIMIZER_REGISTRY
4 |
5 |
6 | __all__ = ['Yolov5ExceptOptimizer']
7 |
8 |
@OPTIMIZER_REGISTRY.register('yolov5_except')
class Yolov5ExceptOptimizer(BaseOptimizer):
    """YOLOv5-style parameter grouping with an 'except' list.

    Modules named in ``except_keys`` keep both weight and bias in the decayed
    group (group 1) instead of moving the bias to the no-decay bias group.
    Groups: 0 = BN weights (no decay), 1 = weights (decay), 2 = biases (no decay).
    """

    def get_trainable_params(self, cfg_optim):
        """Build the three parameter groups, honoring ``except_keys``."""
        decay = cfg_optim['kwargs']['weight_decay']
        except_keys = cfg_optim['kwargs'].pop('except_keys', [])
        groups = [{"params": []} for _ in range(3)]

        # Validate the except list against the module names that actually exist.
        module_names = [name for name, _ in self.model.named_modules()]
        for key in except_keys:
            assert key in module_names, f'Current network has no key called {key}'

        for name, module in self.model.named_modules():
            excepted = name in except_keys
            if excepted:
                assert hasattr(module, 'weight'), f'Except key {name} must have weight value'
            bias = getattr(module, 'bias', None)
            if isinstance(bias, nn.Parameter) and not excepted:
                groups[2]["params"].append(bias)  # biases, never decayed
                groups[2]["weight_decay"] = 0.0
            if isinstance(module, nn.BatchNorm2d):
                groups[0]["params"].append(module.weight)  # BN weights, no decay
                groups[0]["weight_decay"] = 0.0
            else:
                weight = getattr(module, 'weight', None)
                if isinstance(weight, nn.Parameter):
                    groups[1]["params"].append(weight)
                    if excepted and isinstance(bias, nn.Parameter):
                        # Excepted modules also keep their bias in the decayed group.
                        groups[1]["params"].append(bias)
                    groups[1]["weight_decay"] = decay
        return groups
35 |
--------------------------------------------------------------------------------
/up/tasks/det_3d/data/metrics/kitti_object_eval_python/README.md:
--------------------------------------------------------------------------------
1 | # kitti-object-eval-python
2 | **Note**: This is borrowed from [traveller59/kitti-object-eval-python](https://github.com/traveller59/kitti-object-eval-python)
3 |
 4 | Fast kitti object detection eval in python (finishes eval in less than 10 seconds), supports 2d/bev/3d/aos and coco-style AP. If you use the command line interface, numba needs some time to compile jit functions.
5 | ## Dependencies
6 | Only support python 3.6+, need `numpy`, `skimage`, `numba`, `fire`. If you have Anaconda, just install `cudatoolkit` in anaconda. Otherwise, please reference to this [page](https://github.com/numba/numba#custom-python-environments) to set up llvm and cuda for numba.
7 | * Install by conda:
8 | ```
9 | conda install -c numba cudatoolkit=x.x (8.0, 9.0, 9.1, depend on your environment)
10 | ```
11 | ## Usage
12 | * commandline interface:
13 | ```
14 | python evaluate.py evaluate --label_path=/path/to/your_gt_label_folder --result_path=/path/to/your_result_folder --label_split_file=/path/to/val.txt --current_class=0 --coco=False
15 | ```
16 | * python interface:
17 | ```Python
18 | import kitti_common as kitti
19 | from eval import get_official_eval_result, get_coco_eval_result
20 | def _read_imageset_file(path):
21 | with open(path, 'r') as f:
22 | lines = f.readlines()
23 | return [int(line) for line in lines]
24 | det_path = "/path/to/your_result_folder"
25 | dt_annos = kitti.get_label_annos(det_path)
26 | gt_path = "/path/to/your_gt_label_folder"
27 | gt_split_file = "/path/to/val.txt" # from https://xiaozhichen.github.io/files/mv3d/imagesets.tar.gz
28 | val_image_ids = _read_imageset_file(gt_split_file)
29 | gt_annos = kitti.get_label_annos(gt_path, val_image_ids)
30 | print(get_official_eval_result(gt_annos, dt_annos, 0)) # 6s in my computer
31 | print(get_coco_eval_result(gt_annos, dt_annos, 0)) # 18s in my computer
32 | ```
33 |
--------------------------------------------------------------------------------
/docs/source/English/tutorials/guide/trainer.rst:
--------------------------------------------------------------------------------
1 | Trainer
2 | =======
3 |
4 | This part introduces how to control the training including warmup strategies, optimization algorithms, adjusting learning rate, and so on.
5 |
6 | .. note::
7 |
8 | * warmup is used to gradually raise the learning rate for smoothing the training, which contains 'exp', 'linear', and 'no_scale_lr'.
9 | * The initial warmup learning rate equals 'base_lr' * 'warmup_ratio'. The learning rate equals 'base_lr' * 'total_batch_size' after warmup, and the 'total_batch_size' equals 'batch size' * 'gpu'.
10 | * 'lr' in config is the learning rate of a single image. For example, using 8 GPUs with 2 batches and lr=0.00125 makes the final learning rate equal 0.00125 * 16 = 0.02.
11 | * Starting warmup by setting 'warmup_epochs' or 'warmup_iter' in the config. 'warmup_epochs' will be transformed into 'warmup_iter'.
12 | * 'only_save_latest' supports only keeping the latest model and will invalidate 'save_freq'.
13 |
14 | .. code-block:: yaml
15 |
16 | trainer: # Required.
17 | max_epoch: 14 # total epochs for the training
18 |   test_freq: 14                 # test every 14 epochs (Only testing after all training when it is larger than max_epoch)
19 |   save_freq: 1                  # save model every save_freq epochs.
20 | # only_save_latest: False # if True, only keep the latest model and invalidate save_freq.
21 | optimizer:
22 | type: SGD
23 | kwargs:
24 | lr: 0.00125
25 | momentum: 0.9
26 | weight_decay: 0.0001
27 |   lr_scheduler:              # lr_scheduler = MultiStepLR(optimizer, milestones=[9,12], gamma=0.1)
28 | warmup_epochs: 1 # set to be 0 to disable warmup.
29 | # warmup_type: exp
30 | type: MultiStepLR
31 | kwargs:
32 | milestones: [9,12] # epochs to decay lr
33 | gamma: 0.1 # decay rate
34 |
--------------------------------------------------------------------------------
/up/tasks/det/models/utils/assigner.py:
--------------------------------------------------------------------------------
1 | # Import from third library
2 | import torch
3 | from up.utils.general.global_flag import ALIGNED_FLAG
4 |
5 |
def get_rois_target_levels(levels, base_scale, rois, base_level=0, minus_min_lvl=False):
    """
    Assign each proposal to the pyramid level it should be pooled from.

    Arguments:
        levels (list of int): available feature levels, e.g. [2, 3, 4, 5, 6]
        base_scale: scale of the minimum level
        rois (FloatTensor): [R, 5] (batch_ix, x1, y1, x2, y2)
        base_level (int): offset added to the computed level before clamping
        minus_min_lvl (bool): if True, shift levels so the smallest becomes 0

    Returns:
        LongTensor [R]: target level per roi, clamped to [min(levels), max(levels)]
    """
    eps = 1e-6
    box_w = rois[:, 3] - rois[:, 1] + ALIGNED_FLAG.offset
    box_h = rois[:, 4] - rois[:, 2] + ALIGNED_FLAG.offset
    roi_scale = (box_w * box_h)**0.5
    # level = base_level + floor(log2(scale / base_scale)), with eps for stability
    target_lvls = (base_level + (roi_scale / base_scale + eps).log2()).floor()
    target_lvls = target_lvls.to(dtype=torch.int64)
    lo, hi = min(levels), max(levels)
    shift = lo if minus_min_lvl else 0
    return torch.clamp(target_lvls, min=lo, max=hi) - shift
26 |
27 |
def map_rois_to_level(levels, base_scale, rois, base_level=0, minus_min_lvl=False):
    """
    Group rois by their assigned pyramid level.

    Returns:
        rois_by_level (list of FloatTensor): rois grouped per level, in level order
        map_back_inds (LongTensor): permutation that restores the original roi
            order after the per-level groups are concatenated
    """
    target_lvls = get_rois_target_levels(levels, base_scale, rois, base_level, minus_min_lvl)
    if minus_min_lvl:
        lowest = min(levels)
        levels = [lvl - lowest for lvl in levels]
    grouped_rois, grouped_inds = [], []
    for lvl in levels:
        picked = torch.nonzero(target_lvls == lvl).reshape(-1)
        grouped_rois.append(rois[picked])
        grouped_inds.append(picked)
    # invert the gather: position j of the concatenated output came from roi
    # grouped_inds[j], so scatter sequential indices back through it
    map_from_inds = torch.cat(grouped_inds)
    map_back_inds = torch.zeros((rois.shape[0], ), dtype=torch.int64, device=rois.device)
    map_back_inds[map_from_inds] = torch.arange(rois.shape[0], device=rois.device)
    return grouped_rois, map_back_inds
43 |
--------------------------------------------------------------------------------
/docs/source/Chinese/tasks/tasks/ssl.rst:
--------------------------------------------------------------------------------
1 | 自监督
2 | ======
3 |
4 | UP支持自监督pretrain模型训练以及finetune下游任务;
5 |
6 | `具体代码 `_
7 |
8 | 配置文件
9 | --------
10 |
11 | * `pretrain `_
12 | * `finetune `_
13 |
14 | pretrain相关模块
15 | ----------------
16 |
17 | 以下为pretrain配置文件示例:
18 |
19 | .. code-block:: yaml
20 |
21 | # pretrain骨干网络配置
22 | net: &subnet
23 | - name: backbone
24 | type: ssl # 自监督自任务
25 | multi_model: # 支持多个模型的骨干网络结构
26 | - name: encoder_q
27 | type: resnet50 # 骨干网络类型(cls模块中的模型)
28 | kwargs: # 其他参数
29 | frozen_layers: []
30 | ...
31 | - name: encoder_k
32 | type: resnet50 # 骨干网络类型(cls模块中的模型)
33 | kwargs: # 其他参数
34 | frozen_layers: []
35 | ...
36 | wrappers:
37 | - type: moco # 自监督模型,支持moco, simclr, simsiam
38 | kwargs:
39 | dim: 128
40 | K: 65536
41 |
42 | - name: post_process
43 | type: base_ssl_postprocess # 自监督后处理模块
44 | kwargs:
45 | ssl_loss: # 自监督损失函数
46 | type: moco_loss
47 |
48 | finetune相关模块
49 | ----------------
50 |
51 | 以下为finetune配置文件示例:
52 |
53 | .. code-block:: yaml
54 |
55 | dataset:
56 | type: cls
57 | kwargs:
58 | meta_file: ...
59 | image_reader:
60 | ...
61 | transformer: ...
62 | fraction: 0.1 # 做finetune的标签比例
63 |
64 | saver:
65 | save_dir: moco_v1_linear_dist8/checkpoints/cls_std
66 | results_dir: moco_v1_linear_dist8/results_dir/cls_std
67 | auto_resume: True
68 | pretrain_model: cls_std/ckpt.pth # pretrain模型参数地址
69 |
70 |
--------------------------------------------------------------------------------
/up/tasks/det/models/losses/smooth_l1_loss.py:
--------------------------------------------------------------------------------
1 | # Import from third library
2 | import torch
3 | import numpy as np
4 |
5 | # Import from local
6 | from up.models.losses.loss import BaseLoss, _reduce
7 | from up.utils.general.registry_factory import LOSSES_REGISTRY
8 |
9 |
10 | __all__ = ['SmoothL1Loss']
11 |
12 |
def smooth_l1_loss(input, target, sigma, reduction='none', normalizer=None, code_weights=None, weights=None):
    """
    Smooth-L1 (Huber-style) loss between input and target.

    The quadratic/linear switch point is beta = 1 / sigma**2. Optional
    ``code_weights`` scale the absolute error per channel (broadcast over the
    first two dims); optional ``weights`` scale the loss per element of the
    first two dims. The result is reduced via ``_reduce``.
    """
    beta = 1. / (sigma**2)
    abs_err = torch.abs(input - target)
    if code_weights is not None:
        abs_err = abs_err * code_weights.view(1, 1, -1)
    loss = torch.where(abs_err < beta,
                       0.5 * abs_err**2 / beta,
                       abs_err - 0.5 * beta)
    if weights is not None:
        assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
        loss = loss * weights.unsqueeze(-1)
    return _reduce(loss, reduction, normalizer=normalizer)
25 |
26 |
@LOSSES_REGISTRY.register('smooth_l1_loss')
class SmoothL1Loss(BaseLoss):
    """Smooth-L1 loss with optional per-channel code weights."""

    def __init__(self,
                 name='smooth_l1',
                 reduction='mean',
                 loss_weight=1.0,
                 sigma=1.0,
                 code_weights=None
                 ):
        """
        Arguments:
            - name (:obj:`str`): name of the loss
            - reduction (:obj:`str`): 'none', 'mean' or 'sum'
            - loss_weight (:obj:`float`): scale applied to the final loss
            - sigma (:obj:`float`): controls the quadratic/linear switch point
              (beta = 1 / sigma**2)
            - code_weights (list of float, optional): per-channel weights on the error
        """
        BaseLoss.__init__(self,
                          name=name,
                          reduction=reduction,
                          loss_weight=loss_weight)
        self.sigma = sigma
        self.key_fields = []
        # Fix: the original assigned self.code_weights = None both before the
        # branch and again in a dead `else` branch; a single if/else suffices.
        if code_weights is not None:
            # NOTE(review): .cuda() here assumes a CUDA device is available at
            # construction time — confirm this is acceptable for CPU-only runs.
            self.code_weights = torch.from_numpy(
                np.array(code_weights, dtype=np.float32)).cuda()
        else:
            self.code_weights = None

    def forward(self, input, target, reduction, normalizer=None, weights=None):
        return smooth_l1_loss(input, target, self.sigma, reduction, normalizer, self.code_weights, weights)
51 |
--------------------------------------------------------------------------------
/configs/det/custom/custom_dataset.yaml:
--------------------------------------------------------------------------------
1 | num_classes: &num_classes 15
2 | runtime:
3 | task_names: det
4 | flip: &flip
5 | type: flip
6 | kwargs:
7 | flip_p: 0.5
8 |
9 | to_tensor: &to_tensor
10 | type: custom_to_tensor
11 |
12 | train_resize: &train_resize
13 | type: keep_ar_resize_max
14 | kwargs:
15 | padding_type: left_top
16 | padding_val: 0
17 | random_size: [10, 20]
18 |
19 | test_resize: &test_resize
20 | type: keep_ar_resize_max
21 | kwargs:
22 | max_size: 416
23 | padding_type: left_top
24 | padding_val: 0
25 |
26 |
27 | dataset:
28 | train:
29 | dataset:
30 | type: custom
31 | kwargs:
32 | num_classes: *num_classes
33 | meta_file: path/your/train.json
34 | image_reader:
35 | type: fs_opencv
36 | kwargs:
37 | image_dir: &image_dir path/your/train_image_dir
38 | color_mode: BGR
39 | transformer: [*flip, *train_resize, *to_tensor]
40 | batch_sampler:
41 | type: base
42 | kwargs:
43 | sampler:
44 | type: dist
45 | kwargs:
46 | fix_seed: false
47 | batch_size: 8
48 | test:
49 | dataset:
50 | type: custom
51 | kwargs:
52 | num_classes: *num_classes
53 |         meta_file: &gt_file path/your/test.json
54 | image_reader:
55 | type: fs_opencv
56 | kwargs:
57 | image_dir: path/your/test_image_dir
58 | color_mode: BGR
59 | transformer: [*test_resize, *to_tensor]
60 | evaluator:
61 | type: MR
62 | kwargs:
63 | gt_file: *gt_file
64 | iou_thresh: 0.5
65 | num_classes: *num_classes
66 | batch_sampler:
67 | type: base
68 | kwargs:
69 | sampler:
70 | type: dist
71 | kwargs: {}
72 | batch_size: 8
73 | dataloader:
74 | type: base
75 | kwargs:
76 | num_workers: 4
77 | alignment: 32
78 | worker_init: true
79 | pad_type: batch_pad
80 |
--------------------------------------------------------------------------------
/up/tasks/cls/data/cls_dataloader.py:
--------------------------------------------------------------------------------
1 | from torch.utils.data import DataLoader
2 | import torch
3 | import numpy as np
4 | from easydict import EasyDict
5 |
6 | from up.utils.general.registry_factory import DATALOADER_REGISTRY, BATCHING_REGISTRY
7 | from up.data.samplers.batch_sampler import InfiniteBatchSampler
8 |
9 |
10 | __all__ = ['ClassDataLoader']
11 |
12 |
@DATALOADER_REGISTRY.register('cls_base')
class ClassDataLoader(DataLoader):
    """DataLoader for classification: stacks images/gts into batch tensors and
    applies an optional registered batch_fn to each collated batch."""

    def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,
                 num_workers=0, pin_memory=False, drop_last=False, batch_fn=None
                 ):
        super(ClassDataLoader, self).__init__(
            dataset, batch_size, shuffle, sampler, batch_sampler, num_workers,
            self._collate_fn, pin_memory, drop_last)
        # batch_fn is a {'type': ..., 'kwargs': ...} config resolved via the registry
        if batch_fn is not None:
            self.batch_fn = BATCHING_REGISTRY.get(batch_fn['type'])(**batch_fn['kwargs'])
        else:
            self.batch_fn = None

    def _collate_fn(self, batch):
        """Collate per-sample records (with .image / .gt / optional 'filename')
        into a single EasyDict batch."""
        images = torch.stack([_.image for _ in batch])
        if isinstance(batch[0].gt, int):
            gts = torch.from_numpy(np.array([_.gt for _ in batch]))
        elif isinstance(batch[0].gt, list):
            gts = torch.stack([torch.from_numpy(np.array(_.gt)) for _ in batch])
        else:
            # Fix: the original only handled tensors with dim() > 0 and left
            # `gts` undefined for 0-dim tensors (NameError at runtime).
            # torch.stack works for 0-dim tensors too, so stack any remaining
            # tensor gt here; behavior for dim() > 0 is unchanged.
            gts = torch.stack([_.gt for _ in batch])

        filenames = [_.get('filename', '') for _ in batch]

        output = EasyDict({
            'image': images,
            'gt': gts,
            'filenames': filenames,
        })
        if self.batch_fn is not None:
            output = self.batch_fn(output)

        return output

    def get_epoch_size(self):
        """Number of batches per epoch (unwraps InfiniteBatchSampler during training)."""
        if isinstance(self.batch_sampler, InfiniteBatchSampler):
            return len(self.batch_sampler.batch_sampler)  # training
        return len(self.batch_sampler)
52 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/onenet/models/postprocess/onenet_predictor.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from up.utils.general.registry_factory import ROI_PREDICTOR_REGISTRY
4 | from up.tasks.det.models.utils.bbox_helper import clip_bbox, filter_by_size
5 |
6 | __all__ = ['OnenetPredictor']
7 |
8 |
@ROI_PREDICTOR_REGISTRY.register('onenet')
class OnenetPredictor(object):
    """Decode OneNet head outputs into per-image top-k detections."""

    def __init__(self, topk_boxes=100, min_size=0.0):
        self.topk_boxes = topk_boxes  # detections kept per image
        self.min_size = min_size      # minimum box size passed to filter_by_size

    @torch.no_grad()
    def predict(self, preds, input):
        """
        Arguments:
            preds: (cls_pred [B, C, N], loc_pred [B, 4, N]) raw head outputs
            input: dict holding 'image_info' used to clip boxes per image

        Returns:
            dict with 'dt_bboxes': [K, 7] rows of
            (batch_ix, x1, y1, x2, y2, score, label)
        """
        img_info = input['image_info']
        cls_pred = preds[0].permute(0, 2, 1)
        loc_pred = preds[1].permute(0, 2, 1)

        B = preds[0].shape[0]
        det_results = []

        for i in range(B):
            cls_pred_per_img = cls_pred[i]
            loc_pred_per_img = loc_pred[i]
            # top-k per class, then top-k over the flattened per-class scores
            topk_score_per_c, topk_ind_per_c = torch.topk(cls_pred_per_img, k=self.topk_boxes)
            topk_boxes_per_c = loc_pred_per_img[:, topk_ind_per_c.reshape(-1)]
            topk_score_all, topk_ind_all = torch.topk(topk_score_per_c.reshape(-1), k=self.topk_boxes)
            topk_boxes_all = topk_boxes_per_c[:, topk_ind_all].transpose(0, 1)

            # labels are 1-based: row index in the flattened [C, topk] layout
            topk_label_all = topk_ind_all // self.topk_boxes + 1

            detections = torch.cat([topk_boxes_all, topk_score_all.unsqueeze(-1), topk_label_all.unsqueeze(-1)], dim=1)

            detections = clip_bbox(detections, img_info[i])
            detections, _ = filter_by_size(detections, self.min_size)

            det_results.append(torch.cat([detections.new_full((detections.shape[0], 1), i), detections], dim=1))

        # Fix: the original appended the all-zero placeholder row when
        # det_results was NON-empty, which injected a spurious detection into
        # every batch and still let torch.cat crash on an empty list. The
        # placeholder exists precisely for the empty case.
        if len(det_results) == 0:
            det_results.append(cls_pred.new_zeros((1, 7)))
        bboxes = torch.cat(det_results, dim=0)
        return {'dt_bboxes': bboxes}
45 |
46 |
def build_onenet_predictor(predictor_cfg):
    """Instantiate a OneNet ROI predictor from its registry config."""
    predictor = ROI_PREDICTOR_REGISTRY.build(predictor_cfg)
    return predictor
49 |
--------------------------------------------------------------------------------
/docs/source/English/tasks/tasks/multitask.rst:
--------------------------------------------------------------------------------
1 | Multi-task joint training
2 | =========================
3 |
4 | UP supports multi-task joint training and inference. The training pipeline firstly gives each task the corresponding training data, secondly computes loss, thirdly sums up the losses, and finally backwards for updating gradients.
5 |
6 | `Codes `_
7 |
8 | Configs
9 | -------
10 |
11 | It contains the illustration of common configs.
12 |
13 | `Configs `_
14 |
15 | Illustration of configs
16 | -----------------------
17 |
18 | The details can be found in the above configs. The necessary settings are as follows.
19 |
20 | .. code-block:: yaml
21 |
22 | # runner: revising runner
23 | runtime:
24 | runner:
25 | type: multitask
26 | task_names: &task_names [det, cls]
27 | ...
28 |
29 |   # Train/test of the task should be set in the dataset part, which must contain 'train'.
30 | dataset:
31 | train: ...
32 | test: ...
33 | train_cls: ...
34 | test_cls: ...
35 |
36 | # multitask cfg
37 | multitask_cfg:
38 | notions:
39 | det: &det 0 # will be further replaced by numbers.
40 | cls: &cls 1
41 | task_names: *task_names # task name.
42 | datasets: # training/testing dataset for every task.
43 | train: [train, train_cls]
44 | test: [test, test_cls]
45 | debug: &multitask_debug false
46 |
47 |   # net: every head needs special operations for easily controlling the pipeline.
48 | - name: roi_head
49 | prev: neck
50 | type: RetinaHeadWithBN
51 | kwargs:
52 | ...
53 | wrappers:
54 | - &det_wrapper
55 | type: multitask_wrapper
56 | kwargs:
57 | cfg:
58 | idxs: [ *det ] # controlling the forwarding task
59 | debug: *multitask_debug
60 |
--------------------------------------------------------------------------------
/up/tasks/det/plugins/yolox/utils/hook_helper.py:
--------------------------------------------------------------------------------
1 | from up.utils.general.registry_factory import HOOK_REGISTRY
2 | from up.utils.general.hook_helper import Hook
3 | from up.utils.env.dist_helper import env
4 | from up.utils.general.log_helper import default_logger as logger
5 |
6 |
7 | __all__ = ['YoloxNoaug']
8 |
9 |
@HOOK_REGISTRY.register('yolox_noaug')
class YoloxNoaug(Hook):
    """Disable strong augmentation for the last `no_aug_epoch` epochs of
    training (the YOLOX schedule): rebuilds the train dataloader with the
    given transformer cfg and enables the head's L1 loss."""

    def __init__(self, runner, no_aug_epoch=15, max_epoch=300, transformer=[], test_freq=1, save_freq=1):
        super(YoloxNoaug, self).__init__(runner)
        self.no_aug_epoch = no_aug_epoch  # number of final epochs without strong aug
        self.max_epoch = max_epoch
        self.transformer = transformer    # transformer cfg used once aug is disabled
        self.flag = False                 # ensures the rebuild happens only once
        self.test_freq = test_freq
        self.save_freq = save_freq

    def before_forward(self, cur_iter, input):
        runner = self.runner_ref()
        if cur_iter >= runner.data_loaders['train'].get_epoch_size() * (self.max_epoch - self.no_aug_epoch):
            if not self.flag:
                logger.info(f"rebuild dataset transformer cfg {self.transformer}")
                runner.config['dataset']['train']['dataset']['kwargs']['transformer'] = self.transformer
                del runner.data_loaders, runner.data_iterators['train']
                import gc
                gc.collect()
                # Fix: the original tested hasattr(self, 'data_iterators') —
                # never true for the hook itself — so the runner's iterator
                # dict was unconditionally replaced, dropping any iterators
                # other than 'train'. Test the runner instead.
                if not hasattr(runner, 'data_iterators'):
                    runner.data_iterators = {}
                logger.info("rebuild dataloader")
                runner.build_dataloaders()
                runner.data_iterators['train'] = iter(runner.data_loaders["train"])
                try:
                    if env.world_size > 1:
                        runner.model.module.yolox_post.use_l1 = True
                    else:
                        runner.model.yolox_post.use_l1 = True
                except AttributeError:
                    # best-effort: not every model exposes yolox_post
                    pass
                runner.test_freq = self.test_freq
                runner.save_freq = self.save_freq
                self.flag = True
45 |
--------------------------------------------------------------------------------
/docs/source/Chinese/tutorials/guide/environment.rst:
--------------------------------------------------------------------------------
1 | 环境变量
2 | ========
3 |
4 | 介绍通过改变环境变量可以实现的功能。环境变量通过如下方式改变。
5 |
6 | .. code-block:: bash
7 |
8 | export xxx=xxxx
9 |
10 | up.__init__
11 | -----------
12 |
13 | UP 通过将 plugins 加入环境变量来读取路径。
14 |
15 | .. code-block:: python
16 |
17 | _PPPATH = 'PLUGINPATH'
18 | if _PPPATH not in os.environ:
19 | return
20 | path_list = os.environ[_PPPATH].split(':')
21 |
22 | up.runner.base_runner
23 | ---------------------
24 |
25 | UP 将 batchsize, display, dataset_timer 加入环境变量。
26 |
27 | * BATCH_SIZE 用于控制 batchsize。
28 |
29 | * DISPLAY_FREQ 用于控制运行中的信息显示频率。
30 |
31 | * DATASET_TIMER_ENABLED, DATASET_TIMER_THRESHOLD 用来控制数据集相关时间的显示。
32 |
33 | .. code-block:: python
34 |
35 | os.environ['BATCH_SIZE'] = str(batch_size)
36 | os.environ['DISPLAY_FREQ'] = str(self.config['args'].get('display', 1)) # The interval of iterations for showing.
37 |     if cfg_dataset_timer: # The time evaluator for datasets.
38 | os.environ['DATASET_TIMER_ENABLED'] = str(1 if cfg_dataset_timer['enabled'] is True else 0)
39 | os.environ['DATASET_TIMER_THRESHOLD'] = str(cfg_dataset_timer['threshold_seconds'])
40 |
41 | up.tasks.__init__
42 | -----------------
43 |
44 | UP 通过将 DEFAULT_TASKS, EXCLUDE_TASKS 加入环境变量来控制任务加载。
45 |
46 | * DEFAULT_TASKS 在 tasks 目录下的所有任务。
47 |
48 | * EXCLUDE_TASKS 不被读取的任务类型。
49 |
50 | .. code-block:: python
51 |
52 | pwd = os.path.dirname(os.path.realpath(__file__))
53 | tasks_names = os.environ.get("DEFAULT_TASKS", os.listdir(pwd)) # loading all tasks.
54 | exclude_tasks = os.environ.get("EXCLUDE_TASKS", '').split(":") # excluding the writing task.
55 |
56 | up.utils.general.petrel_helper
57 | ------------------------------
58 |
59 | UP 将 PETRELPATH 加入环境变量来控制软件。
60 |
61 | .. code-block:: python
62 |
63 | default_conf_path = os.environ.get('PETRELPATH', '~/petreloss.conf')
64 |
65 | up.utils.general.registry
66 | -------------------------
67 |
68 | UP 将 REGTRACE 加入环境变量来控制注册器。
69 |
70 | .. code-block:: python
71 |
72 | _REG_TRACE_IS_ON = os.environ.get('REGTRACE', 'OFF').upper() == 'ON'
73 |
--------------------------------------------------------------------------------
/up/tasks/distill/losses/ce_loss.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 |
5 | # from up.utils.general.log_helper import default_logger as logger
6 | from up.utils.general.registry_factory import MIMIC_LOSS_REGISTRY
7 |
8 |
@MIMIC_LOSS_REGISTRY.register('ce_loss')
class CELoss(nn.Module):
    """
    Temperature-scaled soft-label cross entropy for knowledge distillation.

    Distilling the Knowledge in a Neural Network, NIPS2014.
    https://arxiv.org/pdf/1503.02531.pdf
    """
    def __init__(self, T=1, loss_weight=1.0):
        super().__init__()
        self.loss_weight = loss_weight
        self.t = T  # softmax temperature

    def forward(self, s_preds, t_preds, **kwargs):
        total = 0
        for student_logits, teacher_logits in zip(s_preds, t_preds):
            log_p_student = F.log_softmax(student_logits / self.t, dim=1)
            p_teacher = F.softmax(teacher_logits / self.t, dim=1)
            # cross entropy of the student against the softened teacher target
            total = total + (-p_teacher * log_p_student).sum(dim=1).mean()
        return total * self.loss_weight
27 |
28 |
@MIMIC_LOSS_REGISTRY.register('bce_loss')
class BCELoss(nn.Module):
    """
    Masked binary cross-entropy between student and teacher predictions,
    normalized per batch or by the mask sum.
    """
    def __init__(self, T=1.0, batch_mean=False, loss_weight=1.0):
        super().__init__()
        self.loss_weight = loss_weight
        self.T = T  # kept for config compatibility; not used in forward
        self.batch_mean = batch_mean  # normalize by batch size instead of mask sum

    def forward(self, pred_s, pred_t, masks=None):
        assert isinstance(pred_s, list) and isinstance(pred_t, list), 'preds must be list!'
        if masks is None:
            # all-ones masks: every element contributes
            masks = [s.new_ones(s.shape).float().detach() for s in pred_s]
        else:
            assert isinstance(masks, list) and len(masks) == len(pred_s), 'masks must be consistent with preds!'

        total_loss = 0
        for mask, (student, teacher) in zip(masks, zip(pred_s, pred_t)):
            level_loss = torch.sum(F.binary_cross_entropy(student, teacher, reduction='none') * mask)
            if self.batch_mean:
                level_loss = level_loss / student.size(0)
            elif mask.sum() != 0:
                level_loss = level_loss / mask.sum()
            total_loss = total_loss + level_loss
        return total_loss * self.loss_weight
56 |
--------------------------------------------------------------------------------
/up/utils/general/registry_factory.py:
--------------------------------------------------------------------------------
"""Central registries for every pluggable component in UP.

Components register themselves with decorators
(e.g. ``@LOSSES_REGISTRY.register('smooth_l1_loss')``) and are later built
from config dictionaries by name.

NOTE: the ``*_REGISTY`` names below are misspelled but kept as-is — they are
part of the public API and renaming them would break existing imports.
"""
from .registry import Registry

# model
MODULE_ZOO_REGISTRY = Registry()
MODULE_PROCESS_REGISTRY = Registry()
MODULE_WRAPPER_REGISTRY = Registry()
MODEL_WRAPPER_REGISTRY = Registry()
EMA_REGISTRY = Registry()

# data
DATASET_REGISTRY = Registry()
DATALOADER_REGISTRY = Registry()
BATCH_SAMPLER_REGISTRY = Registry()
AUGMENTATION_REGISTRY = Registry()
BATCHING_REGISTRY = Registry()

# predictor
ROI_PREDICTOR_REGISTRY = Registry()
BBOX_PREDICTOR_REGISTRY = Registry()
MASK_PREDICTOR_REGISTRY = Registry()

# supervisor (target generation for heads)
ROI_SUPERVISOR_REGISTRY = Registry()
BBOX_SUPERVISOR_REGISTRY = Registry()
MASK_SUPERVISOR_REGISTRY = Registry()

# matcher
MATCHER_REGISTRY = Registry()

# sampler
ROI_SAMPLER_REGISTRY = Registry()
SAMPLER_REGISTRY = Registry()

# merger
ROI_MERGER_REGISTRY = Registry()

# lr
WARM_LR_REGISTRY = Registry()
LR_REGISTRY = Registry()

# evaluator
EVALUATOR_REGISTRY = Registry()

# loss
LOSSES_REGISTRY = Registry()

# image reader
IMAGE_READER_REGISTRY = Registry()

# hook
HOOK_REGISTRY = Registry()

# saver
SAVER_REGISTRY = Registry()

# anchor generate
ANCHOR_GENERATOR_REGISTRY = Registry()

# mask target generate
MASK_GENERATOR_REGISTRY = Registry()

# subcommand
SUBCOMMAND_REGISTRY = Registry()

# initializer
INITIALIZER_REGISTRY = Registry()

# runner
RUNNER_REGISTRY = Registry()

# inferencer
INFERENCER_REGISTRY = Registry()
VISUALIZER_REGISTRY = Registry()

# optimizer
OPTIMIZER_REGISTRY = Registry()

LR_SCHEDULER_REGISTY = Registry()

WARM_SCHEDULER_REGISTY = Registry()

DATA_BUILDER_REGISTY = Registry()

MODEL_HELPER_REGISTRY = Registry()

# deploy
DEPLOY_REGISTRY = Registry()
TOONNX_REGISTRY = Registry()

# distill
MIMIC_REGISTRY = Registry()
MIMIC_LOSS_REGISTRY = Registry()

# box_coder
BOX_CODER_REGISTRY = Registry()
96 |
--------------------------------------------------------------------------------
/up/tasks/quant/deploy/quant_deploy.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from up.utils.general.log_helper import default_logger as logger
3 | from up.utils.general.registry_factory import DEPLOY_REGISTRY
4 | from up.tasks.quant.runner import QuantRunner
5 | from mqbench.utils.state import enable_quantization
6 | from up.utils.general.global_flag import DEPLOY_FLAG
7 |
8 |
9 | __all__ = ['QuantDeploy']
10 |
11 |
@DEPLOY_REGISTRY.register('quant')
class QuantDeploy(QuantRunner):
    """Deploy pipeline for quantized models: rebuild and quantize the model,
    restore its checkpoint weights, then export each submodule via MQBench."""

    def __init__(self, config, work_dir='./'):
        # config is stored before delegating so it is available during build()
        self.config = config
        super(QuantDeploy, self).__init__(config, work_dir, False)

    def build(self):
        # Ordering matters: the checkpoint is loaded before quantization so
        # resume_model_from_quant() can restore the saved (quantized) weights,
        # and the dummy input is captured from the not-yet-quantized model.
        self.build_ema()
        self.build_saver()
        self.load_ckpt()
        self.build_model()
        self.get_onnx_dummy_input()
        self.quantize_model()
        self.resume_model_from_quant()
        enable_quantization(self.model)

    def resume_model_from_quant(self):
        # Restore the weights saved in the loaded checkpoint into the
        # quantized model.
        self.model.load_state_dict(self.ckpt['model'])

    def get_onnx_dummy_input(self):
        """Run one test batch through the model and keep its tensor outputs
        as the dummy input for ONNX export."""
        self.model.cuda().eval()
        DEPLOY_FLAG.flag = False  # temporarily leave deploy mode for a normal forward
        self.build_dataloaders()
        self.build_hooks()
        batch = self.get_batch('test')
        output = self.model(batch)
        # only tensors can be fed to the ONNX exporter
        self.dummy_input = {k: v for k, v in output.items() if torch.is_tensor(v)}
        DEPLOY_FLAG.flag = True

    def deploy(self):
        """Export every submodule listed in self.model_list to the backend
        configured under config['quant']['deploy_backend']."""
        logger.info("deploy model")
        from mqbench.convert_deploy import convert_deploy
        deploy_backend = self.config['quant']['deploy_backend']
        self.model.cuda().eval()
        print('ONNX input shape is: ', self.dummy_input['image'].shape)

        for index, mname in enumerate(self.model_list):
            mod = getattr(self.model, mname)
            print('{}/{} model will be exported.'.format(index + 1, len(self.model_list)))
            print('Model name is : ', mname)
            print()
            convert_deploy(model=mod,
                           backend_type=self.backend_type[deploy_backend],
                           dummy_input=self.dummy_input,
                           model_name=mname)
57 |
--------------------------------------------------------------------------------
/up/models/losses/loss.py:
--------------------------------------------------------------------------------
1 | # Import from third library
2 | from torch.nn.modules.loss import _Loss
3 |
4 |
5 | def _reduce(loss, reduction, **kwargs):
6 | if reduction == 'none':
7 | ret = loss
8 | elif reduction == 'mean':
9 | normalizer = loss.numel()
10 | if kwargs.get('normalizer', None):
11 | normalizer = kwargs['normalizer']
12 | ret = loss.sum() / normalizer
13 | elif reduction == 'sum':
14 | ret = loss.sum()
15 | else:
16 | raise ValueError(reduction + ' is not valid')
17 | return ret
18 |
19 |
class BaseLoss(_Loss):
    # Do not call `super(xxx, self).__init__` here: the class decorators used
    # to register losses would make that recurse infinitely, so parent
    # constructors are invoked explicitly via `_Loss.__init__` / `_Loss.__call__`.
    def __init__(self,
                 name='base',
                 reduction='none',
                 loss_weight=1.0):
        r"""
        Arguments:
            - name (:obj:`str`): name of the loss function
            - reduction (:obj:`str`): reduction type, one of 'mean', 'none', 'sum'
            - loss_weight (:obj:`float`): scale applied to the reduced loss
        """
        _Loss.__init__(self, reduction=reduction)
        self.loss_weight = loss_weight
        self.name = name

    def __call__(self, input, target, reduction_override=None, normalizer_override=None, **kwargs):
        r"""
        Compute the weighted loss, optionally overriding the reduction.

        Arguments:
            - input (:obj:`Tensor`)
            - target (:obj:`Tensor`)
            - reduction_override (:obj:`str`): one of 'none', 'mean', 'sum';
              overrides the reduction chosen in __init__
            - normalizer_override (:obj:`float`): overrides the normalizer;
              only valid when the effective reduction is 'mean'
        """
        reduction = reduction_override or self.reduction
        assert (normalizer_override is None or reduction == 'mean'), \
            f'normalizer is not allowed when reduction is {reduction}'
        raw = _Loss.__call__(self, input, target, reduction, normalizer=normalizer_override, **kwargs)
        return raw * self.loss_weight

    def forward(self, input, target, reduction, normalizer=None, **kwargs):
        # subclasses implement the actual loss computation
        raise NotImplementedError
55 |
--------------------------------------------------------------------------------
/up/extensions/csrc/roi_align/roi_align_cuda.cpp:
--------------------------------------------------------------------------------
1 | #include "roi_align/roi_align.h"
2 |
3 | using at::Tensor;
4 |
// Average ROIAlign forward (CUDA): pools each roi from `features` into an
// aligned_height x aligned_width window of `output` via ROIAlignAvgForwardLaucher.
// Returns 0 on success; rois must be [num_rois, 5] (batch_ix, x1, y1, x2, y2).
int roi_align_avg_forward_cuda(bool aligned, int aligned_height, int aligned_width,
                               float spatial_scale, int sampling_ratio,
                               Tensor features, Tensor rois, Tensor output)
{
    // Grab the input tensor (must be CUDA and contiguous)
    CHECK_INPUT(features);
    CHECK_INPUT(rois);
    CHECK_INPUT(output);
    // Number of ROIs
    int num_rois = rois.size(0);
    int size_rois = rois.size(1);
    if (size_rois != 5)
    {
        // NOTE(review): exit(1) aborts the whole process and makes the
        // following `return 1` unreachable; an error return (or exception)
        // would let the Python caller recover.
        exit(1);
        return 1;
    }

    // data height
    int data_height = features.size(2);
    // data width
    int data_width = features.size(3);
    // Number of channels
    int num_channels = features.size(1);


    ROIAlignAvgForwardLaucher(
        aligned, features, spatial_scale, num_rois, data_height,
        data_width, num_channels, aligned_height,
        aligned_width, sampling_ratio, rois,
        output);

    return 0;
}
38 |
// Average ROIAlign backward (CUDA): scatters `top_grad` back into
// `bottom_grad` for each roi via ROIAlignAvgBackwardLaucher.
// Returns 0 on success; rois must be [num_rois, 5] (batch_ix, x1, y1, x2, y2).
int roi_align_avg_backward_cuda(bool aligned, int aligned_height, int aligned_width,
                                float spatial_scale, int sampling_ratio,
                                Tensor top_grad, Tensor rois, Tensor bottom_grad)
{
    // Grab the input tensor (must be CUDA and contiguous)
    CHECK_INPUT(top_grad);
    CHECK_INPUT(rois);
    CHECK_INPUT(bottom_grad);


    // Number of ROIs
    int num_rois = rois.size(0);
    int size_rois = rois.size(1);
    if (size_rois != 5)
    {
        // NOTE(review): exit(1) aborts the whole process; the `return 1`
        // below is unreachable (same pattern as the forward pass).
        exit(1);
        return 1;
    }

    // batch size
    int batch_size = bottom_grad.size(0);
    // data height
    int data_height = bottom_grad.size(2);
    // data width
    int data_width = bottom_grad.size(3);
    // Number of channels
    int num_channels = bottom_grad.size(1);

    ROIAlignAvgBackwardLaucher(
        aligned, top_grad, spatial_scale, batch_size, num_rois, data_height,
        data_width, num_channels, aligned_height,
        aligned_width, sampling_ratio, rois,
        bottom_grad);

    return 0;
}
--------------------------------------------------------------------------------
/up/extensions/csrc/roi_align/roi_align.h:
--------------------------------------------------------------------------------
#ifndef ROIALIGN_H_
#define ROIALIGN_H_

// NOTE(review): the include targets below appear to have been stripped during
// extraction (likely <torch/extension.h> and friends) — restore from the
// original repository source.
#include
#include
#include

// Input-validation helpers: tensors must be CUDA-resident and contiguous.
#define CHECK_CUDA(x) AT_ASSERTM(x.is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)

// for tocaffe
int roi_align_forward(
    bool aligned,
    int aligned_height, int aligned_width,
    float spatial_scale, int sampling_ratio,
    at::Tensor features, at::Tensor rois, at::Tensor output);

// CUDA entry points (average-pooling RoI Align variant).
int roi_align_avg_forward_cuda(
    bool aligned, int aligned_height, int aligned_width,
    float spatial_scale, int sampling_ratio,
    at::Tensor features, at::Tensor rois, at::Tensor output);

int roi_align_avg_backward_cuda(
    bool aligned, int aligned_height, int aligned_width,
    float spatial_scale, int sampling_ratio,
    at::Tensor top_grad, at::Tensor rois, at::Tensor bottom_grad);

// Kernel launchers implemented in the .cu file. ("Laucher" [sic] is the
// existing spelling; keep it for link compatibility.)
int ROIAlignAvgBackwardLaucher(
    const bool aligned, at::Tensor top_diff, const float spatial_scale,
    const int batch_size, const int num_rois, const int height, const int width,
    const int channels, const int aligned_height, const int aligned_width,
    const int sampling_ratio,
    at::Tensor bottom_rois, at::Tensor bottom_diff);

int ROIAlignAvgForwardLaucher(
    const bool aligned, at::Tensor bottom_data, const float spatial_scale,
    const int num_rois, const int height, const int width,
    const int channels, const int aligned_height, const int aligned_width,
    const int sampling_ratio,
    at::Tensor bottom_rois, at::Tensor top_data);

/*
int roi_align_max_forward_cuda(
    bool aligned, int aligned_height, int aligned_width,
    float spatial_scale, int sampling_ratio,
    at::Tensor features, at::Tensor rois, at::Tensor output);

int roi_align_max_backward_cuda(
    bool aligned, int aligned_height, int aligned_width,
    float spatial_scale, int sampling_ratio,
    at::Tensor top_grad, at::Tensor rois, at::Tensor bottom_grad, at::Tensor bottom_data);
*/

#endif
56 |
--------------------------------------------------------------------------------
/up/extensions/csrc/roipoint_pool3d/roipoint_pool3d.cpp:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 |
4 | #define CHECK_CUDA(x) do { \
5 | if (!x.type().is_cuda()) { \
6 | fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \
7 | exit(-1); \
8 | } \
9 | } while (0)
10 | #define CHECK_CONTIGUOUS(x) do { \
11 | if (!x.is_contiguous()) { \
12 | fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \
13 | exit(-1); \
14 | } \
15 | } while (0)
16 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x)
17 |
18 |
19 | void roipool3dLauncher(int batch_size, int pts_num, int boxes_num, int feature_in_len, int sampled_pts_num,
20 | const float *xyz, const float *boxes3d, const float *pts_feature, float *pooled_features, int *pooled_empty_flag);
21 |
22 |
// Pool the points (and their per-point features) that fall inside each 3D box.
// Returns 1 unconditionally; per-box emptiness is reported through
// pooled_empty_flag rather than the return value.
int roipool3d_gpu(at::Tensor xyz, at::Tensor boxes3d, at::Tensor pts_feature, at::Tensor pooled_features, at::Tensor pooled_empty_flag){
    // params xyz: (B, N, 3)
    // params boxes3d: (B, M, 7)
    // params pts_feature: (B, N, C)
    // params pooled_features: (B, M, 512, 3+C)
    // params pooled_empty_flag: (B, M)
    CHECK_INPUT(xyz);
    CHECK_INPUT(boxes3d);
    CHECK_INPUT(pts_feature);
    CHECK_INPUT(pooled_features);
    CHECK_INPUT(pooled_empty_flag);

    // shapes derived from the (B, N/M, ...) layouts documented above
    int batch_size = xyz.size(0);
    int pts_num = xyz.size(1);
    int boxes_num = boxes3d.size(1);
    int feature_in_len = pts_feature.size(2);
    int sampled_pts_num = pooled_features.size(2);


    // NOTE(review): the `.data()` calls below look like `.data<float>()` /
    // `.data<int>()` with template arguments stripped during extraction —
    // confirm against the repository (the modern API is data_ptr<T>()).
    const float * xyz_data = xyz.data();
    const float * boxes3d_data = boxes3d.data();
    const float * pts_feature_data = pts_feature.data();
    float * pooled_features_data = pooled_features.data();
    int * pooled_empty_flag_data = pooled_empty_flag.data();

    roipool3dLauncher(batch_size, pts_num, boxes_num, feature_in_len, sampled_pts_num,
        xyz_data, boxes3d_data, pts_feature_data, pooled_features_data, pooled_empty_flag_data);



    return 1;
}
55 |
56 | /*
57 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
58 | m.def("forward", &roipool3d_gpu, "roipool3d forward (CUDA)");
59 | }
60 | */
61 |
--------------------------------------------------------------------------------
/configs/det/custom/rank_dataset.yaml:
--------------------------------------------------------------------------------
1 | num_classes: &num_classes 10
2 | runtime:
3 | task_names: det
4 | flip: &flip
5 | type: flip
6 | kwargs:
7 | flip_p: 0.5
8 |
9 | to_tensor: &to_tensor
10 | type: custom_to_tensor
11 |
12 | train_resize: &train_resize
13 | type: keep_ar_resize_max
14 | kwargs:
15 | padding_type: left_top
16 | padding_val: 0
17 | random_size: [10, 20]
18 |
19 | test_resize: &test_resize
20 | type: keep_ar_resize_max
21 | kwargs:
22 | max_size: 416
23 | padding_type: left_top
24 | padding_val: 0
25 |
26 |
27 | dataset:
28 | train:
29 | dataset:
30 | type: rank_custom
31 | kwargs:
32 | num_classes: *num_classes
33 | meta_file: path/your/train.json
34 | image_reader:
35 | type: fs_opencv
36 | kwargs:
37 | image_dir: &image_dir path/your/train_image_dir
38 | color_mode: BGR
39 | transformer: [*flip, *train_resize, *to_tensor]
40 | batch_sampler:
41 | type: base
42 | kwargs:
43 | sampler:
44 | type: local # special for rank_dataset
45 | kwargs: {}
46 | batch_size: 8
47 | test:
48 | dataset:
49 | type: custom
50 | kwargs:
51 | num_classes: *num_classes
        meta_file: &gt_file path/your/test.json
53 | image_reader:
54 | type: fs_opencv
55 | kwargs:
56 | image_dir: path/your/test_image_dir
57 | color_mode: BGR
58 | transformer: [*test_resize, *to_tensor]
59 | evaluator:
60 | type: MR
61 | kwargs:
62 | gt_file: *gt_file
63 | iou_thresh: 0.5
64 | num_classes: *num_classes
65 | reload_cfg: # special for rank_dataset
66 | mini_epoch: 1
67 | seed: 0
68 | group: &group 1 # to avoid memory overflow, generate random numbers in groups
69 | batch_sampler:
70 | type: base
71 | kwargs:
72 | sampler:
73 | type: dist
74 | kwargs: {}
75 | batch_size: 8
76 | dataloader:
77 | type: base
78 | kwargs:
79 | num_workers: 4
80 | alignment: 32
81 | worker_init: true
82 | pad_type: batch_pad
83 |
84 | # special for rank_dataset
85 | hooks:
86 | - type: reload
87 | kwargs:
88 | group: *group
89 |
--------------------------------------------------------------------------------
/up/tasks/ssl/wrapper/simclr.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | from up.utils.general.registry_factory import (
5 | MODULE_WRAPPER_REGISTRY
6 | )
7 |
8 |
9 | @MODULE_WRAPPER_REGISTRY.register('simclr')
class SimCLR(nn.Module):
    """SimCLR wrapper: encodes two augmented views per sample and produces
    InfoNCE logits (one positive column followed by negatives).

    Args:
        encoder: backbone callable taking {'image': tensor} and returning a
            dict with a 'features' list of 4-D feature maps.
        mlp: whether to apply a projection head after the encoder.
        num_mlp_layer: number of linear layers in the projection head.
        dim: encoder output (and hidden) width of the projection head.
        output_dim: projection output width.
        temperature: softmax temperature for the logits (was hard-coded 0.07).
    """

    def __init__(self, encoder, mlp=True, num_mlp_layer=2, dim=2048, output_dim=128, temperature=0.07):
        super(SimCLR, self).__init__()

        self.encoder = encoder
        self.mlp = mlp
        self.num_mlp_layer = num_mlp_layer
        self.temperature = temperature

        if mlp:
            if num_mlp_layer == 1:
                self.encoder_fc = nn.Sequential(nn.Linear(dim, output_dim))
            elif num_mlp_layer > 1:
                layers = []
                for _ in range(num_mlp_layer - 1):
                    layers += [nn.Linear(dim, dim), nn.ReLU()]
                layers.append(nn.Linear(dim, output_dim))
                self.encoder_fc = nn.Sequential(*layers)

    def forward(self, input):
        """Return {'logits': (2B, 1 + num_negatives)} for the InfoNCE loss."""
        if isinstance(input, dict):
            input = input['image']
        # input is (B, 2, C, H, W): two views stacked at dim=1 -> one batch of 2B
        input = torch.cat((input[:, 0], input[:, 1]), dim=0)
        features = self.encoder({'image' : input})['features'][-1].mean(dim=[2, 3])
        features = nn.functional.normalize(features, dim=1)
        if self.mlp:
            features = self.encoder_fc(features)

        features = features.view(features.shape[0], -1)
        device = features.device

        # label i matches i + B (the other view of the same image)
        labels = torch.cat([torch.arange(features.shape[0] // 2), torch.arange(features.shape[0] // 2)], dim=0)
        # fix: build the match matrix on the feature device (was CPU-only,
        # which crashed with CUDA features)
        labels = (labels.unsqueeze(0) == labels.unsqueeze(1)).float().to(device)

        features = F.normalize(features, dim=1)

        similarity_matrix = torch.matmul(features, features.T)
        mask = torch.eye(labels.shape[0], dtype=torch.bool, device=device)

        # drop self-similarity (the diagonal)
        labels = labels[~mask].view(labels.shape[0], -1)
        similarity_matrix = similarity_matrix[~mask].view(similarity_matrix.shape[0], -1)

        positives = similarity_matrix[labels.bool()].view(labels.shape[0], -1)
        negatives = similarity_matrix[~labels.bool()].view(similarity_matrix.shape[0], -1)

        # positive column first, so the InfoNCE target index is always 0
        logits = torch.cat([positives, negatives], dim=1)
        logits = logits / self.temperature

        return {'logits': logits}
58 |
--------------------------------------------------------------------------------
/up/tasks/det_3d/data/data_utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | try:
4 | import SharedArray
5 | except: # noqa
6 | pass
7 |
8 |
def check_numpy_to_torch(x):
    """Return (float tensor, True) for numpy input, (x unchanged, False) otherwise."""
    if not isinstance(x, np.ndarray):
        return x, False
    return torch.from_numpy(x).float(), True
13 |
14 |
def keep_arrays_by_name(gt_names, used_classes):
    """Indices (int64 array) of entries in gt_names that appear in used_classes."""
    kept = [idx for idx, name in enumerate(gt_names) if name in used_classes]
    return np.array(kept, dtype=np.int64)
19 |
20 |
def get_pad_params(desired_size, cur_size):
    """
    Get padding parameters for np.pad function
    Args:
        desired_size: int, Desired padded output size
        cur_size: int, Current size. Should always be less than or equal to desired_size
    Returns:
        pad_params: tuple(int), Number of values padded to the edges (before, after)
    """
    assert desired_size >= cur_size
    # pad only at the trailing edge
    return (0, desired_size - cur_size)
37 |
38 |
def rotate_points_along_z(points, angle):
    """
    Rotate batched point clouds around the z-axis.

    Args:
        points: (B, N, 3 + C) — xyz in the first 3 channels, extra feature
            channels (if any) after them
        angle: (B), angle along z-axis, angle increases x ==> y
    Returns:
        rotated points with the same shape; numpy in -> numpy out
    """
    points, is_numpy = check_numpy_to_torch(points)
    angle, _ = check_numpy_to_torch(angle)

    cosa = torch.cos(angle)
    sina = torch.sin(angle)
    zeros = angle.new_zeros(points.shape[0])
    ones = angle.new_ones(points.shape[0])
    # One 3x3 rotation matrix per batch element; stacked row-major and applied
    # on the right (points @ R), which matches the sign layout below.
    rot_matrix = torch.stack((
        cosa, sina, zeros,
        -sina, cosa, zeros,
        zeros, zeros, ones
    ), dim=1).view(-1, 3, 3).float()
    # rotate only xyz, then re-append the untouched feature channels
    points_rot = torch.matmul(points[:, :, 0:3], rot_matrix)
    points_rot = torch.cat((points_rot, points[:, :, 3:]), dim=-1)
    return points_rot.numpy() if is_numpy else points_rot
62 |
63 |
def limit_period(val, offset=0.5, period=np.pi):
    """Wrap `val` into a single period (defaults give the range [-pi/2, pi/2))."""
    wrapped, was_numpy = check_numpy_to_torch(val)
    whole_periods = torch.floor(wrapped / period + offset)
    result = wrapped - whole_periods * period
    return result.numpy() if was_numpy else result
68 |
69 |
def sa_create(name, var):
    """Create a SharedArray named `name` holding a copy of `var`.

    Requires the optional SharedArray package (its import is guarded at the
    top of this file). The returned array is marked read-only.
    """
    x = SharedArray.create(name, var.shape, dtype=var.dtype)
    x[...] = var[...]
    x.flags.writeable = False  # guard the shared buffer against accidental writes
    return x
75 |
--------------------------------------------------------------------------------
/up/extensions/python/nms.py:
--------------------------------------------------------------------------------
1 | # Import from third library
2 | import numpy as np
3 | import torch
4 | from ..ext import naive_nms as nms
5 | from ..ext import softer_nms as softer_nms_c
6 | from up.utils.general.global_flag import ALIGNED_FLAG
7 |
8 |
def naive_nms(dets, thresh):
    """Hard NMS over 5-column detections (columns 0-3 are x1, y1, x2, y2;
    the last column is presumably the score, consumed inside the kernel).

    Dispatches to the compiled GPU kernel for CUDA tensors, otherwise to the
    CPU kernel; returns the kept indices on the input's device.
    """
    assert dets.shape[1] == 5
    num_dets = dets.shape[0]
    if torch.is_tensor(num_dets):  # defensive: shape[0] may be a tensor under tracing
        num_dets = num_dets.item()
    keep = torch.LongTensor(num_dets)  # output buffer: kept indices
    num_out = torch.LongTensor(1)      # output buffer: number of kept boxes
    if dets.device.type == 'cuda':
        nms.gpu_nms(keep, num_out, dets.float(), thresh, ALIGNED_FLAG.offset)
    else:
        x1 = dets[:, 0]
        y1 = dets[:, 1]
        x2 = dets[:, 2]
        y2 = dets[:, 3]
        # ALIGNED_FLAG.offset: presumably +1 under the "aligned" box convention
        areas = (x2 - x1 + ALIGNED_FLAG.offset) * (y2 - y1 + ALIGNED_FLAG.offset)
        order = torch.from_numpy(np.arange(dets.shape[0])).long()
        nms.cpu_nms(keep, num_out, dets.float(), order, areas, thresh, ALIGNED_FLAG.offset)
    return keep[:num_out[0]].contiguous().to(device=dets.device)
27 |
28 |
def softer_nms(dets, thresh=0.0001, sigma=0.5, iou_thresh=0.3, iou_method='linear', iou_sigma=0.02,
               method='var_voting'):
    """Softer-NMS / variance voting over 9-column detections.

    Args:
        dets: (N, 9) tensor; column 8 is the score used for final filtering.
        thresh: score threshold applied after the C kernel runs.
        sigma, iou_thresh, iou_method, iou_sigma, method: kernel parameters;
            `iou_method` / `method` must name members of the compiled enums.

    Returns:
        (kept dets, kept original indices), both on the input device.

    Raises:
        ValueError: if `dets` does not have 9 columns.
        TypeError: if `iou_method` or `method` is not supported.
    """
    import warnings
    if dets.shape[1] != 9:
        raise ValueError('Expected input tensor `dets` of 9-channels, got {}-channels'.format(dets.shape[1]))
    device = dets.device
    dets = dets.detach()
    if not hasattr(softer_nms_c.IOUMethod, iou_method.upper()):
        raise TypeError('IoU_method {} not supported'.format(iou_method.upper()))
    iou_method = getattr(softer_nms_c.IOUMethod, iou_method.upper())

    if not hasattr(softer_nms_c.NMSMethod, method.upper()):
        raise TypeError('Softer NMS method {} not supported'.format(method.upper()))
    if method.upper() == 'SOFTER':
        # fix: `raise Warning(...)` aborted execution, making the SOFTER
        # method unusable; a deprecation should only warn and proceed
        warnings.warn('Using deprecated softer_nms (method == SOFTER)', DeprecationWarning)
    method = getattr(softer_nms_c.NMSMethod, method.upper())
    # the C kernel works in-place on CPU tensors
    dets = dets.cpu()
    N = dets.shape[0]
    inds = torch.arange(0, N, dtype=torch.long)
    softer_nms_c.cpu_softer_nms(dets, inds, sigma, iou_thresh, iou_method, iou_sigma, method)
    keep = (dets[:, 8] > thresh).nonzero().reshape(-1)
    dets = dets[keep]
    keep = inds[keep]
    return dets.to(device=device), keep.to(device=device)
52 |
--------------------------------------------------------------------------------
/up/tasks/nas/bignas/models/heads/big_clshead.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 |
3 | from up.utils.model.initializer import trunc_normal_
4 | from up.tasks.nas.bignas.models.ops.dynamic_blocks import DynamicLinearBlock
5 |
6 |
class BigClsHead(nn.Module):
    """Classification head for BigNAS supernets.

    Optionally global-average-pools the selected backbone feature map,
    flattens it, and applies one DynamicLinearBlock per task (multi-task
    mode when `num_classes` is a list/tuple).
    """

    def __init__(self, num_classes, in_plane, input_feature_idx=-1, use_pool=True, dropout=0):
        super(BigClsHead, self).__init__()
        self.num_classes = num_classes
        self.in_plane = in_plane
        self.input_feature_idx = input_feature_idx
        self.prefix = self.__class__.__name__
        self.use_pool = use_pool
        self.dropout = dropout
        self.build_classifier(in_plane)
        self._init_weights()
        if self.use_pool:
            self.pool = nn.AdaptiveAvgPool2d((1, 1))

    def _init_weights(self):
        # truncated-normal init for linear weights, zeros for biases
        for module in self.modules():
            if not isinstance(module, nn.Linear):
                continue
            trunc_normal_(module.weight, std=0.02)
            if module.bias is not None:
                nn.init.constant_(module.bias, 0)

    def build_classifier(self, in_plane):
        # a list/tuple of class counts means one classifier head per task
        self.multicls = isinstance(self.num_classes, (list, tuple))
        if self.multicls:
            self.classifier = nn.ModuleList(
                [DynamicLinearBlock(in_plane, n_cls, bias=True, dropout_rate=self.dropout)
                 for n_cls in self.num_classes])
        else:
            self.classifier = DynamicLinearBlock(in_plane, self.num_classes, bias=True, dropout_rate=self.dropout)

    def get_pool_output(self, x):
        # optional global average pool, then flatten to (N, C)
        if self.use_pool:
            x = self.pool(x)
        return x.view(x.size(0), -1)

    def get_logits(self, x):
        if not self.multicls:
            return self.classifier(x)
        return [head(x) for head in self.classifier]

    def forward_net(self, x):
        feat = self.get_pool_output(x['features'][self.input_feature_idx])
        return {'logits': self.get_logits(feat)}

    def forward(self, input):
        return self.forward_net(input)
59 |
--------------------------------------------------------------------------------
/up/commands/quant_deploy.py:
--------------------------------------------------------------------------------
1 | from __future__ import division
2 |
3 | # Standard Library
4 | import argparse
5 |
6 |
7 | # Import from local
8 | from .subcommand import Subcommand
9 | from up.utils.general.yaml_loader import load_yaml # IncludeLoader
10 | from up.utils.general.registry_factory import SUBCOMMAND_REGISTRY, DEPLOY_REGISTRY
11 | from up.utils.general.user_analysis_helper import send_info
12 | from up.utils.general.global_flag import QUANT_FLAG
13 |
14 | __all__ = ['QuantDeploy']
15 |
16 |
17 | @SUBCOMMAND_REGISTRY.register('quant_deploy')
class QuantDeploy(Subcommand):
    """Subcommand that deploys a quantized model from a yaml config."""

    def add_subparser(self, name, parser):
        """Register the subcommand's CLI arguments and entry point."""
        sub_parser = parser.add_parser(
            name,
            description='subcommand for deploying',
            help='deploy a model')
        sub_parser.add_argument(
            '--config', dest='config', required=True,
            help='settings of detection in yaml format')
        sub_parser.add_argument(
            '--cfg_type', dest='cfg_type', type=str, default='up',
            help='config type (up or pod)')
        # everything after --opts is forwarded verbatim as config overrides
        sub_parser.add_argument(
            '--opts', help='options to replace yaml config', default=None,
            nargs=argparse.REMAINDER)
        sub_parser.set_defaults(run=_main)
        return sub_parser
38 |
39 |
def main(args):
    """Load the yaml config, flip the global quant flag, and run deployment.

    Requires runtime.runner.type == 'quant' in the config; otherwise only
    prints a hint and exits.
    """
    cfg = load_yaml(args.config, args.cfg_type)
    cfg['args'] = {'opts': args.opts}
    # ensure runtime.runner exists with sane defaults
    runtime = cfg.setdefault('runtime', {})
    runner_cfg = runtime.get('runner', {})
    runner_cfg.setdefault('type', 'base')
    runner_cfg.setdefault('kwargs', {})
    runtime['runner'] = runner_cfg
    QUANT_FLAG.flag = True

    send_info(cfg, func="quant_deploy")
    if runner_cfg['type'] == "quant":
        quant_deploy = DEPLOY_REGISTRY.get("quant")(cfg)
        quant_deploy.deploy()
    else:
        print("Need quant in cfg yaml.")
58 |
59 |
def _main(args):
    # Thin adapter used as the argparse `run=` callback target.
    main(args)
62 |
--------------------------------------------------------------------------------
/docs/source/English/tasks/tasks/ssl.rst:
--------------------------------------------------------------------------------
1 | self-supervision
2 | ================
3 |
UP supports self-supervised learning to train pretrained models and finetune them on downstream tasks.
5 |
6 | `Codes `_
7 |
8 | Config
9 | ------
10 |
11 | * `pretrain `_
12 | * `finetune `_
13 |
14 | pretrain module
15 | ---------------
16 |
17 | The following snippet shows an example config for pretrain task.
18 |
19 | .. code-block:: yaml
20 |
21 | # pretrain backbone config
22 | net: &subnet
23 | - name: backbone
24 | type: ssl # self-supervision sub-task
25 | multi_model: # support multiple backbones
26 | - name: encoder_q
27 | type: resnet50 # type of backbone (found in cls module)
28 | kwargs: # other hyperparameters
29 | frozen_layers: []
30 | ...
31 | - name: encoder_k
32 | type: resnet50 # type of backbone (found in cls module)
33 | kwargs: # other hyperparameters
34 | frozen_layers: []
35 | ...
36 | wrappers:
37 | - type: moco # self-supervision model, support moco, simclr and simsiam
38 | kwargs:
39 | dim: 128
40 | K: 65536
41 |
42 | - name: post_process
43 | type: base_ssl_postprocess # postprocessing module for self-supervision
44 | kwargs:
45 | ssl_loss: # loss function for self-supervision
46 | type: moco_loss
47 |
48 | finetune module
49 | ---------------
50 |
51 | The following snippet shows an example config for finetune task:
52 |
53 | .. code-block:: yaml
54 |
55 | dataset:
56 | type: cls
57 | kwargs:
58 | meta_file: ...
59 | image_reader:
60 | ...
61 | transformer: ...
62 | fraction: 0.1 # label fraction for finetuning
63 |
64 | saver:
65 | save_dir: moco_v1_linear_dist8/checkpoints/cls_std
66 | results_dir: moco_v1_linear_dist8/results_dir/cls_std
67 | auto_resume: True
68 | pretrain_model: moco_v1_bs512/checkpoints/cls_std/ckpt.pth # dir of pretrained model parameters
69 |
--------------------------------------------------------------------------------
/up/tasks/cls/data/data_utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 |
4 |
def rand_bbox(size, lam):
    """Sample a random box for CutMix.

    Args:
        size: tensor shape tuple (N, C, H, W). NOTE(review): as in the
            original CutMix code, size[2]/size[3] are read under the names
            W/H (swapped for NCHW); the box stays internally consistent with
            how `cutmix` below indexes the image, so behavior is preserved.
        lam: mixing ratio in [0, 1]; the box covers roughly a (1 - lam)
            fraction of the area.

    Returns:
        (bbx1, bby1, bbx2, bby2) integer box corners, clipped to the image.
    """
    W = size[2]
    H = size[3]
    cut_rat = np.sqrt(1. - lam)
    # fix: np.int was removed in NumPy 1.24 — use the builtin int
    cut_w = int(W * cut_rat)
    cut_h = int(H * cut_rat)

    # uniform random center
    cx = np.random.randint(W)
    cy = np.random.randint(H)

    bbx1 = np.clip(cx - cut_w // 2, 0, W)
    bby1 = np.clip(cy - cut_h // 2, 0, H)
    bbx2 = np.clip(cx + cut_w // 2, 0, W)
    bby2 = np.clip(cy + cut_h // 2, 0, H)

    return bbx1, bby1, bbx2, bby2
23 |
24 |
def mixup(data, alpha, num_classes):
    """Apply MixUp to a batch dict with 'image' and integer 'gt' labels.

    Mutates `data` in place (and returns it): 'image' becomes the convex
    combination of the batch with a shuffled copy of itself, and 'gt'
    becomes the matching soft one-hot target.

    Args:
        data: dict-like with 'image' (N, C, H, W) and 'gt' (N,) or (N, heads).
        alpha: Beta-distribution parameter; alpha <= 0 disables mixing (lam=1).
        num_classes: number of classes for the one-hot targets.
    """
    if alpha > 0:
        lam = np.random.beta(alpha, alpha)
    else:
        lam = 1
    images = data['image']
    batch_size = images.size()[0]
    index = torch.randperm(batch_size)
    mixed_x = lam * images + (1 - lam) * images[index, :]

    # one-hot encode labels
    # fix: use key access — `data.gt` only worked for EasyDict inputs and
    # crashed on plain dicts (the rest of this function uses data['gt'])
    if len(data['gt'].shape) > 1:
        num_head = data['gt'].shape[1]
        labels = torch.zeros(batch_size, num_head, num_classes)
        labels.scatter_(2, data['gt'].reshape(-1, num_head, 1), 1)
    else:
        labels = torch.zeros(batch_size, num_classes)
        labels.scatter_(1, data['gt'].reshape(-1, 1), 1)

    y_a, y_b = labels, labels[index]
    mixed_y = lam * y_a + (1 - lam) * y_b

    data['image'] = mixed_x
    data['gt'] = mixed_y
    return data
49 |
50 |
def cutmix(data, alpha, num_classes):
    """Apply CutMix to a batch dict with 'image' and integer 'gt' labels.

    Pastes a random box from a shuffled copy of the batch into each image
    (in place) and mixes the one-hot targets by the exact pasted-area ratio.
    Mutates and returns `data`.

    Args:
        data: dict-like with 'image' (N, C, H, W) and 'gt' (N,) or (N, heads).
        alpha: Beta-distribution parameter; alpha <= 0 disables mixing (lam=1).
        num_classes: number of classes for the one-hot targets.
    """
    if alpha > 0:
        lam = np.random.beta(alpha, alpha)
    else:
        lam = 1

    images = data['image']
    batch_size = images.size()[0]
    index = torch.randperm(batch_size)
    # one-hot encode labels
    # fix: use key access — `data.gt` only worked for EasyDict inputs and
    # crashed on plain dicts (the rest of this function uses data['gt'])
    if len(data['gt'].shape) > 1:
        num_head = data['gt'].shape[1]
        labels = torch.zeros(batch_size, num_head, num_classes)
        labels.scatter_(2, data['gt'].reshape(-1, num_head, 1), 1)
    else:
        labels = torch.zeros(batch_size, num_classes)
        labels.scatter_(1, data['gt'].reshape(-1, 1), 1)
    bbx1, bby1, bbx2, bby2 = rand_bbox(data['image'].shape, lam)
    images[:, :, bbx1:bbx2, bby1:bby2] = images[index, :, bbx1:bbx2, bby1:bby2]
    # recompute lam as the exact complement of the pasted-box area ratio
    lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (images.size()[-1] * images.size()[-2]))
    data['gt'] = lam * labels + (1 - lam) * labels[index]
    # images were modified in place, so data['image'] already holds the mix
    # (the original's `data.image = data.image` no-op was removed)

    return data
75 |
--------------------------------------------------------------------------------
/configs/ssl/mocov3/moco_v3.yaml:
--------------------------------------------------------------------------------
1 | mocov3: &mocov3
2 | type: torch_mocov3
3 | runtime:
4 | task_names: ssl
5 | dataset: # Required.
6 | train:
7 | dataset:
8 | type: cls
9 | kwargs:
10 | meta_file: images/meta/train.txt
11 | meta_type: moco_imagenet
12 | image_reader:
13 | type: fs_pillow
14 | kwargs:
15 | image_dir: images/train
16 | color_mode: RGB
17 | memcached: true
18 | transformer: [*mocov3]
19 | batch_sampler:
20 | type: base
21 | kwargs:
22 | sampler:
23 | type: dist
24 | kwargs: {}
25 | batch_size: 128
26 | dataloader:
27 | type: cls_base
28 | kwargs:
29 | num_workers: 12
30 | pin_memory: true
31 |
32 | trainer: # Required.
33 | max_epoch: 300
34 | test_freq: 500
35 | save_freq: 5
36 | is_eval: false
37 | only_save_latest: true
  optimizer: # optimizer settings (resolved to torch.optim.AdamW below)
39 | type: AdamW
40 | kwargs:
41 | lr: 0.00015
42 | weight_decay: 0.1
  lr_scheduler: # learning-rate schedule (resolved to CosineAnnealingLR below)
44 | warmup_iter: 50080
45 | warmup_type: linear
46 | warmup_register_type: no_scale_lr
47 | warmup_ratio: 0
48 | type: CosineAnnealingLR
49 | kwargs:
50 | T_max: 300
51 | eta_min: 0.00001
52 | warmup_iter: 0
53 |
54 | saver: # Required.
55 | save_dir: moco_v3_bs1024/checkpoints/cls_std # dir to save checkpoints
56 | results_dir: moco_v3_bs1024/results_dir/cls_std # dir to save detection results. i.e., bboxes, masks, keypoints
57 | auto_resume: true # find last checkpoint from save_dir and resume from it automatically
58 | # this option has the highest priority (auto_resume > opts > resume_model > pretrain_model)
59 |
60 | hooks:
61 | - type: auto_save_best
62 |
63 | net:
64 | - name: backbone
65 | type: ssl
66 | multi_model:
67 | - name: encoder_q
68 | type: moco_vit_small
69 | kwargs:
70 | stop_grad_conv1: true
71 | - name: encoder_k
72 | type: moco_vit_small
73 | kwargs:
74 | stop_grad_conv1: true
75 | wrappers:
76 | - type: moco_vit
77 | kwargs:
78 | T: 0.2
79 |
80 | - name: post_process
81 | type: base_ssl_postprocess
82 | kwargs:
83 | ssl_loss:
84 | type: contrastive_loss
85 | kwargs:
86 | tau: 0.2
87 |
88 |
--------------------------------------------------------------------------------
/docs/source/Chinese/tutorials/guide/augmentations.rst:
--------------------------------------------------------------------------------
1 | 数据预处理
2 | ==========
3 |
4 | UP 支持多种数据增广和前处理,常用数据增广包括Flip,Resize等;
前处理包括Normalization,To_Tensor,Pad。
6 | 细节如下所示:
7 |
8 | UP 在配置文件中直接引入增广:
9 |
10 | Flip:
11 |
12 | .. code-block:: yaml
13 |
14 | flip: &flip
15 | type: flip
16 | kwargs:
17 | flip_p: 0.5
18 |
19 | Resize:
20 |
21 | .. code-block:: yaml
22 |
23 | resize: &train_resize
24 | type: keep_ar_resize
25 | kwargs:
26 | scales: [640, 672, 704, 736, 768, 800]
27 | max_size: 1333
28 | separate_wh: True
29 |
30 | Normalization:
31 |
32 | .. code-block:: yaml
33 |
34 | normalize: &normalize
35 | type: normalize
36 | kwargs:
37 | mean: [0.485, 0.456, 0.406] # ImageNet pretrained statics
38 | std: [0.229, 0.224, 0.225]
39 |
40 | ToTensor:
41 |
42 | .. code-block:: yaml
43 |
44 | to_tensor: &to_tensor
45 | type: to_tensor
46 |
47 | RandAug: 随机从给定的增强序列中抽取n个增强操作,并根据给定的m值和方差std来决定增强的强度
48 |
49 | .. code-block:: yaml
50 |
51 | random_augmentation: &random_augmentation
52 | type: torch_random_augmentation
53 | kwargs:
54 | n: 2 # 随机抽取的增强个数
55 | m: 7 # 每个增强操作的强度,最高为10
56 | magnitude_std: 0.5 # 强度的方差
57 |
58 | RandAug Increasing: 原始的RandAug中有些操作,m值越小,增强的程度越大,Inceasing版本则是将所有的增强操作统一为m越大增强的强度越大
59 |
60 | .. code-block:: yaml
61 |
62 | random_augmentation: &random_augmentation
63 | type: torch_random_augmentationIncre
64 | kwargs:
65 | n: 2 # 随机抽取的增强个数
66 | m: 7 # 每个增强操作的强度,最高为10
67 | magnitude_std: 0.5 # 强度的方差
68 |
69 | BatchPad: 经常被直接加入到 dataloader 的配置文件中。
70 |
71 | .. code-block:: yaml
72 |
73 | dataloader:
74 | type: base
75 | kwargs:
76 | num_workers: 4
77 | alignment: 32
78 | pad_value: 0
79 | pad_type: batch_pad
80 |
81 | 所有的增广都需要被加入 dataset.kwargs.transformer,如下所示:
82 |
83 | .. code-block:: yaml
84 |
85 | dataset:
86 | type: coco
87 | kwargs:
88 | meta_file: coco/annotations/instances_train2017.json
89 | image_reader:
90 | type: fs_opencv
91 | kwargs:
92 | image_dir: coco/train2017
93 | color_mode: RGB
94 | transformer: [*flip, *train_resize, *to_tensor, *normalize] # add here in order
95 |
--------------------------------------------------------------------------------
/up/tasks/nas/metax/models/decoder/deeplab.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | from torch.nn import functional as F
3 | from up.utils.model.normalize import build_norm_layer
4 | from up.utils.general.registry_factory import MODULE_ZOO_REGISTRY
5 | from up.models.losses import build_loss
6 | from up.tasks.seg.models.components import Aux_Module, ASPP
7 |
8 | __all__ = ["XMNetDeeplabv3"]
9 |
10 |
11 | @MODULE_ZOO_REGISTRY.register('xmnet_deeplabv3')
class XMNetDeeplabv3(nn.Module):
    """
    DeepLabv3 decoder head (ASPP + conv classifier) for XMNet backbones.

    Reference:
        Chen, Liang-Chieh, et al. *"Rethinking Atrous Convolution for Semantic Image Segmentation."*
    """
    def __init__(self,
                 inplanes,
                 aux_inplanes,
                 num_classes=19,
                 inner_planes=256,
                 dilations=(12, 24, 36),
                 with_aux=True,
                 normalize={'type': 'solo_bn'},
                 loss=None):
        super(XMNetDeeplabv3, self).__init__()
        self.prefix = self.__class__.__name__
        self.with_aux = with_aux
        self.aspp = ASPP(inplanes, inner_planes=inner_planes, normalize=normalize, dilations=dilations)
        # 3x3 conv -> norm -> relu -> dropout -> 1x1 classifier
        self.head = nn.Sequential(
            nn.Conv2d(self.aspp.get_outplanes(), 256, kernel_size=3, padding=1, dilation=1, bias=False),
            build_norm_layer(256, normalize)[1],
            nn.ReLU(inplace=True),
            nn.Dropout2d(0.1),
            nn.Conv2d(256, num_classes, kernel_size=1, stride=1, padding=0, bias=True))
        if self.with_aux:
            # auxiliary head on the third feature stage (training only)
            self.aux_layer = Aux_Module(aux_inplanes, num_classes, normalize=normalize)
        self.loss = build_loss(loss)

    def forward(self, x):
        """Expects x['features'] = (x1, x2, x3, x4) and x['size'] = target HxW.

        Returns {'blob_pred': logits} at eval time; during training (with aux)
        also returns the combined loss under f"{prefix}.loss".
        """
        x1, x2, x3, x4 = x['features']
        size = x['size']
        aspp_out = self.aspp(x4)
        pred = self.head(aspp_out)

        # fix: F.upsample is deprecated (removed in recent PyTorch);
        # F.interpolate with the same mode/align_corners is the drop-in
        pred = F.interpolate(pred, size=size, mode='bilinear', align_corners=True)
        if self.training and self.with_aux:
            gt_seg = x['gt_semantic_seg']
            aux_pred = self.aux_layer(x3)
            aux_pred = F.interpolate(aux_pred, size=size, mode='bilinear', align_corners=True)
            pred = pred, aux_pred
            loss = self.loss(pred, gt_seg)
            return {f"{self.prefix}.loss": loss, "blob_pred": pred[0]}
        else:
            return {"blob_pred": pred}
56 |
--------------------------------------------------------------------------------
/up/tasks/seg/data/seg_dataloader.py:
--------------------------------------------------------------------------------
1 | from torch.utils.data import DataLoader
2 | # import torch
3 | from easydict import EasyDict
4 |
5 | from up.utils.general.registry_factory import DATALOADER_REGISTRY
6 | from up.data.samplers.batch_sampler import InfiniteBatchSampler
7 | from up.utils.general.registry_factory import BATCHING_REGISTRY
8 |
9 |
10 | __all__ = ['SegDataLoader']
11 |
12 |
def expand_seg(seg):
    """Ensure a segmentation map has a leading singleton channel dim (1, H, W)."""
    if seg.dim() != 3:
        return seg[None, :, :]
    assert seg.size(0) == 1, seg.size()
    return seg
19 |
20 |
21 | @DATALOADER_REGISTRY.register('seg_base')
class SegDataLoader(DataLoader):
    """DataLoader for segmentation: collates images and optional label maps,
    padding both to a common aligned size.

    `pad_value[0]` pads images; `pad_value[1]` pads label maps (the ignore
    index, 255 by default).
    """

    def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,
                 num_workers=0, pin_memory=False, drop_last=False,
                 alignment=1, pad_type='batch_pad', pad_value=[0, 255]
                 ):
        super(SegDataLoader, self).__init__(
            dataset, batch_size, shuffle, sampler, batch_sampler, num_workers,
            self._collate_fn, pin_memory, drop_last)
        self.alignment = alignment
        self.pad_image = BATCHING_REGISTRY.get(pad_type)(alignment, pad_value[0])
        self.pad_label = BATCHING_REGISTRY.get(pad_type)(alignment, pad_value[1])

    def _collate_fn(self, batch):
        """Collate a list of EasyDict samples into one batched EasyDict."""
        images = [_.image for _ in batch]
        gt_semantic_seg = [_.get('gt_semantic_seg', None) for _ in batch]
        image_info = [_.get('image_info', None) for _ in batch]
        output = EasyDict({
            'image': images,
            'image_info': image_info
        })
        if gt_semantic_seg[0] is not None:
            # (a no-op self-assignment was removed here)
            output.gt_semantic_seg = gt_semantic_seg

        if self.alignment > 0:  # when size not match, directly cat will fail
            output = self.pad_image(output)  # image
            if gt_semantic_seg[0] is not None:
                # run label maps through the padder by faking an 'image' key,
                # then drop the singleton channel again
                fake_dict = {'image': [expand_seg(seg) for seg in gt_semantic_seg]}
                output['gt_semantic_seg'] = self.pad_label(fake_dict)['image'][:, 0, :, :]

        return output

    def get_epoch_size(self):
        """Batches per epoch (unwraps InfiniteBatchSampler during training)."""
        if isinstance(self.batch_sampler, InfiniteBatchSampler):
            return len(self.batch_sampler.batch_sampler)  # training
        return len(self.batch_sampler)
59 |
--------------------------------------------------------------------------------
/up/extensions/csrc/psroi_pooling/psroi_pooling.h:
--------------------------------------------------------------------------------
#ifndef PSROIPOOLING_H_
#define PSROIPOOLING_H_

// NOTE(review): the include targets below appear to have been stripped during
// extraction (likely <torch/extension.h> and friends) — restore from the
// original repository source.
#include
#include
#include

// Input-validation helpers: tensors must be CUDA-resident and contiguous.
#define CHECK_CUDA(x) AT_ASSERTM(x.is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)

// for tocaffe support
int psroi_pooling_forward(int pooled_height,
                          int pooled_width,
                          int output_dim,
                          float spatial_scale,
                          at::Tensor features,
                          at::Tensor rois,
                          at::Tensor output,
                          at::Tensor mapping_channel);

// Kernel launchers implemented in the .cu file. ("Laucher" [sic] is the
// existing spelling; keep it for link compatibility.)
int PSROIPoolForwardLaucher(
    at::Tensor bottom_data, const float spatial_scale, const int num_rois, const int output_dim, const int size_rois,
    const int height, const int width, const int channels, const int pooled_height,
    const int pooled_width, at::Tensor bottom_rois,
    at::Tensor top_data, at::Tensor mapping_channel);

int PSROIPoolBackwardLaucher(at::Tensor top_diff, const float spatial_scale, const int batch_size, const int num_rois,
    const int output_dim, const int size_rois, const int height, const int width, const int channels, const int pooled_height,
    const int pooled_width, at::Tensor bottom_rois,
    at::Tensor bottom_diff, at::Tensor mapping_channel);

// CUDA entry points.
int psroi_pooling_forward_cuda(int pooled_height,
                               int pooled_width,
                               int output_dim,
                               float spatial_scale,
                               at::Tensor features,
                               at::Tensor rois,
                               at::Tensor output,
                               at::Tensor mapping_channel);

int psroi_pooling_backward_cuda(int pooled_height,
                                int pooled_width,
                                int output_dim,
                                float spatial_scale,
                                at::Tensor top_grad,
                                at::Tensor rois,
                                at::Tensor bottom_grad,
                                at::Tensor mapping_channel);

#endif
52 |
--------------------------------------------------------------------------------
/docs/source/English/tutorials/guide/environment.rst:
--------------------------------------------------------------------------------
1 | Environment
2 | ===========
3 |
4 | This section introduces the functionality that can be enabled or configured by setting environment variables.
5 |
6 | .. code-block:: bash
7 |
8 | export xxx=xxxx
9 |
10 | up.__init__
11 | -----------
12 |
13 | UP can add 'plugins' into environment variables to read its path.
14 |
15 | .. code-block:: python
16 |
17 | _PPPATH = 'PLUGINPATH'
18 | if _PPPATH not in os.environ:
19 | return
20 | path_list = os.environ[_PPPATH].split(':')
21 |
22 | up.runner.base_runner
23 | ---------------------
24 |
25 | UP adds batchsize, display, dataset_timer into environment variables.
26 |
27 | * BATCH_SIZE controls the batch size.
28 |
29 | * DISPLAY_FREQ controls the frequency of printing information in running.
30 |
31 | * DATASET_TIMER_ENABLED and DATASET_TIMER_THRESHOLD control the reporting of time spent in dataset-related operations.
32 |
33 | .. code-block:: python
34 |
35 | os.environ['BATCH_SIZE'] = str(batch_size)
36 | os.environ['DISPLAY_FREQ'] = str(self.config['args'].get('display', 1)) # The interval (in iterations) between printed logs.
37 | if cfg_dataset_timer: # The timing evaluator for datasets.
38 | os.environ['DATASET_TIMER_ENABLED'] = str(1 if cfg_dataset_timer['enabled'] is True else 0)
39 | os.environ['DATASET_TIMER_THRESHOLD'] = str(cfg_dataset_timer['threshold_seconds'])
40 |
41 | up.tasks.__init__
42 | -----------------
43 |
44 | UP adds DEFAULT_TASKS and EXCLUDE_TASKS into environment variables to control the loading of tasks.
45 |
46 | * DEFAULT_TASKS: all tasks under the 'tasks' folder.
47 |
48 | * EXCLUDE_TASKS: the tasks which will not be loaded.
49 |
50 | .. code-block:: python
51 |
52 | pwd = os.path.dirname(os.path.realpath(__file__))
53 | tasks_names = os.environ.get("DEFAULT_TASKS", os.listdir(pwd)) # loading all tasks.
54 | exclude_tasks = os.environ.get("EXCLUDE_TASKS", '').split(":") # excluding the tasks listed in EXCLUDE_TASKS.
55 |
56 | up.utils.general.petrel_helper
57 | ------------------------------
58 |
59 | UP adds PETRELPATH into environment variables to control the path of the petrel configuration file.
60 |
61 | .. code-block:: python
62 |
63 | default_conf_path = os.environ.get('PETRELPATH', '~/petreloss.conf')
64 |
65 | up.utils.general.registry
66 | -------------------------
67 |
68 | UP adds REGTRACE into environment variables to control the registers.
69 |
70 | .. code-block:: python
71 |
72 | _REG_TRACE_IS_ON = os.environ.get('REGTRACE', 'OFF').upper() == 'ON'
73 |
--------------------------------------------------------------------------------