├── dataset
├── voc
│ ├── label_list.txt
│ └── download_voc.py
├── fruit
│ ├── label_list.txt
│ └── download_fruit.py
├── wider_face
│ └── download.sh
├── fddb
│ └── download.sh
└── coco
│ └── download_coco.py
├── dygraph
├── ppdet
│ ├── model_zoo
│ │ ├── .gitignore
│ │ └── __init__.py
│ ├── py_op
│ │ └── __init__.py
│ ├── modeling
│ │ ├── proposal_generator
│ │ │ └── __init__.py
│ │ ├── backbones
│ │ │ └── __init__.py
│ │ ├── __init__.py
│ │ ├── tests
│ │ │ └── __init__.py
│ │ ├── utils
│ │ │ └── __init__.py
│ │ ├── architectures
│ │ │ ├── __init__.py
│ │ │ └── meta_arch.py
│ │ ├── necks
│ │ │ └── __init__.py
│ │ ├── losses
│ │ │ └── __init__.py
│ │ └── heads
│ │ │ └── __init__.py
│ ├── utils
│ │ └── __init__.py
│ ├── core
│ │ ├── config
│ │ │ └── __init__.py
│ │ └── __init__.py
│ ├── slim
│ │ └── __init__.py
│ ├── __init__.py
│ ├── metrics
│ │ └── __init__.py
│ ├── data
│ │ ├── transform
│ │ │ └── __init__.py
│ │ ├── __init__.py
│ │ └── source
│ │ │ └── __init__.py
│ └── engine
│ │ └── __init__.py
├── .style.yapf
├── dataset
│ ├── roadsign_voc
│ │ ├── label_list.txt
│ │ └── download_roadsign_voc.py
│ ├── voc
│ │ ├── label_list.txt
│ │ ├── create_list.py
│ │ └── download_voc.py
│ └── coco
│ │ └── download_coco.py
├── configs
│ ├── runtime.yml
│ ├── vehicle
│ │ ├── demo
│ │ │ ├── 001.jpeg
│ │ │ ├── 003.png
│ │ │ ├── 004.png
│ │ │ └── 005.png
│ │ └── vehicle_yolov3_darknet.yml
│ ├── pedestrian
│ │ ├── demo
│ │ │ ├── 001.png
│ │ │ ├── 002.png
│ │ │ ├── 003.png
│ │ │ └── 004.png
│ │ └── pedestrian_yolov3_darknet.yml
│ ├── ssd
│ │ ├── ssd_vgg16_300_240e_voc.yml
│ │ ├── ssd_mobilenet_v1_300_120e_voc.yml
│ │ ├── ssdlite_mobilenet_v1_300_coco.yml
│ │ ├── ssdlite_mobilenet_v3_large_320_coco.yml
│ │ ├── ssdlite_mobilenet_v3_small_320_coco.yml
│ │ └── _base_
│ │ │ ├── optimizer_1700e.yml
│ │ │ ├── optimizer_120e.yml
│ │ │ ├── optimizer_240e.yml
│ │ │ ├── ssd_vgg16_300.yml
│ │ │ ├── ssdlite_mobilenet_v1_300.yml
│ │ │ ├── ssd_mobilenet_v1_300.yml
│ │ │ ├── ssdlite_mobilenet_v3_large_320.yml
│ │ │ ├── ssdlite_mobilenet_v3_small_320.yml
│ │ │ └── ssd_reader.yml
│ ├── mask_rcnn
│ │ ├── mask_rcnn_r50_1x_coco.yml
│ │ ├── mask_rcnn_r50_fpn_1x_coco.yml
│ │ ├── mask_rcnn_r50_2x_coco.yml
│ │ ├── mask_rcnn_r50_fpn_2x_coco.yml
│ │ ├── _base_
│ │ │ ├── optimizer_1x.yml
│ │ │ └── mask_fpn_reader.yml
│ │ ├── mask_rcnn_r101_fpn_1x_coco.yml
│ │ ├── mask_rcnn_r101_vd_fpn_1x_coco.yml
│ │ ├── mask_rcnn_r50_vd_fpn_1x_coco.yml
│ │ ├── mask_rcnn_r50_vd_fpn_2x_coco.yml
│ │ ├── mask_rcnn_x101_vd_64x4d_fpn_1x_coco.yml
│ │ └── mask_rcnn_x101_vd_64x4d_fpn_2x_coco.yml
│ ├── solov2
│ │ ├── solov2_r50_fpn_1x_coco.yml
│ │ ├── _base_
│ │ │ ├── optimizer_1x.yml
│ │ │ └── solov2_r50_fpn.yml
│ │ └── solov2_r50_fpn_3x_coco.yml
│ ├── faster_rcnn
│ │ ├── faster_rcnn_r50_1x_coco.yml
│ │ ├── faster_rcnn_r50_fpn_1x_coco.yml
│ │ ├── faster_rcnn_r50_fpn_2x_coco.yml
│ │ ├── _base_
│ │ │ ├── optimizer_1x.yml
│ │ │ └── faster_fpn_reader.yml
│ │ ├── faster_rcnn_r101_1x_coco.yml
│ │ ├── faster_rcnn_r34_fpn_1x_coco.yml
│ │ ├── faster_rcnn_r101_fpn_1x_coco.yml
│ │ ├── faster_rcnn_r50_vd_1x_coco.yml
│ │ ├── faster_rcnn_r101_vd_fpn_1x_coco.yml
│ │ ├── faster_rcnn_r50_vd_fpn_1x_coco.yml
│ │ ├── faster_rcnn_r34_vd_fpn_1x_coco.yml
│ │ ├── faster_rcnn_x101_vd_64x4d_fpn_1x_coco.yml
│ │ ├── faster_rcnn_r101_fpn_2x_coco.yml
│ │ ├── faster_rcnn_r50_vd_fpn_2x_coco.yml
│ │ ├── faster_rcnn_r101_vd_fpn_2x_coco.yml
│ │ └── faster_rcnn_x101_vd_64x4d_fpn_2x_coco.yml
│ ├── fcos
│ │ ├── fcos_r50_fpn_1x_coco.yml
│ │ ├── _base_
│ │ │ ├── optimizer_1x.yml
│ │ │ └── fcos_r50_fpn.yml
│ │ ├── fcos_dcn_r50_fpn_1x_coco.yml
│ │ └── fcos_r50_fpn_multiscale_2x_coco.yml
│ ├── ttfnet
│ │ ├── ttfnet_darknet53_1x_coco.yml
│ │ ├── _base_
│ │ │ ├── optimizer_1x.yml
│ │ │ ├── ttfnet_darknet53.yml
│ │ │ └── ttfnet_reader.yml
│ │ └── README.md
│ ├── cascade_rcnn
│ │ ├── cascade_rcnn_r50_fpn_1x_coco.yml
│ │ ├── cascade_mask_rcnn_r50_fpn_1x_coco.yml
│ │ └── _base_
│ │ │ ├── optimizer_1x.yml
│ │ │ └── cascade_fpn_reader.yml
│ ├── ppyolo
│ │ ├── ppyolo_r50vd_dcn_1x_coco.yml
│ │ ├── ppyolo_r50vd_dcn_2x_coco.yml
│ │ ├── ppyolo_test.yml
│ │ ├── _base_
│ │ │ ├── optimizer_1x.yml
│ │ │ └── optimizer_2x.yml
│ │ └── ppyolo_r50vd_dcn_1x_minicoco.yml
│ ├── yolov3
│ │ ├── yolov3_darknet53_270e_coco.yml
│ │ ├── yolov3_r50vd_dcn_270e_coco.yml
│ │ ├── yolov3_mobilenet_v1_270e_coco.yml
│ │ ├── yolov3_mobilenet_v3_large_270e_coco.yml
│ │ └── _base_
│ │ │ ├── optimizer_270e.yml
│ │ │ ├── yolov3_darknet53.yml
│ │ │ ├── yolov3_mobilenet_v1.yml
│ │ │ ├── yolov3_r50vd_dcn.yml
│ │ │ ├── yolov3_mobilenet_v3_small.yml
│ │ │ └── yolov3_mobilenet_v3_large.yml
│ ├── dcn
│ │ ├── faster_rcnn_dcn_r50_vd_fpn_1x_coco.yml
│ │ ├── mask_rcnn_dcn_r101_vd_fpn_1x_coco.yml
│ │ ├── faster_rcnn_dcn_r101_vd_fpn_1x_coco.yml
│ │ ├── mask_rcnn_dcn_r50_fpn_1x_coco.yml
│ │ ├── faster_rcnn_dcn_r50_fpn_1x_coco.yml
│ │ ├── cascade_rcnn_dcn_r50_fpn_1x_coco.yml
│ │ ├── cascade_rcnn_dcn_x101_vd_64x4d_fpn_1x_coco.yml
│ │ ├── mask_rcnn_dcn_x101_vd_64x4d_fpn_1x_coco.yml
│ │ ├── faster_rcnn_dcn_x101_vd_64x4d_fpn_1x_coco.yml
│ │ ├── mask_rcnn_dcn_r50_vd_fpn_2x_coco.yml
│ │ └── faster_rcnn_dcn_r50_vd_fpn_2x_coco.yml
│ ├── datasets
│ │ ├── coco_detection.yml
│ │ ├── coco_instance.yml
│ │ ├── voc.yml
│ │ └── roadsign_voc.yml
│ ├── slim
│ │ ├── quant
│ │ │ ├── yolov3_mobilenet_v1_qat.yml
│ │ │ └── yolov3_mobilenet_v3_qat.yml
│ │ └── prune
│ │ │ ├── yolov3_prune_fpgm.yml
│ │ │ └── yolov3_prune_l1_norm.yml
│ ├── hrnet
│ │ ├── faster_rcnn_hrnetv2p_w18_1x_coco.yml
│ │ └── faster_rcnn_hrnetv2p_w18_2x_coco.yml
│ └── gn
│ │ ├── cascade_rcnn_r50_fpn_gn_2x.yml
│ │ ├── faster_rcnn_r50_fpn_gn_2x_coco.yml
│ │ └── README.md
├── demo
│ ├── road554.png
│ ├── orange_71.jpg
│ ├── 000000014439.jpg
│ ├── 000000087038.jpg
│ ├── 000000570688.jpg
│ └── 000000014439_640x640.jpg
├── requirements.txt
├── docs
│ └── images
│ │ ├── road554.png
│ │ └── 000000014439.jpg
├── .pre-commit-config.yaml
├── deploy
│ └── cpp
│ │ └── cmake
│ │ └── yaml-cpp.cmake
└── .gitignore
├── deploy
├── android_demo
│ ├── settings.gradle
│ ├── demo
│ │ ├── ppdet_app.png
│ │ ├── ppdet_app_home.jpg
│ │ ├── ppdet_app_camera.jpg
│ │ └── ppdet_app_photo.jpg
│ ├── gradle
│ │ └── wrapper
│ │ │ ├── gradle-wrapper.jar
│ │ │ └── gradle-wrapper.properties
│ ├── app
│ │ ├── src
│ │ │ ├── main
│ │ │ │ ├── assets
│ │ │ │ │ ├── images
│ │ │ │ │ │ ├── home.jpg
│ │ │ │ │ │ └── kite.jpg
│ │ │ │ │ └── labels
│ │ │ │ │ │ ├── coco-labels-2014_2017.txt
│ │ │ │ │ │ └── coco-labels-background.txt
│ │ │ │ └── res
│ │ │ │ │ ├── drawable
│ │ │ │ │ ├── photo.png
│ │ │ │ │ ├── camera.png
│ │ │ │ │ ├── photo1.png
│ │ │ │ │ ├── btn_shutter.xml
│ │ │ │ │ ├── btn_switch.xml
│ │ │ │ │ ├── btn_settings.xml
│ │ │ │ │ ├── btn_shutter_default.xml
│ │ │ │ │ └── btn_shutter_pressed.xml
│ │ │ │ │ ├── drawable-v24
│ │ │ │ │ ├── camera.png
│ │ │ │ │ └── photo.png
│ │ │ │ │ ├── drawable-xxhdpi-v4
│ │ │ │ │ ├── photo.png
│ │ │ │ │ ├── camera.png
│ │ │ │ │ ├── btn_switch_default.png
│ │ │ │ │ └── btn_switch_pressed.png
│ │ │ │ │ ├── mipmap-hdpi
│ │ │ │ │ ├── ic_launcher.png
│ │ │ │ │ └── ic_launcher_round.png
│ │ │ │ │ ├── mipmap-mdpi
│ │ │ │ │ ├── ic_launcher.png
│ │ │ │ │ └── ic_launcher_round.png
│ │ │ │ │ ├── mipmap-xhdpi
│ │ │ │ │ ├── ic_launcher.png
│ │ │ │ │ └── ic_launcher_round.png
│ │ │ │ │ ├── mipmap-xxhdpi
│ │ │ │ │ ├── ic_launcher.png
│ │ │ │ │ └── ic_launcher_round.png
│ │ │ │ │ ├── mipmap-xxxhdpi
│ │ │ │ │ ├── ic_launcher.png
│ │ │ │ │ └── ic_launcher_round.png
│ │ │ │ │ ├── mipmap-anydpi-v26
│ │ │ │ │ ├── ic_launcher.xml
│ │ │ │ │ └── ic_launcher_round.xml
│ │ │ │ │ ├── values
│ │ │ │ │ ├── colors.xml
│ │ │ │ │ ├── dimens.xml
│ │ │ │ │ ├── styles.xml
│ │ │ │ │ └── arrays.xml
│ │ │ │ │ ├── menu
│ │ │ │ │ └── menu_main.xml
│ │ │ │ │ └── layout
│ │ │ │ │ ├── content_main.xml
│ │ │ │ │ └── activity_main.xml
│ │ │ ├── test
│ │ │ │ └── java
│ │ │ │ │ └── com
│ │ │ │ │ └── baidu
│ │ │ │ │ └── paddledetection
│ │ │ │ │ └── detection
│ │ │ │ │ └── ExampleUnitTest.java
│ │ │ └── androidTest
│ │ │ │ └── java
│ │ │ │ └── com
│ │ │ │ └── baidu
│ │ │ │ └── paddledetection
│ │ │ │ └── detection
│ │ │ │ └── ExampleInstrumentedTest.java
│ │ ├── local.properties
│ │ └── proguard-rules.pro
│ ├── local.properties
│ ├── build.gradle
│ ├── paddledetection_demo.iml
│ └── gradle.properties
├── README.md
└── cpp
│ └── cmake
│ └── yaml-cpp.cmake
├── requirements.txt
├── slim
├── nas
│ └── search_space
│ │ └── __init__.py
├── quantization
│ └── images
│ │ ├── FreezePass.png
│ │ ├── TransformPass.png
│ │ ├── ConvertToInt8Pass.png
│ │ └── TransformForMobilePass.png
└── sensitive
│ └── images
│ └── mobilev1_yolov3_voc_sensitives.png
├── demo
├── orange_71.jpg
├── road554.png
├── 000000014439.jpg
├── 000000087038.jpg
├── 000000570688.jpg
├── 000000014439_640x640.jpg
└── infer_cfg.yml
├── ppdet
├── __pycache__
│ ├── __init__.cpython-37.pyc
│ └── optimizer.cpython-37.pyc
├── utils
│ ├── __pycache__
│ │ ├── cli.cpython-37.pyc
│ │ ├── check.cpython-37.pyc
│ │ ├── stats.cpython-37.pyc
│ │ ├── __init__.cpython-37.pyc
│ │ ├── coco_eval.cpython-37.pyc
│ │ ├── download.cpython-37.pyc
│ │ ├── map_utils.cpython-37.pyc
│ │ ├── voc_eval.cpython-37.pyc
│ │ ├── voc_utils.cpython-37.pyc
│ │ ├── bbox_utils.cpython-37.pyc
│ │ ├── checkpoint.cpython-37.pyc
│ │ ├── dist_utils.cpython-37.pyc
│ │ ├── eval_utils.cpython-37.pyc
│ │ └── post_process.cpython-37.pyc
│ └── __init__.py
├── data
│ ├── __pycache__
│ │ ├── reader.cpython-37.pyc
│ │ ├── __init__.cpython-37.pyc
│ │ └── parallel_map.cpython-37.pyc
│ ├── source
│ │ ├── __pycache__
│ │ │ ├── voc.cpython-37.pyc
│ │ │ ├── coco.cpython-37.pyc
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ ├── dataset.cpython-37.pyc
│ │ │ └── widerface.cpython-37.pyc
│ │ └── __init__.py
│ ├── shared_queue
│ │ ├── __pycache__
│ │ │ ├── queue.cpython-37.pyc
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ └── sharedmemory.cpython-37.pyc
│ │ └── __init__.py
│ ├── transform
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ ├── op_helper.cpython-37.pyc
│ │ │ ├── operators.cpython-37.pyc
│ │ │ └── batch_operators.cpython-37.pyc
│ │ └── __init__.py
│ └── __init__.py
├── modeling
│ ├── __pycache__
│ │ ├── ops.cpython-37.pyc
│ │ ├── __init__.cpython-37.pyc
│ │ └── target_assigners.cpython-37.pyc
│ ├── backbones
│ │ └── __pycache__
│ │ │ ├── bfp.cpython-37.pyc
│ │ │ ├── fpn.cpython-37.pyc
│ │ │ ├── vgg.cpython-37.pyc
│ │ │ ├── acfpn.cpython-37.pyc
│ │ │ ├── bifpn.cpython-37.pyc
│ │ │ ├── hrfpn.cpython-37.pyc
│ │ │ ├── hrnet.cpython-37.pyc
│ │ │ ├── resnet.cpython-37.pyc
│ │ │ ├── senet.cpython-37.pyc
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ ├── blazenet.cpython-37.pyc
│ │ │ ├── darknet.cpython-37.pyc
│ │ │ ├── gc_block.cpython-37.pyc
│ │ │ ├── ghostnet.cpython-37.pyc
│ │ │ ├── res2net.cpython-37.pyc
│ │ │ ├── resnext.cpython-37.pyc
│ │ │ ├── cb_resnet.cpython-37.pyc
│ │ │ ├── cspdarknet.cpython-37.pyc
│ │ │ ├── faceboxnet.cpython-37.pyc
│ │ │ ├── hourglass.cpython-37.pyc
│ │ │ ├── mobilenet.cpython-37.pyc
│ │ │ ├── efficientnet.cpython-37.pyc
│ │ │ ├── mobilenet_v3.cpython-37.pyc
│ │ │ ├── name_adapter.cpython-37.pyc
│ │ │ └── nonlocal_helper.cpython-37.pyc
│ ├── losses
│ │ └── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ ├── diou_loss.cpython-37.pyc
│ │ │ ├── fcos_loss.cpython-37.pyc
│ │ │ ├── giou_loss.cpython-37.pyc
│ │ │ ├── iou_loss.cpython-37.pyc
│ │ │ ├── yolo_loss.cpython-37.pyc
│ │ │ ├── solov2_loss.cpython-37.pyc
│ │ │ ├── diou_loss_yolo.cpython-37.pyc
│ │ │ ├── iou_aware_loss.cpython-37.pyc
│ │ │ ├── smooth_l1_loss.cpython-37.pyc
│ │ │ ├── balanced_l1_loss.cpython-37.pyc
│ │ │ └── ssd_with_lmk_loss.cpython-37.pyc
│ ├── architectures
│ │ └── __pycache__
│ │ │ ├── fcos.cpython-37.pyc
│ │ │ ├── htc.cpython-37.pyc
│ │ │ ├── ssd.cpython-37.pyc
│ │ │ ├── yolo.cpython-37.pyc
│ │ │ ├── solov2.cpython-37.pyc
│ │ │ ├── ttfnet.cpython-37.pyc
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ ├── blazeface.cpython-37.pyc
│ │ │ ├── faceboxes.cpython-37.pyc
│ │ │ ├── mask_rcnn.cpython-37.pyc
│ │ │ ├── retinanet.cpython-37.pyc
│ │ │ ├── cascade_rcnn.cpython-37.pyc
│ │ │ ├── efficientdet.cpython-37.pyc
│ │ │ ├── faster_rcnn.cpython-37.pyc
│ │ │ ├── input_helper.cpython-37.pyc
│ │ │ ├── cascade_mask_rcnn.cpython-37.pyc
│ │ │ ├── cornernet_squeeze.cpython-37.pyc
│ │ │ └── cascade_rcnn_cls_aware.cpython-37.pyc
│ ├── mask_head
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ └── solo_mask_head.cpython-37.pyc
│ │ └── __init__.py
│ ├── roi_heads
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ ├── bbox_head.cpython-37.pyc
│ │ │ ├── mask_head.cpython-37.pyc
│ │ │ ├── cascade_head.cpython-37.pyc
│ │ │ ├── htc_bbox_head.cpython-37.pyc
│ │ │ ├── htc_mask_head.cpython-37.pyc
│ │ │ └── htc_semantic_head.cpython-37.pyc
│ │ └── __init__.py
│ ├── anchor_heads
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ ├── rpn_head.cpython-37.pyc
│ │ │ ├── ttf_head.cpython-37.pyc
│ │ │ ├── fcos_head.cpython-37.pyc
│ │ │ ├── iou_aware.cpython-37.pyc
│ │ │ ├── yolo_head.cpython-37.pyc
│ │ │ ├── corner_head.cpython-37.pyc
│ │ │ ├── retina_head.cpython-37.pyc
│ │ │ ├── solov2_head.cpython-37.pyc
│ │ │ └── efficient_head.cpython-37.pyc
│ │ └── __init__.py
│ ├── roi_extractors
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ └── roi_extractor.cpython-37.pyc
│ │ └── __init__.py
│ ├── tests
│ │ ├── __init__.py
│ │ └── decorator_helper.py
│ └── __init__.py
├── core
│ ├── __pycache__
│ │ ├── __init__.cpython-37.pyc
│ │ └── workspace.cpython-37.pyc
│ ├── config
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ ├── schema.cpython-37.pyc
│ │ │ └── yaml_helpers.cpython-37.pyc
│ │ └── __init__.py
│ └── __init__.py
├── experimental
│ ├── __pycache__
│ │ ├── __init__.cpython-37.pyc
│ │ └── mixed_precision.cpython-37.pyc
│ └── __init__.py
├── __init__.py
└── ext_op
│ ├── __init__.py
│ └── src
│ └── make.sh
├── configs
├── face_detection
│ └── README.md
├── gridmask
│ └── README.md
├── acfpn
│ └── README.md
├── random_erasing
│ └── README.md
└── htc
│ └── README.md
└── tools
└── __init__.py
/dataset/voc/label_list.txt:
--------------------------------------------------------------------------------
1 | person
2 |
--------------------------------------------------------------------------------
/dygraph/ppdet/model_zoo/.gitignore:
--------------------------------------------------------------------------------
1 | MODEL_ZOO
2 |
--------------------------------------------------------------------------------
/deploy/android_demo/settings.gradle:
--------------------------------------------------------------------------------
1 | include ':app'
2 |
--------------------------------------------------------------------------------
/dataset/fruit/label_list.txt:
--------------------------------------------------------------------------------
1 | apple
2 | banana
3 | orange
4 |
--------------------------------------------------------------------------------
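Note: the label_list.txt files above hold one class name per line. As a purely illustrative sketch (the path and helper name are made up; this is not necessarily how ppdet itself loads them), such a file can be turned into an index-to-name mapping like this:

# Illustrative only: read a label_list.txt (one class name per line)
# into an {index: class_name} dict. The path assumes the fruit dataset
# layout shown in the tree above.
def load_label_list(path="dataset/fruit/label_list.txt"):
    with open(path, encoding="utf-8") as f:
        names = [line.strip() for line in f if line.strip()]
    return dict(enumerate(names))

# e.g. load_label_list() -> {0: 'apple', 1: 'banana', 2: 'orange'}
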
/dygraph/ppdet/py_op/__init__.py:
--------------------------------------------------------------------------------
1 | from .post_process import *
2 |
--------------------------------------------------------------------------------
/dygraph/.style.yapf:
--------------------------------------------------------------------------------
1 | [style]
2 | based_on_style = pep8
3 | column_limit = 80
4 |
--------------------------------------------------------------------------------
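Note: the .style.yapf above pins yapf to pep8 style with an 80-column limit. As a hedged illustration (the sample source string is invented; this is just one way to exercise the config, not part of the repo's tooling), yapf's Python API accepts the style file directly:

# Illustrative only: apply the repo's .style.yapf via yapf's Python API.
from yapf.yapflib.yapf_api import FormatCode

messy = "def add( a,b ):\n    return a+b\n"
formatted, changed = FormatCode(messy, style_config="dygraph/.style.yapf")
print(formatted)  # pep8-formatted source, wrapped at 80 columns
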
/dygraph/dataset/roadsign_voc/label_list.txt:
--------------------------------------------------------------------------------
1 | speedlimit
2 | crosswalk
3 | trafficlight
4 | stop
--------------------------------------------------------------------------------
/dygraph/configs/runtime.yml:
--------------------------------------------------------------------------------
1 | use_gpu: true
2 | log_iter: 20
3 | save_dir: output
4 | snapshot_epoch: 1
5 |
--------------------------------------------------------------------------------
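Note: runtime.yml above is plain YAML, so it can be read back with PyYAML (already listed in requirements.txt). A minimal, illustrative read, not a description of how ppdet consumes it internally:

# Illustrative only: parse runtime.yml with PyYAML.
import yaml

with open("dygraph/configs/runtime.yml", encoding="utf-8") as f:
    cfg = yaml.safe_load(f)

print(cfg)  # {'use_gpu': True, 'log_iter': 20, 'save_dir': 'output', 'snapshot_epoch': 1}
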
/dygraph/ppdet/modeling/proposal_generator/__init__.py:
--------------------------------------------------------------------------------
1 | from . import rpn_head
2 | from .rpn_head import *
3 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | tqdm
2 | typeguard ; python_version >= '3.4'
3 | visualdl>=2.0.0b
4 | opencv-python
5 | PyYAML
6 | shapely
7 |
--------------------------------------------------------------------------------
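Note: the typeguard entry above carries a PEP 508 environment marker, so it only applies on Python 3.4 and newer. A small, illustrative check with the third-party packaging library (not something this repo itself depends on):

# Illustrative only: evaluate the environment marker from requirements.txt.
from packaging.requirements import Requirement

req = Requirement("typeguard ; python_version >= '3.4'")
print(req.name)               # typeguard
print(req.marker.evaluate())  # True on any Python >= 3.4 interpreter
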
/slim/nas/search_space/__init__.py:
--------------------------------------------------------------------------------
1 | from .blazefacespace_nas import BlazeFaceNasSpace
2 |
3 | __all__ = ['BlazeFaceNasSpace']
4 |
--------------------------------------------------------------------------------
/demo/orange_71.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/demo/orange_71.jpg
--------------------------------------------------------------------------------
/demo/road554.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/demo/road554.png
--------------------------------------------------------------------------------
/demo/000000014439.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/demo/000000014439.jpg
--------------------------------------------------------------------------------
/demo/000000087038.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/demo/000000087038.jpg
--------------------------------------------------------------------------------
/demo/000000570688.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/demo/000000570688.jpg
--------------------------------------------------------------------------------
/dygraph/demo/road554.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/dygraph/demo/road554.png
--------------------------------------------------------------------------------
/dygraph/requirements.txt:
--------------------------------------------------------------------------------
1 | tqdm
2 | typeguard ; python_version >= '3.4'
3 | visualdl>=2.0.0b
4 | opencv-python
5 | PyYAML
6 | shapely
7 | scipy
8 |
--------------------------------------------------------------------------------
/dygraph/demo/orange_71.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/dygraph/demo/orange_71.jpg
--------------------------------------------------------------------------------
/demo/000000014439_640x640.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/demo/000000014439_640x640.jpg
--------------------------------------------------------------------------------
/dygraph/demo/000000014439.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/dygraph/demo/000000014439.jpg
--------------------------------------------------------------------------------
/dygraph/demo/000000087038.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/dygraph/demo/000000087038.jpg
--------------------------------------------------------------------------------
/dygraph/demo/000000570688.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/dygraph/demo/000000570688.jpg
--------------------------------------------------------------------------------
/dygraph/docs/images/road554.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/dygraph/docs/images/road554.png
--------------------------------------------------------------------------------
/deploy/android_demo/demo/ppdet_app.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/deploy/android_demo/demo/ppdet_app.png
--------------------------------------------------------------------------------
/dygraph/configs/vehicle/demo/001.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/dygraph/configs/vehicle/demo/001.jpeg
--------------------------------------------------------------------------------
/dygraph/configs/vehicle/demo/003.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/dygraph/configs/vehicle/demo/003.png
--------------------------------------------------------------------------------
/dygraph/configs/vehicle/demo/004.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/dygraph/configs/vehicle/demo/004.png
--------------------------------------------------------------------------------
/dygraph/configs/vehicle/demo/005.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/dygraph/configs/vehicle/demo/005.png
--------------------------------------------------------------------------------
/dygraph/demo/000000014439_640x640.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/dygraph/demo/000000014439_640x640.jpg
--------------------------------------------------------------------------------
/dygraph/docs/images/000000014439.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/dygraph/docs/images/000000014439.jpg
--------------------------------------------------------------------------------
/dygraph/configs/pedestrian/demo/001.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/dygraph/configs/pedestrian/demo/001.png
--------------------------------------------------------------------------------
/dygraph/configs/pedestrian/demo/002.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/dygraph/configs/pedestrian/demo/002.png
--------------------------------------------------------------------------------
/dygraph/configs/pedestrian/demo/003.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/dygraph/configs/pedestrian/demo/003.png
--------------------------------------------------------------------------------
/dygraph/configs/pedestrian/demo/004.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/dygraph/configs/pedestrian/demo/004.png
--------------------------------------------------------------------------------
/slim/quantization/images/FreezePass.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/slim/quantization/images/FreezePass.png
--------------------------------------------------------------------------------
/deploy/android_demo/demo/ppdet_app_home.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/deploy/android_demo/demo/ppdet_app_home.jpg
--------------------------------------------------------------------------------
/ppdet/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/__pycache__/optimizer.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/__pycache__/optimizer.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/utils/__pycache__/cli.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/utils/__pycache__/cli.cpython-37.pyc
--------------------------------------------------------------------------------
/slim/quantization/images/TransformPass.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/slim/quantization/images/TransformPass.png
--------------------------------------------------------------------------------
/deploy/android_demo/demo/ppdet_app_camera.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/deploy/android_demo/demo/ppdet_app_camera.jpg
--------------------------------------------------------------------------------
/deploy/android_demo/demo/ppdet_app_photo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/deploy/android_demo/demo/ppdet_app_photo.jpg
--------------------------------------------------------------------------------
/ppdet/data/__pycache__/reader.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/data/__pycache__/reader.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/__pycache__/ops.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/__pycache__/ops.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/utils/__pycache__/check.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/utils/__pycache__/check.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/utils/__pycache__/stats.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/utils/__pycache__/stats.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/core/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/core/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/core/__pycache__/workspace.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/core/__pycache__/workspace.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/data/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/data/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/data/source/__pycache__/voc.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/data/source/__pycache__/voc.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/utils/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/utils/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/utils/__pycache__/coco_eval.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/utils/__pycache__/coco_eval.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/utils/__pycache__/download.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/utils/__pycache__/download.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/utils/__pycache__/map_utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/utils/__pycache__/map_utils.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/utils/__pycache__/voc_eval.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/utils/__pycache__/voc_eval.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/utils/__pycache__/voc_utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/utils/__pycache__/voc_utils.cpython-37.pyc
--------------------------------------------------------------------------------
/slim/quantization/images/ConvertToInt8Pass.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/slim/quantization/images/ConvertToInt8Pass.png
--------------------------------------------------------------------------------
/ppdet/data/__pycache__/parallel_map.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/data/__pycache__/parallel_map.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/data/source/__pycache__/coco.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/data/source/__pycache__/coco.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/utils/__pycache__/bbox_utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/utils/__pycache__/bbox_utils.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/utils/__pycache__/checkpoint.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/utils/__pycache__/checkpoint.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/utils/__pycache__/dist_utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/utils/__pycache__/dist_utils.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/utils/__pycache__/eval_utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/utils/__pycache__/eval_utils.cpython-37.pyc
--------------------------------------------------------------------------------
/deploy/android_demo/gradle/wrapper/gradle-wrapper.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/deploy/android_demo/gradle/wrapper/gradle-wrapper.jar
--------------------------------------------------------------------------------
/ppdet/core/config/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/core/config/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/core/config/__pycache__/schema.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/core/config/__pycache__/schema.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/data/source/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/data/source/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/data/source/__pycache__/dataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/data/source/__pycache__/dataset.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/utils/__pycache__/post_process.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/utils/__pycache__/post_process.cpython-37.pyc
--------------------------------------------------------------------------------
/slim/quantization/images/TransformForMobilePass.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/slim/quantization/images/TransformForMobilePass.png
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/assets/images/home.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/deploy/android_demo/app/src/main/assets/images/home.jpg
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/assets/images/kite.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/deploy/android_demo/app/src/main/assets/images/kite.jpg
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/drawable/photo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/deploy/android_demo/app/src/main/res/drawable/photo.png
--------------------------------------------------------------------------------
/ppdet/data/source/__pycache__/widerface.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/data/source/__pycache__/widerface.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/experimental/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/experimental/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/backbones/__pycache__/bfp.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/backbones/__pycache__/bfp.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/backbones/__pycache__/fpn.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/backbones/__pycache__/fpn.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/backbones/__pycache__/vgg.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/backbones/__pycache__/vgg.cpython-37.pyc
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/drawable/camera.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/deploy/android_demo/app/src/main/res/drawable/camera.png
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/drawable/photo1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/deploy/android_demo/app/src/main/res/drawable/photo1.png
--------------------------------------------------------------------------------
/ppdet/core/config/__pycache__/yaml_helpers.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/core/config/__pycache__/yaml_helpers.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/data/shared_queue/__pycache__/queue.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/data/shared_queue/__pycache__/queue.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/data/transform/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/data/transform/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/data/transform/__pycache__/op_helper.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/data/transform/__pycache__/op_helper.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/data/transform/__pycache__/operators.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/data/transform/__pycache__/operators.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/__pycache__/target_assigners.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/__pycache__/target_assigners.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/backbones/__pycache__/acfpn.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/backbones/__pycache__/acfpn.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/backbones/__pycache__/bifpn.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/backbones/__pycache__/bifpn.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/backbones/__pycache__/hrfpn.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/backbones/__pycache__/hrfpn.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/backbones/__pycache__/hrnet.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/backbones/__pycache__/hrnet.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/backbones/__pycache__/resnet.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/backbones/__pycache__/resnet.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/backbones/__pycache__/senet.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/backbones/__pycache__/senet.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/losses/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/losses/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/losses/__pycache__/diou_loss.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/losses/__pycache__/diou_loss.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/losses/__pycache__/fcos_loss.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/losses/__pycache__/fcos_loss.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/losses/__pycache__/giou_loss.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/losses/__pycache__/giou_loss.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/losses/__pycache__/iou_loss.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/losses/__pycache__/iou_loss.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/losses/__pycache__/yolo_loss.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/losses/__pycache__/yolo_loss.cpython-37.pyc
--------------------------------------------------------------------------------
/slim/sensitive/images/mobilev1_yolov3_voc_sensitives.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/slim/sensitive/images/mobilev1_yolov3_voc_sensitives.png
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/drawable-v24/camera.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/deploy/android_demo/app/src/main/res/drawable-v24/camera.png
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/drawable-v24/photo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/deploy/android_demo/app/src/main/res/drawable-v24/photo.png
--------------------------------------------------------------------------------
/ppdet/data/shared_queue/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/data/shared_queue/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/architectures/__pycache__/fcos.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/architectures/__pycache__/fcos.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/architectures/__pycache__/htc.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/architectures/__pycache__/htc.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/architectures/__pycache__/ssd.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/architectures/__pycache__/ssd.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/architectures/__pycache__/yolo.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/architectures/__pycache__/yolo.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/backbones/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/backbones/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/backbones/__pycache__/blazenet.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/backbones/__pycache__/blazenet.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/backbones/__pycache__/darknet.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/backbones/__pycache__/darknet.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/backbones/__pycache__/gc_block.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/backbones/__pycache__/gc_block.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/backbones/__pycache__/ghostnet.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/backbones/__pycache__/ghostnet.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/backbones/__pycache__/res2net.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/backbones/__pycache__/res2net.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/backbones/__pycache__/resnext.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/backbones/__pycache__/resnext.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/losses/__pycache__/solov2_loss.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/losses/__pycache__/solov2_loss.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/mask_head/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/mask_head/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/roi_heads/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/roi_heads/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/data/shared_queue/__pycache__/sharedmemory.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/data/shared_queue/__pycache__/sharedmemory.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/data/transform/__pycache__/batch_operators.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/data/transform/__pycache__/batch_operators.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/experimental/__pycache__/mixed_precision.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/experimental/__pycache__/mixed_precision.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/anchor_heads/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/anchor_heads/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/anchor_heads/__pycache__/rpn_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/anchor_heads/__pycache__/rpn_head.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/anchor_heads/__pycache__/ttf_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/anchor_heads/__pycache__/ttf_head.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/architectures/__pycache__/solov2.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/architectures/__pycache__/solov2.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/architectures/__pycache__/ttfnet.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/architectures/__pycache__/ttfnet.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/backbones/__pycache__/cb_resnet.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/backbones/__pycache__/cb_resnet.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/backbones/__pycache__/cspdarknet.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/backbones/__pycache__/cspdarknet.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/backbones/__pycache__/faceboxnet.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/backbones/__pycache__/faceboxnet.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/backbones/__pycache__/hourglass.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/backbones/__pycache__/hourglass.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/backbones/__pycache__/mobilenet.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/backbones/__pycache__/mobilenet.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/losses/__pycache__/diou_loss_yolo.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/losses/__pycache__/diou_loss_yolo.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/losses/__pycache__/iou_aware_loss.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/losses/__pycache__/iou_aware_loss.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/losses/__pycache__/smooth_l1_loss.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/losses/__pycache__/smooth_l1_loss.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/roi_heads/__pycache__/bbox_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/roi_heads/__pycache__/bbox_head.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/roi_heads/__pycache__/mask_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/roi_heads/__pycache__/mask_head.cpython-37.pyc
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/drawable-xxhdpi-v4/photo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/deploy/android_demo/app/src/main/res/drawable-xxhdpi-v4/photo.png
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/mipmap-hdpi/ic_launcher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/deploy/android_demo/app/src/main/res/mipmap-hdpi/ic_launcher.png
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/mipmap-mdpi/ic_launcher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/deploy/android_demo/app/src/main/res/mipmap-mdpi/ic_launcher.png
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/mipmap-xhdpi/ic_launcher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/deploy/android_demo/app/src/main/res/mipmap-xhdpi/ic_launcher.png
--------------------------------------------------------------------------------
/ppdet/modeling/anchor_heads/__pycache__/fcos_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/anchor_heads/__pycache__/fcos_head.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/anchor_heads/__pycache__/iou_aware.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/anchor_heads/__pycache__/iou_aware.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/anchor_heads/__pycache__/yolo_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/anchor_heads/__pycache__/yolo_head.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/architectures/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/architectures/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/architectures/__pycache__/blazeface.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/architectures/__pycache__/blazeface.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/architectures/__pycache__/faceboxes.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/architectures/__pycache__/faceboxes.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/architectures/__pycache__/mask_rcnn.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/architectures/__pycache__/mask_rcnn.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/architectures/__pycache__/retinanet.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/architectures/__pycache__/retinanet.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/backbones/__pycache__/efficientnet.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/backbones/__pycache__/efficientnet.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/backbones/__pycache__/mobilenet_v3.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/backbones/__pycache__/mobilenet_v3.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/backbones/__pycache__/name_adapter.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/backbones/__pycache__/name_adapter.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/losses/__pycache__/balanced_l1_loss.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/losses/__pycache__/balanced_l1_loss.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/roi_extractors/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/roi_extractors/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/roi_heads/__pycache__/cascade_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/roi_heads/__pycache__/cascade_head.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/roi_heads/__pycache__/htc_bbox_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/roi_heads/__pycache__/htc_bbox_head.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/roi_heads/__pycache__/htc_mask_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/roi_heads/__pycache__/htc_mask_head.cpython-37.pyc
--------------------------------------------------------------------------------
/configs/face_detection/README.md:
--------------------------------------------------------------------------------
1 | **For the documentation tutorial, please refer to:** [FACE_DETECTION.md](../../docs/featured_model/FACE_DETECTION.md)
2 | **For the English document, please refer to:** [FACE_DETECTION_en.md](../../docs/featured_model/FACE_DETECTION_en.md)
3 |
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/drawable-xxhdpi-v4/camera.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/deploy/android_demo/app/src/main/res/drawable-xxhdpi-v4/camera.png
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/mipmap-xxhdpi/ic_launcher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/deploy/android_demo/app/src/main/res/mipmap-xxhdpi/ic_launcher.png
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/mipmap-xxxhdpi/ic_launcher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/deploy/android_demo/app/src/main/res/mipmap-xxxhdpi/ic_launcher.png
--------------------------------------------------------------------------------
/ppdet/modeling/anchor_heads/__pycache__/corner_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/anchor_heads/__pycache__/corner_head.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/anchor_heads/__pycache__/retina_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/anchor_heads/__pycache__/retina_head.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/anchor_heads/__pycache__/solov2_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/anchor_heads/__pycache__/solov2_head.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/architectures/__pycache__/cascade_rcnn.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/architectures/__pycache__/cascade_rcnn.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/architectures/__pycache__/efficientdet.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/architectures/__pycache__/efficientdet.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/architectures/__pycache__/faster_rcnn.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/architectures/__pycache__/faster_rcnn.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/architectures/__pycache__/input_helper.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/architectures/__pycache__/input_helper.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/backbones/__pycache__/nonlocal_helper.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/backbones/__pycache__/nonlocal_helper.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/losses/__pycache__/ssd_with_lmk_loss.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/losses/__pycache__/ssd_with_lmk_loss.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/mask_head/__pycache__/solo_mask_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/mask_head/__pycache__/solo_mask_head.cpython-37.pyc
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/mipmap-hdpi/ic_launcher_round.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/deploy/android_demo/app/src/main/res/mipmap-hdpi/ic_launcher_round.png
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/mipmap-mdpi/ic_launcher_round.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/deploy/android_demo/app/src/main/res/mipmap-mdpi/ic_launcher_round.png
--------------------------------------------------------------------------------
/ppdet/modeling/anchor_heads/__pycache__/efficient_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/anchor_heads/__pycache__/efficient_head.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/roi_extractors/__pycache__/roi_extractor.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/roi_extractors/__pycache__/roi_extractor.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/roi_heads/__pycache__/htc_semantic_head.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/roi_heads/__pycache__/htc_semantic_head.cpython-37.pyc
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/mipmap-xhdpi/ic_launcher_round.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/deploy/android_demo/app/src/main/res/mipmap-xhdpi/ic_launcher_round.png
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/deploy/android_demo/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.png
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/deploy/android_demo/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.png
--------------------------------------------------------------------------------
/ppdet/modeling/architectures/__pycache__/cascade_mask_rcnn.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/architectures/__pycache__/cascade_mask_rcnn.cpython-37.pyc
--------------------------------------------------------------------------------
/ppdet/modeling/architectures/__pycache__/cornernet_squeeze.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/architectures/__pycache__/cornernet_squeeze.cpython-37.pyc
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/drawable-xxhdpi-v4/btn_switch_default.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/deploy/android_demo/app/src/main/res/drawable-xxhdpi-v4/btn_switch_default.png
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/drawable-xxhdpi-v4/btn_switch_pressed.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/deploy/android_demo/app/src/main/res/drawable-xxhdpi-v4/btn_switch_pressed.png
--------------------------------------------------------------------------------
/ppdet/modeling/architectures/__pycache__/cascade_rcnn_cls_aware.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sharpiless/PaddleDetection-Pedestrians-Detection-and-Tracking/HEAD/ppdet/modeling/architectures/__pycache__/cascade_rcnn_cls_aware.cpython-37.pyc
--------------------------------------------------------------------------------
/dygraph/dataset/voc/label_list.txt:
--------------------------------------------------------------------------------
1 | aeroplane
2 | bicycle
3 | bird
4 | boat
5 | bottle
6 | bus
7 | car
8 | cat
9 | chair
10 | cow
11 | diningtable
12 | dog
13 | horse
14 | motorbike
15 | person
16 | pottedplant
17 | sheep
18 | sofa
19 | train
20 | tvmonitor
21 |
--------------------------------------------------------------------------------
/dygraph/configs/ssd/ssd_vgg16_300_240e_voc.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/voc.yml',
3 | '../runtime.yml',
4 | '_base_/optimizer_240e.yml',
5 | '_base_/ssd_vgg16_300.yml',
6 | '_base_/ssd_reader.yml',
7 | ]
8 | weights: output/ssd_vgg16_300_240e_voc/model_final
9 |
--------------------------------------------------------------------------------
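The `_BASE_` list above composes the final training config from separate dataset, runtime, optimizer, model, and reader fragments, with keys set in the child file (here `weights`) overriding whatever the bases define. As a rough, hedged sketch only (the real loader lives under ppdet/core/config and also handles the custom `!` tags such as `!PiecewiseDecay`), `_BASE_` resolution for plain-YAML fragments could look like this:

    # Minimal sketch of _BASE_ inheritance for plain YAML fragments; it is NOT
    # PaddleDetection's loader and will not parse custom tags like !PiecewiseDecay.
    import os
    import yaml

    def deep_merge(dst, src):
        for key, value in src.items():
            if isinstance(value, dict) and isinstance(dst.get(key), dict):
                deep_merge(dst[key], value)   # merge nested sections recursively
            else:
                dst[key] = value              # child value wins over the base
        return dst

    def load_config(path):
        with open(path) as f:
            cfg = yaml.safe_load(f) or {}
        merged = {}
        for base in cfg.pop('_BASE_', []):    # bases are merged first, in order
            deep_merge(merged, load_config(os.path.join(os.path.dirname(path), base)))
        return deep_merge(merged, cfg)        # then this file overrides them

    # e.g. load_config('dygraph/configs/ssd/ssd_vgg16_300_240e_voc.yml')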
/dygraph/configs/mask_rcnn/mask_rcnn_r50_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_instance.yml',
3 | '../runtime.yml',
4 | '_base_/optimizer_1x.yml',
5 | '_base_/mask_rcnn_r50.yml',
6 | '_base_/mask_reader.yml',
7 | ]
8 | weights: output/mask_rcnn_r50_1x_coco/model_final
9 |
--------------------------------------------------------------------------------
/dygraph/configs/solov2/solov2_r50_fpn_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_instance.yml',
3 | '../runtime.yml',
4 | '_base_/solov2_r50_fpn.yml',
5 | '_base_/optimizer_1x.yml',
6 | '_base_/solov2_reader.yml',
7 | ]
8 | weights: output/solov2_r50_fpn_1x_coco/model_final
9 |
--------------------------------------------------------------------------------
/dygraph/configs/faster_rcnn/faster_rcnn_r50_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_detection.yml',
3 | '../runtime.yml',
4 | '_base_/optimizer_1x.yml',
5 | '_base_/faster_rcnn_r50.yml',
6 | '_base_/faster_reader.yml',
7 | ]
8 | weights: output/faster_rcnn_r50_1x_coco/model_final
9 |
--------------------------------------------------------------------------------
/dygraph/configs/fcos/fcos_r50_fpn_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_detection.yml',
3 | '../runtime.yml',
4 | '_base_/fcos_r50_fpn.yml',
5 | '_base_/optimizer_1x.yml',
6 | '_base_/fcos_reader.yml',
7 | ]
8 |
9 | weights: output/fcos_r50_fpn_1x_coco/model_final
10 |
--------------------------------------------------------------------------------
/dygraph/configs/ttfnet/ttfnet_darknet53_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_detection.yml',
3 | '../runtime.yml',
4 | '_base_/optimizer_1x.yml',
5 | '_base_/ttfnet_darknet53.yml',
6 | '_base_/ttfnet_reader.yml',
7 | ]
8 | weights: output/ttfnet_darknet53_1x_coco/model_final
9 |
--------------------------------------------------------------------------------
/dygraph/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_instance.yml',
3 | '../runtime.yml',
4 | '_base_/optimizer_1x.yml',
5 | '_base_/mask_rcnn_r50_fpn.yml',
6 | '_base_/mask_fpn_reader.yml',
7 | ]
8 | weights: output/mask_rcnn_r50_fpn_1x_coco/model_final
9 |
--------------------------------------------------------------------------------
/dygraph/configs/ssd/ssd_mobilenet_v1_300_120e_voc.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/voc.yml',
3 | '../runtime.yml',
4 | '_base_/optimizer_120e.yml',
5 | '_base_/ssd_mobilenet_v1_300.yml',
6 | '_base_/ssd_mobilenet_reader.yml',
7 | ]
8 | weights: output/ssd_mobilenet_v1_300_120e_voc/model_final
9 |
--------------------------------------------------------------------------------
/deploy/android_demo/gradle/wrapper/gradle-wrapper.properties:
--------------------------------------------------------------------------------
1 | #Wed Sep 16 11:35:24 CST 2020
2 | distributionBase=GRADLE_USER_HOME
3 | distributionPath=wrapper/dists
4 | zipStoreBase=GRADLE_USER_HOME
5 | zipStorePath=wrapper/dists
6 | distributionUrl=https\://services.gradle.org/distributions/gradle-6.1.1-all.zip
7 |
--------------------------------------------------------------------------------
/dygraph/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_detection.yml',
3 | '../runtime.yml',
4 | '_base_/optimizer_1x.yml',
5 | '_base_/faster_rcnn_r50_fpn.yml',
6 | '_base_/faster_fpn_reader.yml',
7 | ]
8 | weights: output/faster_rcnn_r50_fpn_1x_coco/model_final
9 |
--------------------------------------------------------------------------------
/dygraph/configs/ssd/ssdlite_mobilenet_v1_300_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_detection.yml',
3 | '../runtime.yml',
4 | '_base_/optimizer_1700e.yml',
5 | '_base_/ssdlite_mobilenet_v1_300.yml',
6 | '_base_/ssdlite300_reader.yml',
7 | ]
8 | weights: output/ssdlite_mobilenet_v1_300_coco/model_final
9 |
--------------------------------------------------------------------------------
/dygraph/configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_detection.yml',
3 | '../runtime.yml',
4 | '_base_/optimizer_1x.yml',
5 | '_base_/cascade_rcnn_r50_fpn.yml',
6 | '_base_/cascade_fpn_reader.yml',
7 | ]
8 | weights: output/cascade_rcnn_r50_fpn_1x_coco/model_final
9 |
--------------------------------------------------------------------------------
/dygraph/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_instance.yml',
3 | '../runtime.yml',
4 | '_base_/optimizer_1x.yml',
5 | '_base_/cascade_mask_rcnn_r50_fpn.yml',
6 | '_base_/cascade_mask_fpn_reader.yml',
7 | ]
8 | weights: output/cascade_mask_rcnn_r50_fpn_1x_coco/model_final
9 |
--------------------------------------------------------------------------------
/dygraph/configs/ssd/ssdlite_mobilenet_v3_large_320_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_detection.yml',
3 | '../runtime.yml',
4 | '_base_/optimizer_1700e.yml',
5 | '_base_/ssdlite_mobilenet_v3_large_320.yml',
6 | '_base_/ssdlite320_reader.yml',
7 | ]
8 | weights: output/ssdlite_mobilenet_v3_large_320_coco/model_final
9 |
--------------------------------------------------------------------------------
/dygraph/configs/ssd/ssdlite_mobilenet_v3_small_320_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_detection.yml',
3 | '../runtime.yml',
4 | '_base_/optimizer_1700e.yml',
5 | '_base_/ssdlite_mobilenet_v3_small_320.yml',
6 | '_base_/ssdlite320_reader.yml',
7 | ]
8 | weights: output/ssdlite_mobilenet_v3_small_320_coco/model_final
9 |
--------------------------------------------------------------------------------
/dygraph/configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_detection.yml',
3 | '../runtime.yml',
4 | './_base_/ppyolo_r50vd_dcn.yml',
5 | './_base_/optimizer_1x.yml',
6 | './_base_/ppyolo_reader.yml',
7 | ]
8 |
9 | snapshot_epoch: 16
10 | weights: output/ppyolo_r50vd_dcn_1x_coco/model_final
11 |
--------------------------------------------------------------------------------
/dygraph/configs/ppyolo/ppyolo_r50vd_dcn_2x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_detection.yml',
3 | '../runtime.yml',
4 | './_base_/ppyolo_r50vd_dcn.yml',
5 | './_base_/optimizer_2x.yml',
6 | './_base_/ppyolo_reader.yml',
7 | ]
8 |
9 | snapshot_epoch: 16
10 | weights: output/ppyolo_r50vd_dcn_2x_coco/model_final
11 |
--------------------------------------------------------------------------------
/dygraph/configs/yolov3/yolov3_darknet53_270e_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_detection.yml',
3 | '../runtime.yml',
4 | '_base_/optimizer_270e.yml',
5 | '_base_/yolov3_darknet53.yml',
6 | '_base_/yolov3_reader.yml',
7 | ]
8 |
9 | snapshot_epoch: 5
10 | weights: output/yolov3_darknet53_270e_coco/model_final
11 |
--------------------------------------------------------------------------------
/dygraph/configs/yolov3/yolov3_r50vd_dcn_270e_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_detection.yml',
3 | '../runtime.yml',
4 | '_base_/optimizer_270e.yml',
5 | '_base_/yolov3_r50vd_dcn.yml',
6 | '_base_/yolov3_reader.yml',
7 | ]
8 |
9 | snapshot_epoch: 5
10 | weights: output/yolov3_r50vd_dcn_270e_coco/model_final
11 |
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/drawable/btn_shutter.xml:
--------------------------------------------------------------------------------
(XML markup stripped in this export; content not recoverable)
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/drawable/btn_switch.xml:
--------------------------------------------------------------------------------
(XML markup stripped in this export; content not recoverable)
--------------------------------------------------------------------------------
/dygraph/configs/yolov3/yolov3_mobilenet_v1_270e_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_detection.yml',
3 | '../runtime.yml',
4 | '_base_/optimizer_270e.yml',
5 | '_base_/yolov3_mobilenet_v1.yml',
6 | '_base_/yolov3_reader.yml',
7 | ]
8 |
9 | snapshot_epoch: 5
10 | weights: output/yolov3_mobilenet_v1_270e_coco/model_final
11 |
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/mipmap-anydpi-v26/ic_launcher.xml:
--------------------------------------------------------------------------------
(XML markup stripped in this export; content not recoverable)
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/drawable/btn_settings.xml:
--------------------------------------------------------------------------------
(XML markup stripped in this export; content not recoverable)
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml:
--------------------------------------------------------------------------------
(XML markup stripped in this export; content not recoverable)
--------------------------------------------------------------------------------
/dygraph/configs/yolov3/yolov3_mobilenet_v3_large_270e_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_detection.yml',
3 | '../runtime.yml',
4 | '_base_/optimizer_270e.yml',
5 | '_base_/yolov3_mobilenet_v3_large.yml',
6 | '_base_/yolov3_reader.yml',
7 | ]
8 |
9 | snapshot_epoch: 5
10 | weights: output/yolov3_mobilenet_v3_large_270e_coco/model_final
11 |
--------------------------------------------------------------------------------
/dygraph/ppdet/modeling/backbones/__init__.py:
--------------------------------------------------------------------------------
1 | from . import vgg
2 | from . import resnet
3 | from . import darknet
4 | from . import mobilenet_v1
5 | from . import mobilenet_v3
6 | from . import hrnet
7 |
8 | from .vgg import *
9 | from .resnet import *
10 | from .darknet import *
11 | from .mobilenet_v1 import *
12 | from .mobilenet_v3 import *
13 | from .hrnet import *
14 |
--------------------------------------------------------------------------------
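The wildcard re-imports above flatten each backbone module's public names into the ppdet.modeling.backbones package, so the rest of the codebase and the config system can refer to backbones by a flat name. A purely illustrative example (it assumes resnet.py defines a ResNet class and lists it in __all__, which is not shown in this dump):

    # Hypothetical usage relying on the package-level re-exports above.
    from ppdet.modeling import backbones

    backbone_cls = backbones.ResNet          # found via `from .resnet import *`
    # equivalent, module-qualified form:
    # from ppdet.modeling.backbones.resnet import ResNet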
/dygraph/configs/mask_rcnn/mask_rcnn_r50_2x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | 'mask_rcnn_r50_1x_coco.yml',
3 | ]
4 | weights: output/mask_rcnn_r50_2x_coco/model_final
5 |
6 | epoch: 24
7 | LearningRate:
8 | base_lr: 0.01
9 | schedulers:
10 | - !PiecewiseDecay
11 | gamma: 0.1
12 | milestones: [16, 22]
13 | - !LinearWarmup
14 | start_factor: 0.3333333333333333
15 | steps: 500
16 |
--------------------------------------------------------------------------------
/dygraph/configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | 'faster_rcnn_r50_fpn_1x_coco.yml',
3 | ]
4 | weights: output/faster_rcnn_r50_fpn_2x_coco/model_final
5 |
6 | epoch: 24
7 | LearningRate:
8 | base_lr: 0.01
9 | schedulers:
10 | - !PiecewiseDecay
11 | gamma: 0.1
12 | milestones: [16, 22]
13 | - !LinearWarmup
14 | start_factor: 0.1
15 | steps: 1000
16 |
--------------------------------------------------------------------------------
/deploy/android_demo/local.properties:
--------------------------------------------------------------------------------
1 | ## This file must *NOT* be checked into Version Control Systems,
2 | # as it contains information specific to your local configuration.
3 | #
4 | # Location of the SDK. This is only used by Gradle.
5 | # For customization when using a Version Control System, please read the
6 | # header note.
7 | #Wed Sep 16 11:35:13 CST 2020
8 | sdk.dir=/Users/path/to/Library/Android/sdk
9 |
--------------------------------------------------------------------------------
/dygraph/configs/mask_rcnn/mask_rcnn_r50_fpn_2x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | 'mask_rcnn_r50_fpn_1x_coco.yml',
3 | ]
4 | weights: output/mask_rcnn_r50_fpn_2x_coco/model_final
5 |
6 | epoch: 24
7 | LearningRate:
8 | base_lr: 0.01
9 | schedulers:
10 | - !PiecewiseDecay
11 | gamma: 0.1
12 | milestones: [16, 22]
13 | - !LinearWarmup
14 | start_factor: 0.3333333333333333
15 | steps: 500
16 |
--------------------------------------------------------------------------------
/dygraph/configs/ssd/_base_/optimizer_1700e.yml:
--------------------------------------------------------------------------------
1 | epoch: 1700
2 |
3 | LearningRate:
4 | base_lr: 0.4
5 | schedulers:
6 | - !CosineDecay
7 | max_epochs: 1700
8 | - !LinearWarmup
9 | start_factor: 0.3333333333333333
10 | steps: 2000
11 |
12 | OptimizerBuilder:
13 | optimizer:
14 | momentum: 0.9
15 | type: Momentum
16 | regularizer:
17 | factor: 0.0005
18 | type: L2
19 |
--------------------------------------------------------------------------------
/deploy/android_demo/app/local.properties:
--------------------------------------------------------------------------------
1 | ## This file must *NOT* be checked into Version Control Systems,
2 | # as it contains information specific to your local configuration.
3 | #
4 | # Location of the SDK. This is only used by Gradle.
5 | # For customization when using a Version Control System, please read the
6 | # header note.
7 | #Wed Sep 16 11:31:42 CST 2020
8 | sdk.dir=/Users/yuguanghua02/Library/Android/sdk
9 |
--------------------------------------------------------------------------------
/dygraph/configs/solov2/_base_/optimizer_1x.yml:
--------------------------------------------------------------------------------
1 | epoch: 12
2 |
3 | LearningRate:
4 | base_lr: 0.01
5 | schedulers:
6 | - !PiecewiseDecay
7 | gamma: 0.1
8 | milestones: [8, 11]
9 | - !LinearWarmup
10 | start_factor: 0.
11 | steps: 1000
12 |
13 | OptimizerBuilder:
14 | optimizer:
15 | momentum: 0.9
16 | type: Momentum
17 | regularizer:
18 | factor: 0.0001
19 | type: L2
20 |
--------------------------------------------------------------------------------
/dygraph/configs/ttfnet/_base_/optimizer_1x.yml:
--------------------------------------------------------------------------------
1 | epoch: 12
2 |
3 | LearningRate:
4 | base_lr: 0.015
5 | schedulers:
6 | - !PiecewiseDecay
7 | gamma: 0.1
8 | milestones: [8, 11]
9 | - !LinearWarmup
10 | start_factor: 0.2
11 | steps: 500
12 |
13 | OptimizerBuilder:
14 | optimizer:
15 | momentum: 0.9
16 | type: Momentum
17 | regularizer:
18 | factor: 0.0004
19 | type: L2
20 |
--------------------------------------------------------------------------------
/dygraph/configs/faster_rcnn/_base_/optimizer_1x.yml:
--------------------------------------------------------------------------------
1 | epoch: 12
2 |
3 | LearningRate:
4 | base_lr: 0.01
5 | schedulers:
6 | - !PiecewiseDecay
7 | gamma: 0.1
8 | milestones: [8, 11]
9 | - !LinearWarmup
10 | start_factor: 0.1
11 | steps: 1000
12 |
13 | OptimizerBuilder:
14 | optimizer:
15 | momentum: 0.9
16 | type: Momentum
17 | regularizer:
18 | factor: 0.0001
19 | type: L2
20 |
--------------------------------------------------------------------------------
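The same two schedulers recur across the optimizer fragments in this dump. Under the semantics assumed here (LinearWarmup ramps from start_factor * base_lr up to base_lr over the first `steps` iterations; PiecewiseDecay multiplies base_lr by `gamma` at each milestone epoch), the 1x recipe above works out as in this small sketch, which is an illustration rather than PaddleDetection code:

    # Assumed 1x schedule: base_lr 0.01, 1000-iteration warmup from factor 0.1,
    # decay by gamma=0.1 at epochs 8 and 11 (illustrative only).
    def lr_at(epoch, iteration, base_lr=0.01, start_factor=0.1, warmup_steps=1000,
              milestones=(8, 11), gamma=0.1):
        # PiecewiseDecay: one gamma factor per milestone already reached.
        lr = base_lr * gamma ** sum(epoch >= m for m in milestones)
        # LinearWarmup: only during the first warmup_steps iterations of training.
        if epoch == 0 and iteration < warmup_steps:
            alpha = iteration / warmup_steps
            lr *= start_factor * (1 - alpha) + alpha
        return lr

    print(lr_at(0, 0))       # ~0.001   (warmup start: 0.1 * base_lr)
    print(lr_at(0, 500))     # ~0.0055  (halfway through warmup)
    print(lr_at(7, 0))       # 0.01     (after warmup, before any milestone)
    print(lr_at(8, 0))       # ~0.001   (first decay at epoch 8)
    print(lr_at(11, 0))      # ~0.0001  (second decay at epoch 11)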
/dygraph/configs/mask_rcnn/_base_/optimizer_1x.yml:
--------------------------------------------------------------------------------
1 | epoch: 12
2 |
3 | LearningRate:
4 | base_lr: 0.01
5 | schedulers:
6 | - !PiecewiseDecay
7 | gamma: 0.1
8 | milestones: [8, 11]
9 | - !LinearWarmup
10 | start_factor: 0.001
11 | steps: 1000
12 |
13 | OptimizerBuilder:
14 | optimizer:
15 | momentum: 0.9
16 | type: Momentum
17 | regularizer:
18 | factor: 0.0001
19 | type: L2
20 |
--------------------------------------------------------------------------------
/dygraph/configs/cascade_rcnn/_base_/optimizer_1x.yml:
--------------------------------------------------------------------------------
1 | epoch: 12
2 |
3 | LearningRate:
4 | base_lr: 0.01
5 | schedulers:
6 | - !PiecewiseDecay
7 | gamma: 0.1
8 | milestones: [8, 11]
9 | - !LinearWarmup
10 | start_factor: 0.001
11 | steps: 1000
12 |
13 | OptimizerBuilder:
14 | optimizer:
15 | momentum: 0.9
16 | type: Momentum
17 | regularizer:
18 | factor: 0.0001
19 | type: L2
20 |
--------------------------------------------------------------------------------
/dygraph/configs/faster_rcnn/faster_rcnn_r101_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | 'faster_rcnn_r50_1x_coco.yml',
3 | ]
4 |
5 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNet101_pretrained.tar
6 | weights: output/faster_rcnn_r101_1x_coco/model_final
7 |
8 | ResNet:
9 | # index 0 stands for res2
10 | depth: 101
11 | norm_type: bn
12 | freeze_at: 0
13 | return_idx: [2]
14 | num_stages: 3
15 |
--------------------------------------------------------------------------------
/dygraph/configs/mask_rcnn/mask_rcnn_r101_fpn_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | 'mask_rcnn_r50_fpn_1x_coco.yml',
3 | ]
4 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNet101_pretrained.tar
5 | weights: output/mask_rcnn_r101_fpn_1x_coco/model_final
6 |
7 | ResNet:
8 | # index 0 stands for res2
9 | depth: 101
10 | norm_type: bn
11 | freeze_at: 0
12 | return_idx: [0,1,2,3]
13 | num_stages: 4
14 |
--------------------------------------------------------------------------------
/dygraph/configs/ppyolo/ppyolo_test.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_detection.yml',
3 | '../runtime.yml',
4 | './_base_/ppyolo_r50vd_dcn.yml',
5 | './_base_/ppyolo_1x.yml',
6 | './_base_/ppyolo_reader.yml',
7 | ]
8 |
9 | snapshot_epoch: 16
10 |
11 | EvalDataset:
12 | !COCODataSet
13 | image_dir: test2017
14 | anno_path: annotations/image_info_test-dev2017.json
15 | dataset_dir: dataset/coco
16 |
--------------------------------------------------------------------------------
/dygraph/configs/ssd/_base_/optimizer_120e.yml:
--------------------------------------------------------------------------------
1 | epoch: 120
2 |
3 | LearningRate:
4 | base_lr: 0.001
5 | schedulers:
6 | - !PiecewiseDecay
7 | milestones: [40, 60, 80, 100]
8 | values: [0.001, 0.0005, 0.00025, 0.0001, 0.00001]
9 | use_warmup: false
10 |
11 | OptimizerBuilder:
12 | optimizer:
13 | momentum: 0.0
14 | type: RMSProp
15 | regularizer:
16 | factor: 0.00005
17 | type: L2
18 |
--------------------------------------------------------------------------------
/dygraph/configs/fcos/_base_/optimizer_1x.yml:
--------------------------------------------------------------------------------
1 | epoch: 12
2 |
3 | LearningRate:
4 | base_lr: 0.01
5 | schedulers:
6 | - !PiecewiseDecay
7 | gamma: 0.1
8 | milestones: [8, 11]
9 | - !LinearWarmup
10 | start_factor: 0.3333333333333333
11 | steps: 500
12 |
13 | OptimizerBuilder:
14 | optimizer:
15 | momentum: 0.9
16 | type: Momentum
17 | regularizer:
18 | factor: 0.0001
19 | type: L2
20 |
--------------------------------------------------------------------------------
/dygraph/configs/faster_rcnn/faster_rcnn_r34_fpn_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | 'faster_rcnn_r50_fpn_1x_coco.yml',
3 | ]
4 |
5 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNet34_pretrained.tar
6 | weights: output/faster_rcnn_r34_fpn_1x_coco/model_final
7 |
8 | ResNet:
9 | # index 0 stands for res2
10 | depth: 34
11 | norm_type: bn
12 | freeze_at: 0
13 | return_idx: [0,1,2,3]
14 | num_stages: 4
15 |
--------------------------------------------------------------------------------
/dygraph/configs/faster_rcnn/faster_rcnn_r101_fpn_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | 'faster_rcnn_r50_fpn_1x_coco.yml',
3 | ]
4 |
5 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNet101_pretrained.tar
6 | weights: output/faster_rcnn_r101_fpn_1x_coco/model_final
7 |
8 | ResNet:
9 | # index 0 stands for res2
10 | depth: 101
11 | norm_type: bn
12 | freeze_at: 0
13 | return_idx: [0,1,2,3]
14 | num_stages: 4
15 |
--------------------------------------------------------------------------------
/dygraph/configs/faster_rcnn/faster_rcnn_r50_vd_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | 'faster_rcnn_r50_1x_coco.yml',
3 | ]
4 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_vd_pretrained.tar
5 | weights: output/faster_rcnn_r50_vd_1x_coco/model_final
6 |
7 | ResNet:
8 | # index 0 stands for res2
9 | depth: 50
10 | variant: d
11 | norm_type: bn
12 | freeze_at: 0
13 | return_idx: [2]
14 | num_stages: 3
15 |
--------------------------------------------------------------------------------
/dygraph/configs/ppyolo/_base_/optimizer_1x.yml:
--------------------------------------------------------------------------------
1 | epoch: 405
2 |
3 | LearningRate:
4 | base_lr: 0.01
5 | schedulers:
6 | - !PiecewiseDecay
7 | gamma: 0.1
8 | milestones:
9 | - 243
10 | - 324
11 | - !LinearWarmup
12 | start_factor: 0.
13 | steps: 4000
14 |
15 | OptimizerBuilder:
16 | optimizer:
17 | momentum: 0.9
18 | type: Momentum
19 | regularizer:
20 | factor: 0.0005
21 | type: L2
22 |
--------------------------------------------------------------------------------
/dygraph/configs/ppyolo/_base_/optimizer_2x.yml:
--------------------------------------------------------------------------------
1 | epoch: 811
2 |
3 | LearningRate:
4 | base_lr: 0.01
5 | schedulers:
6 | - !PiecewiseDecay
7 | gamma: 0.1
8 | milestones:
9 | - 649
10 | - 730
11 | - !LinearWarmup
12 | start_factor: 0.
13 | steps: 4000
14 |
15 | OptimizerBuilder:
16 | optimizer:
17 | momentum: 0.9
18 | type: Momentum
19 | regularizer:
20 | factor: 0.0005
21 | type: L2
22 |
--------------------------------------------------------------------------------
/dygraph/configs/yolov3/_base_/optimizer_270e.yml:
--------------------------------------------------------------------------------
1 | epoch: 270
2 |
3 | LearningRate:
4 | base_lr: 0.001
5 | schedulers:
6 | - !PiecewiseDecay
7 | gamma: 0.1
8 | milestones:
9 | - 216
10 | - 243
11 | - !LinearWarmup
12 | start_factor: 0.
13 | steps: 4000
14 |
15 | OptimizerBuilder:
16 | optimizer:
17 | momentum: 0.9
18 | type: Momentum
19 | regularizer:
20 | factor: 0.0005
21 | type: L2
22 |
--------------------------------------------------------------------------------
/dygraph/configs/mask_rcnn/mask_rcnn_r101_vd_fpn_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | 'mask_rcnn_r50_fpn_1x_coco.yml',
3 | ]
4 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNet101_vd_pretrained.tar
5 | weights: output/mask_rcnn_r101_vd_fpn_1x_coco/model_final
6 |
7 | ResNet:
8 | # index 0 stands for res2
9 | depth: 101
10 | variant: d
11 | norm_type: bn
12 | freeze_at: 0
13 | return_idx: [0,1,2,3]
14 | num_stages: 4
15 |
--------------------------------------------------------------------------------
/dygraph/configs/mask_rcnn/mask_rcnn_r50_vd_fpn_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | 'mask_rcnn_r50_fpn_1x_coco.yml',
3 | ]
4 |
5 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_vd_pretrained.tar
6 | weights: output/mask_rcnn_r50_vd_fpn_1x_coco/model_final
7 |
8 | ResNet:
9 | # index 0 stands for res2
10 | depth: 50
11 | variant: d
12 | norm_type: bn
13 | freeze_at: 0
14 | return_idx: [0,1,2,3]
15 | num_stages: 4
16 |
--------------------------------------------------------------------------------
/dygraph/configs/ssd/_base_/optimizer_240e.yml:
--------------------------------------------------------------------------------
1 | epoch: 240
2 |
3 | LearningRate:
4 | base_lr: 0.001
5 | schedulers:
6 | - !PiecewiseDecay
7 | gamma: 0.1
8 | milestones:
9 | - 160
10 | - 200
11 | - !LinearWarmup
12 | start_factor: 0.3333333333333333
13 | steps: 500
14 |
15 | OptimizerBuilder:
16 | optimizer:
17 | momentum: 0.9
18 | type: Momentum
19 | regularizer:
20 | factor: 0.0005
21 | type: L2
22 |
--------------------------------------------------------------------------------
/dygraph/configs/faster_rcnn/faster_rcnn_r101_vd_fpn_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | 'faster_rcnn_r50_fpn_1x_coco.yml',
3 | ]
4 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNet101_vd_pretrained.tar
5 | weights: output/faster_rcnn_r101_vd_fpn_1x_coco/model_final
6 |
7 | ResNet:
8 | # index 0 stands for res2
9 | depth: 101
10 | variant: d
11 | norm_type: bn
12 | freeze_at: 0
13 | return_idx: [0,1,2,3]
14 | num_stages: 4
15 |
--------------------------------------------------------------------------------
/dygraph/configs/faster_rcnn/faster_rcnn_r50_vd_fpn_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | 'faster_rcnn_r50_fpn_1x_coco.yml',
3 | ]
4 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_vd_pretrained.tar
5 | weights: output/faster_rcnn_r50_vd_fpn_1x_coco/model_final
6 |
7 | ResNet:
8 | # index 0 stands for res2
9 | depth: 50
10 | variant: d
11 | norm_type: bn
12 | freeze_at: 0
13 | return_idx: [0,1,2,3]
14 | num_stages: 4
15 |
--------------------------------------------------------------------------------
/dygraph/configs/faster_rcnn/faster_rcnn_r34_vd_fpn_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | 'faster_rcnn_r50_fpn_1x_coco.yml',
3 | ]
4 |
5 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNet34_vd_pretrained.tar
6 | weights: output/faster_rcnn_r34_vd_fpn_1x_coco/model_final
7 |
8 | ResNet:
9 | # index 0 stands for res2
10 | depth: 34
11 | variant: d
12 | norm_type: bn
13 | freeze_at: 0
14 | return_idx: [0,1,2,3]
15 | num_stages: 4
16 |
--------------------------------------------------------------------------------
/dygraph/configs/dcn/faster_rcnn_dcn_r50_vd_fpn_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | 'faster_rcnn_dcn_r50_fpn_1x_coco.yml',
3 | ]
4 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_vd_pretrained.tar
5 | weights: output/faster_rcnn_dcn_r50_vd_fpn_1x_coco/model_final
6 |
7 | ResNet:
8 | # index 0 stands for res2
9 | depth: 50
10 | variant: d
11 | norm_type: bn
12 | freeze_at: 0
13 | return_idx: [0,1,2,3]
14 | num_stages: 4
15 | dcn_v2_stages: [1,2,3]
16 |
--------------------------------------------------------------------------------
/dygraph/configs/dcn/mask_rcnn_dcn_r101_vd_fpn_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | 'mask_rcnn_dcn_r50_fpn_1x_coco.yml',
3 | ]
4 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNet101_vd_pretrained.tar
5 | weights: output/mask_rcnn_dcn_r101_vd_fpn_1x_coco/model_final
6 |
7 | ResNet:
8 | # index 0 stands for res2
9 | depth: 101
10 | variant: d
11 | norm_type: bn
12 | freeze_at: 0
13 | return_idx: [0,1,2,3]
14 | num_stages: 4
15 | dcn_v2_stages: [1,2,3]
16 |
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/values/colors.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | #6200EE
4 | #3700B3
5 | #1E90FF
6 | #FF000000
7 | #00000000
8 | #00000000
9 | #FFFFFFFF
10 |
11 |
--------------------------------------------------------------------------------
/dygraph/configs/dcn/faster_rcnn_dcn_r101_vd_fpn_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | 'faster_rcnn_dcn_r50_fpn_1x_coco.yml',
3 | ]
4 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNet101_vd_pretrained.tar
5 | weights: output/faster_rcnn_dcn_r101_vd_fpn_1x_coco/model_final
6 |
7 | ResNet:
8 | # index 0 stands for res2
9 | depth: 101
10 | variant: d
11 | norm_type: bn
12 | freeze_at: 0
13 | return_idx: [0,1,2,3]
14 | num_stages: 4
15 | dcn_v2_stages: [1,2,3]
16 |
--------------------------------------------------------------------------------
/dygraph/configs/dcn/mask_rcnn_dcn_r50_fpn_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_instance.yml',
3 | '../runtime.yml',
4 | '../mask_rcnn/_base_/optimizer_1x.yml',
5 | '../mask_rcnn/_base_/mask_rcnn_r50_fpn.yml',
6 | '../mask_rcnn/_base_/mask_fpn_reader.yml',
7 | ]
8 | weights: output/mask_rcnn_dcn_r50_fpn_1x_coco/model_final
9 |
10 | ResNet:
11 | depth: 50
12 | norm_type: bn
13 | freeze_at: 0
14 | return_idx: [0,1,2,3]
15 | num_stages: 4
16 | dcn_v2_stages: [1,2,3]
17 |
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/menu/menu_main.xml:
--------------------------------------------------------------------------------
(XML markup stripped in this export; content not recoverable)
--------------------------------------------------------------------------------
/dygraph/configs/dcn/faster_rcnn_dcn_r50_fpn_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_detection.yml',
3 | '../runtime.yml',
4 | '../faster_rcnn/_base_/optimizer_1x.yml',
5 | '../faster_rcnn/_base_/faster_rcnn_r50_fpn.yml',
6 | '../faster_rcnn/_base_/faster_fpn_reader.yml',
7 | ]
8 | weights: output/faster_rcnn_dcn_r50_fpn_1x_coco/model_final
9 |
10 | ResNet:
11 | depth: 50
12 | norm_type: bn
13 | freeze_at: 0
14 | return_idx: [0,1,2,3]
15 | num_stages: 4
16 | dcn_v2_stages: [1,2,3]
17 |
--------------------------------------------------------------------------------
/dygraph/configs/dcn/cascade_rcnn_dcn_r50_fpn_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_detection.yml',
3 | '../runtime.yml',
4 | '../cascade_rcnn/_base_/optimizer_1x.yml',
5 | '../cascade_rcnn/_base_/cascade_rcnn_r50_fpn.yml',
6 | '../cascade_rcnn/_base_/cascade_fpn_reader.yml',
7 | ]
8 | weights: output/cascade_rcnn_dcn_r50_fpn_1x_coco/model_final
9 |
10 | ResNet:
11 | depth: 50
12 | norm_type: bn
13 | freeze_at: 0
14 | return_idx: [0,1,2,3]
15 | num_stages: 4
16 | dcn_v2_stages: [1,2,3]
17 |
--------------------------------------------------------------------------------
/dygraph/configs/dcn/cascade_rcnn_dcn_x101_vd_64x4d_fpn_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | 'cascade_rcnn_dcn_r50_fpn_1x_coco.yml',
3 | ]
4 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNeXt101_vd_64x4d_pretrained.tar
5 | weights: output/cascade_rcnn_dcn_x101_vd_64x4d_fpn_1x_coco/model_final
6 |
7 | ResNet:
8 | depth: 101
9 | groups: 64
10 | base_width: 4
11 | base_channels: 64
12 | variant: d
13 | norm_type: bn
14 | freeze_at: 0
15 | return_idx: [0,1,2,3]
16 | num_stages: 4
17 | dcn_v2_stages: [1,2,3]
18 |
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/test/java/com/baidu/paddledetection/detection/ExampleUnitTest.java:
--------------------------------------------------------------------------------
1 | package com.baidu.paddledetection.detection;
2 |
3 | import org.junit.Test;
4 |
5 | import static org.junit.Assert.*;
6 |
7 | /**
8 | * Example local unit test, which will execute on the development machine (host).
9 | *
10 | * @see Testing documentation
11 | */
12 | public class ExampleUnitTest {
13 | @Test
14 | public void addition_isCorrect() {
15 | assertEquals(4, 2 + 2);
16 | }
17 | }
--------------------------------------------------------------------------------
/dygraph/configs/datasets/coco_detection.yml:
--------------------------------------------------------------------------------
1 | metric: COCO
2 | num_classes: 80
3 |
4 | TrainDataset:
5 | !COCODataSet
6 | image_dir: train2017
7 | anno_path: annotations/instances_train2017.json
8 | dataset_dir: dataset/coco
9 | data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd']
10 |
11 | EvalDataset:
12 | !COCODataSet
13 | image_dir: val2017
14 | anno_path: annotations/instances_val2017.json
15 | dataset_dir: dataset/coco
16 |
17 | TestDataset:
18 | !ImageFolder
19 | anno_path: annotations/instances_val2017.json
20 |
--------------------------------------------------------------------------------
/dygraph/configs/datasets/coco_instance.yml:
--------------------------------------------------------------------------------
1 | metric: COCO
2 | num_classes: 80
3 |
4 | TrainDataset:
5 | !COCODataSet
6 | image_dir: train2017
7 | anno_path: annotations/instances_train2017.json
8 | dataset_dir: dataset/coco
9 | data_fields: ['image', 'gt_bbox', 'gt_class', 'gt_poly', 'is_crowd']
10 |
11 | EvalDataset:
12 | !COCODataSet
13 | image_dir: val2017
14 | anno_path: annotations/instances_val2017.json
15 | dataset_dir: dataset/coco
16 |
17 | TestDataset:
18 | !ImageFolder
19 | anno_path: annotations/instances_val2017.json
20 |
--------------------------------------------------------------------------------
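The dataset configs above use application-specific YAML tags such as `!COCODataSet` and `!ImageFolder`. A minimal sketch of reading one of these files with plain PyYAML, assuming only that each tag wraps an ordinary mapping (an illustration of the file format, not ppdet's actual config loader):

```python
import yaml

# Plain yaml.safe_load() rejects unknown tags, so register a constructor for
# each dataset tag first.  The constructor keeps the tag name alongside the
# mapping so the caller still knows the dataset type.
DATASET_TAGS = ["!COCODataSet", "!VOCDataSet", "!ImageFolder"]

def _as_dict(loader, node):
    data = loader.construct_mapping(node, deep=True)
    data["_type"] = node.tag.lstrip("!")
    return data

for tag in DATASET_TAGS:
    yaml.SafeLoader.add_constructor(tag, _as_dict)

with open("dygraph/configs/datasets/coco_detection.yml") as f:
    cfg = yaml.safe_load(f)

print(cfg["metric"], cfg["num_classes"])   # COCO 80
print(cfg["TrainDataset"]["_type"])        # COCODataSet
print(cfg["TrainDataset"]["anno_path"])    # annotations/instances_train2017.json
```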
/dygraph/configs/faster_rcnn/faster_rcnn_x101_vd_64x4d_fpn_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | 'faster_rcnn_r50_fpn_1x_coco.yml',
3 | ]
4 |
5 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNeXt101_vd_64x4d_pretrained.tar
6 | weights: output/faster_rcnn_x101_vd_64x4d_fpn_1x_coco/model_final
7 |
8 | ResNet:
9 | # for ResNeXt: groups, base_width, base_channels
10 | depth: 101
11 | groups: 64
12 | base_width: 4
13 | base_channels: 64
14 | variant: d
15 | norm_type: bn
16 | freeze_at: 0
17 | return_idx: [0,1,2,3]
18 | num_stages: 4
19 |
--------------------------------------------------------------------------------
/dygraph/configs/dcn/mask_rcnn_dcn_x101_vd_64x4d_fpn_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | 'mask_rcnn_dcn_r50_fpn_1x_coco.yml',
3 | ]
4 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNeXt101_vd_64x4d_pretrained.tar
5 | weights: output/mask_rcnn_dcn_x101_vd_64x4d_fpn_1x_coco/model_final
6 |
7 | ResNet:
8 | # for ResNeXt: groups, base_width, base_channels
9 | depth: 101
10 | variant: d
11 | groups: 64
12 | base_width: 4
13 | base_channels: 64
14 | norm_type: bn
15 | freeze_at: 0
16 | return_idx: [0,1,2,3]
17 | num_stages: 4
18 | dcn_v2_stages: [1,2,3]
19 |
--------------------------------------------------------------------------------
/dygraph/configs/datasets/voc.yml:
--------------------------------------------------------------------------------
1 | metric: VOC
2 | map_type: 11point
3 | num_classes: 20
4 |
5 | TrainDataset:
6 | !VOCDataSet
7 | dataset_dir: dataset/voc
8 | anno_path: trainval.txt
9 | label_list: label_list.txt
10 | data_fields: ['image', 'gt_bbox', 'gt_class', 'difficult']
11 |
12 | EvalDataset:
13 | !VOCDataSet
14 | dataset_dir: dataset/voc
15 | anno_path: test.txt
16 | label_list: label_list.txt
17 | data_fields: ['image', 'gt_bbox', 'gt_class', 'difficult']
18 |
19 | TestDataset:
20 | !ImageFolder
21 | anno_path: dataset/voc/label_list.txt
22 |
--------------------------------------------------------------------------------
/dygraph/configs/dcn/faster_rcnn_dcn_x101_vd_64x4d_fpn_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | 'faster_rcnn_dcn_r50_fpn_1x_coco.yml',
3 | ]
4 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNeXt101_vd_64x4d_pretrained.tar
5 | weights: output/faster_rcnn_dcn_x101_vd_64x4d_fpn_1x_coco/model_final
6 |
7 | ResNet:
8 | # for ResNeXt: groups, base_width, base_channels
9 | depth: 101
10 | groups: 64
11 | base_width: 4
12 | base_channels: 64
13 | variant: d
14 | norm_type: bn
15 | freeze_at: 0
16 | return_idx: [0,1,2,3]
17 | num_stages: 4
18 | dcn_v2_stages: [1,2,3]
19 |
--------------------------------------------------------------------------------
/dygraph/ppdet/modeling/__init__.py:
--------------------------------------------------------------------------------
1 | from . import ops
2 | from . import backbones
3 | from . import necks
4 | from . import proposal_generator
5 | from . import heads
6 | from . import losses
7 | from . import architectures
8 | from . import post_process
9 | from . import layers
10 | from . import utils
11 |
12 | from .ops import *
13 | from .backbones import *
14 | from .necks import *
15 | from .proposal_generator import *
16 | from .heads import *
17 | from .losses import *
18 | from .architectures import *
19 | from .post_process import *
20 | from .layers import *
21 | from .utils import *
22 |
--------------------------------------------------------------------------------
/dygraph/configs/slim/quant/yolov3_mobilenet_v1_qat.yml:
--------------------------------------------------------------------------------
1 | # Weights of yolov3_mobilenet_v1_coco
2 | pretrain_weights: https://paddlemodels.bj.bcebos.com/object_detection/dygraph/yolov3_mobilenet_v1_270e_coco.pdparams
3 | load_static_weights: False
4 | weight_type: resume
5 | slim: QAT
6 |
7 | QAT:
8 | quant_config: {
9 | 'weight_quantize_type': 'channel_wise_abs_max', 'activation_quantize_type': 'moving_average_abs_max',
10 | 'weight_bits': 8, 'activation_bits': 8, 'dtype': 'int8', 'window_size': 10000, 'moving_rate': 0.9,
11 | 'quantizable_layer_type': ['Conv2D', 'Linear']}
12 | print_model: True
13 |
--------------------------------------------------------------------------------
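The `quant_config` above names an 8-bit abs-max scheme with a moving-average scale for activations (`moving_rate: 0.9`). A small numeric sketch of what that fake-quantization step does, assuming the usual abs-max formulation (the real implementation is provided by PaddleSlim):

```python
import numpy as np

# Numeric sketch only: 8-bit abs-max fake quantization with a moving-average
# scale for activations, mirroring the idea behind the config above.
BITS = 8
QMAX = 2 ** (BITS - 1) - 1  # 127 for int8

def fake_quant(x, scale):
    """Quantize to int8 levels and immediately dequantize (training-time QAT)."""
    q = np.clip(np.round(x / scale * QMAX), -QMAX, QMAX)
    return q * scale / QMAX

def update_moving_scale(prev_scale, batch, moving_rate=0.9):
    """'moving_average_abs_max': smooth the per-batch abs-max into a running scale."""
    return moving_rate * prev_scale + (1.0 - moving_rate) * np.abs(batch).max()

rng = np.random.default_rng(0)
scale = 1e-6
for _ in range(100):                        # simulate 100 activation batches
    acts = rng.normal(0.0, 0.5, size=1024)
    scale = update_moving_scale(scale, acts)

acts = rng.normal(0.0, 0.5, size=1024)
err = np.abs(acts - fake_quant(acts, scale)).max()
print("running scale: %.4f, max dequant error: %.5f" % (scale, err))
```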
/dygraph/configs/datasets/roadsign_voc.yml:
--------------------------------------------------------------------------------
1 | metric: VOC
2 | map_type: 11point
3 | num_classes: 4
4 |
5 | TrainDataset:
6 | !VOCDataSet
7 | dataset_dir: dataset/roadsign_voc
8 | anno_path: train.txt
9 | label_list: label_list.txt
10 | data_fields: ['image', 'gt_bbox', 'gt_class', 'difficult']
11 |
12 | EvalDataset:
13 | !VOCDataSet
14 | dataset_dir: dataset/roadsign_voc
15 | anno_path: valid.txt
16 | label_list: label_list.txt
17 | data_fields: ['image', 'gt_bbox', 'gt_class', 'difficult']
18 |
19 | TestDataset:
20 | !ImageFolder
21 | anno_path: dataset/roadsign_voc/label_list.txt
22 |
--------------------------------------------------------------------------------
/dygraph/configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_detection.yml',
3 | './_base_/faster_rcnn_hrnetv2p_w18.yml',
4 | '../faster_rcnn/_base_/optimizer_1x.yml',
5 | '../faster_rcnn/_base_/faster_fpn_reader.yml',
6 | '../runtime.yml',
7 | ]
8 |
9 | weights: output/faster_rcnn_hrnetv2p_w18_1x_coco/model_final
10 | epoch: 12
11 |
12 | LearningRate:
13 | base_lr: 0.02
14 | schedulers:
15 | - !PiecewiseDecay
16 | gamma: 0.1
17 | milestones: [8, 11]
18 | - !LinearWarmup
19 | start_factor: 0.1
20 | steps: 1000
21 |
22 | TrainReader:
23 | batch_size: 2
24 |
--------------------------------------------------------------------------------
/dygraph/configs/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_detection.yml',
3 | './_base_/faster_rcnn_hrnetv2p_w18.yml',
4 | '../faster_rcnn/_base_/optimizer_1x.yml',
5 | '../faster_rcnn/_base_/faster_fpn_reader.yml',
6 | '../runtime.yml',
7 | ]
8 |
9 | weights: output/faster_rcnn_hrnetv2p_w18_2x_coco/model_final
10 | epoch: 24
11 |
12 | LearningRate:
13 | base_lr: 0.02
14 | schedulers:
15 | - !PiecewiseDecay
16 | gamma: 0.1
17 | milestones: [16, 22]
18 | - !LinearWarmup
19 | start_factor: 0.1
20 | steps: 1000
21 |
22 | TrainReader:
23 | batch_size: 2
24 |
--------------------------------------------------------------------------------
/dygraph/configs/faster_rcnn/faster_rcnn_r101_fpn_2x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | 'faster_rcnn_r50_fpn_1x_coco.yml',
3 | ]
4 |
5 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNet101_pretrained.tar
6 | weights: output/faster_rcnn_r101_fpn_2x_coco/model_final
7 |
8 | ResNet:
9 | # index 0 stands for res2
10 | depth: 101
11 | norm_type: bn
12 | freeze_at: 0
13 | return_idx: [0,1,2,3]
14 | num_stages: 4
15 |
16 | epoch: 24
17 | LearningRate:
18 | base_lr: 0.01
19 | schedulers:
20 | - !PiecewiseDecay
21 | gamma: 0.1
22 | milestones: [16, 22]
23 | - !LinearWarmup
24 | start_factor: 0.1
25 | steps: 1000
26 |
--------------------------------------------------------------------------------
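The 2x schedules above all follow the same pattern: linear warmup from `start_factor * base_lr` over `steps` iterations, then a `gamma` decay at each epoch milestone. A short sketch of the resulting learning-rate curve, with the semantics assumed from the field names (ppdet's `LearningRate` schedulers are the reference):

```python
# Sketch of the 2x learning-rate schedule: base_lr 0.01, linear warmup from
# 0.1 * base_lr over 1000 steps, then a 10x decay at epochs 16 and 22.
BASE_LR = 0.01
WARMUP_STEPS = 1000
START_FACTOR = 0.1
MILESTONES = [16, 22]   # epochs at which the rate is multiplied by GAMMA
GAMMA = 0.1

def learning_rate(global_step, epoch):
    if global_step < WARMUP_STEPS:
        # Linear ramp from START_FACTOR * BASE_LR up to BASE_LR.
        alpha = global_step / float(WARMUP_STEPS)
        factor = START_FACTOR * (1.0 - alpha) + alpha
        return BASE_LR * factor
    decays = sum(1 for m in MILESTONES if epoch >= m)
    return BASE_LR * (GAMMA ** decays)

print(learning_rate(0, 0))        # 0.001   (warmup start)
print(learning_rate(500, 0))      # 0.0055  (mid warmup)
print(learning_rate(5000, 8))     # 0.01    (full rate)
print(learning_rate(200000, 16))  # 0.001   (after first milestone)
print(learning_rate(300000, 22))  # 0.0001  (after second milestone)
```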
/dygraph/configs/slim/quant/yolov3_mobilenet_v3_qat.yml:
--------------------------------------------------------------------------------
1 | # Weights of yolov3_mobilenet_v3_coco
2 | pretrain_weights: https://paddlemodels.bj.bcebos.com/object_detection/dygraph/yolov3_mobilenet_v3_large_270e_coco.pdparams
3 | load_static_weights: False
4 | weight_type: resume
5 | slim: QAT
6 |
7 | QAT:
8 | quant_config: {
9 | 'weight_preprocess_type': 'PACT',
10 | 'weight_quantize_type': 'channel_wise_abs_max', 'activation_quantize_type': 'moving_average_abs_max',
11 | 'weight_bits': 8, 'activation_bits': 8, 'dtype': 'int8', 'window_size': 10000, 'moving_rate': 0.9,
12 | 'quantizable_layer_type': ['Conv2D', 'Linear']}
13 | print_model: True
14 |
--------------------------------------------------------------------------------
/ppdet/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/tools/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/dygraph/configs/faster_rcnn/faster_rcnn_r50_vd_fpn_2x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | 'faster_rcnn_r50_fpn_1x_coco.yml',
3 | ]
4 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_vd_pretrained.tar
5 | weights: output/faster_rcnn_r50_vd_fpn_2x_coco/model_final
6 |
7 | ResNet:
8 | # index 0 stands for res2
9 | depth: 50
10 | variant: d
11 | norm_type: bn
12 | freeze_at: 0
13 | return_idx: [0,1,2,3]
14 | num_stages: 4
15 |
16 | epoch: 24
17 | LearningRate:
18 | base_lr: 0.01
19 | schedulers:
20 | - !PiecewiseDecay
21 | gamma: 0.1
22 | milestones: [16, 22]
23 | - !LinearWarmup
24 | start_factor: 0.1
25 | steps: 1000
26 |
--------------------------------------------------------------------------------
/dygraph/configs/faster_rcnn/faster_rcnn_r101_vd_fpn_2x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | 'faster_rcnn_r50_fpn_1x_coco.yml',
3 | ]
4 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNet101_vd_pretrained.tar
5 | weights: output/faster_rcnn_r101_vd_fpn_2x_coco/model_final
6 |
7 | ResNet:
8 | # index 0 stands for res2
9 | depth: 101
10 | variant: d
11 | norm_type: bn
12 | freeze_at: 0
13 | return_idx: [0,1,2,3]
14 | num_stages: 4
15 |
16 | epoch: 24
17 | LearningRate:
18 | base_lr: 0.01
19 | schedulers:
20 | - !PiecewiseDecay
21 | gamma: 0.1
22 | milestones: [16, 22]
23 | - !LinearWarmup
24 | start_factor: 0.1
25 | steps: 1000
26 |
--------------------------------------------------------------------------------
/ppdet/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/dygraph/configs/mask_rcnn/mask_rcnn_r50_vd_fpn_2x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | 'mask_rcnn_r50_fpn_1x_coco.yml',
3 | ]
4 |
5 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_vd_pretrained.tar
6 | weights: output/mask_rcnn_r50_vd_fpn_2x_coco/model_final
7 |
8 | ResNet:
9 | # index 0 stands for res2
10 | depth: 50
11 | variant: d
12 | norm_type: bn
13 | freeze_at: 0
14 | return_idx: [0,1,2,3]
15 | num_stages: 4
16 |
17 | epoch: 24
18 | LearningRate:
19 | base_lr: 0.01
20 | schedulers:
21 | - !PiecewiseDecay
22 | gamma: 0.1
23 | milestones: [16, 22]
24 | - !LinearWarmup
25 | start_factor: 0.3333333333333333
26 | steps: 500
27 |
--------------------------------------------------------------------------------
/dygraph/ppdet/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/ppdet/core/config/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/dygraph/ppdet/core/config/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/ppdet/modeling/tests/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/dygraph/ppdet/modeling/tests/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/dygraph/configs/dcn/mask_rcnn_dcn_r50_vd_fpn_2x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | 'mask_rcnn_dcn_r50_fpn_1x_coco.yml',
3 | ]
4 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_vd_pretrained.tar
5 | weights: output/mask_rcnn_dcn_r50_vd_fpn_2x_coco/model_final
6 |
7 | ResNet:
8 | # index 0 stands for res2
9 | depth: 50
10 | variant: d
11 | norm_type: bn
12 | freeze_at: 0
13 | return_idx: [0,1,2,3]
14 | num_stages: 4
15 | dcn_v2_stages: [1,2,3]
16 |
17 | epoch: 24
18 | LearningRate:
19 | base_lr: 0.01
20 | schedulers:
21 | - !PiecewiseDecay
22 | gamma: 0.1
23 | milestones: [16, 22]
24 | - !LinearWarmup
25 | start_factor: 0.1
26 | steps: 1000
27 |
--------------------------------------------------------------------------------
/deploy/android_demo/build.gradle:
--------------------------------------------------------------------------------
1 | // Top-level build file where you can add configuration options common to all sub-projects/modules.
2 |
3 | buildscript {
4 | repositories {
5 | google()
6 | jcenter()
7 |
8 | }
9 | dependencies {
10 | classpath 'com.android.tools.build:gradle:4.0.1'
11 |
12 | // NOTE: Do not place your application dependencies here; they belong
13 | // in the individual module build.gradle files
14 | }
15 | }
16 |
17 | allprojects {
18 | repositories {
19 | google()
20 | jcenter()
21 |
22 | }
23 | }
24 |
25 | task clean(type: Delete) {
26 | delete rootProject.buildDir
27 | }
28 |
--------------------------------------------------------------------------------
/dygraph/configs/dcn/faster_rcnn_dcn_r50_vd_fpn_2x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | 'faster_rcnn_dcn_r50_fpn_1x_coco.yml',
3 | ]
4 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_vd_pretrained.tar
5 | weights: output/faster_rcnn_dcn_r50_vd_fpn_2x_coco/model_final
6 |
7 | ResNet:
8 | # index 0 stands for res2
9 | depth: 50
10 | variant: d
11 | norm_type: bn
12 | freeze_at: 0
13 | return_idx: [0,1,2,3]
14 | num_stages: 4
15 | dcn_v2_stages: [1,2,3]
16 |
17 | epoch: 24
18 | LearningRate:
19 | base_lr: 0.01
20 | schedulers:
21 | - !PiecewiseDecay
22 | gamma: 0.1
23 | milestones: [16, 22]
24 | - !LinearWarmup
25 | start_factor: 0.1
26 | steps: 1000
27 |
--------------------------------------------------------------------------------
/dygraph/ppdet/core/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from . import config
16 |
--------------------------------------------------------------------------------
/dygraph/ppdet/modeling/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from . import bbox_util
16 |
17 | from .bbox_util import *
18 |
--------------------------------------------------------------------------------
/ppdet/core/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import ppdet.modeling
16 | import ppdet.optimizer
17 | import ppdet.data
18 |
--------------------------------------------------------------------------------
/dygraph/configs/mask_rcnn/mask_rcnn_x101_vd_64x4d_fpn_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | 'mask_rcnn_r50_fpn_1x_coco.yml',
3 | ]
4 |
5 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNeXt101_vd_64x4d_pretrained.tar
6 | weights: output/mask_rcnn_x101_vd_64x4d_fpn_1x_coco/model_final
7 |
8 | ResNet:
9 | # for ResNeXt: groups, base_width, base_channels
10 | depth: 101
11 | variant: d
12 | groups: 64
13 | base_width: 4
14 | base_channels: 64
15 | norm_type: bn
16 | freeze_at: 0
17 | return_idx: [0,1,2,3]
18 | num_stages: 4
19 |
20 | epoch: 12
21 | LearningRate:
22 | base_lr: 0.01
23 | schedulers:
24 | - !PiecewiseDecay
25 | gamma: 0.1
26 | milestones: [8, 11]
27 | - !LinearWarmup
28 | start_factor: 0.1
29 | steps: 1000
30 |
--------------------------------------------------------------------------------
/dygraph/configs/mask_rcnn/mask_rcnn_x101_vd_64x4d_fpn_2x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | 'mask_rcnn_r50_fpn_1x_coco.yml',
3 | ]
4 |
5 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNeXt101_vd_64x4d_pretrained.tar
6 | weights: output/mask_rcnn_x101_vd_64x4d_fpn_2x_coco/model_final
7 |
8 | ResNet:
9 | # for ResNeXt: groups, base_width, base_channels
10 | depth: 101
11 | variant: d
12 | groups: 64
13 | base_width: 4
14 | base_channels: 64
15 | norm_type: bn
16 | freeze_at: 0
17 | return_idx: [0,1,2,3]
18 | num_stages: 4
19 |
20 | epoch: 24
21 | LearningRate:
22 | base_lr: 0.01
23 | schedulers:
24 | - !PiecewiseDecay
25 | gamma: 0.1
26 | milestones: [16, 22]
27 | - !LinearWarmup
28 | start_factor: 0.1
29 | steps: 1000
30 |
--------------------------------------------------------------------------------
/dygraph/configs/faster_rcnn/faster_rcnn_x101_vd_64x4d_fpn_2x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | 'faster_rcnn_r50_fpn_1x_coco.yml',
3 | ]
4 |
5 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNeXt101_vd_64x4d_pretrained.tar
6 | weights: output/faster_rcnn_x101_vd_64x4d_fpn_2x_coco/model_final
7 |
8 | ResNet:
9 | # for ResNeXt: groups, base_width, base_channels
10 | depth: 101
11 | groups: 64
12 | base_width: 4
13 | base_channels: 64
14 | variant: d
15 | norm_type: bn
16 | freeze_at: 0
17 | return_idx: [0,1,2,3]
18 | num_stages: 4
19 |
20 | epoch: 24
21 | LearningRate:
22 | base_lr: 0.01
23 | schedulers:
24 | - !PiecewiseDecay
25 | gamma: 0.1
26 | milestones: [16, 22]
27 | - !LinearWarmup
28 | start_factor: 0.1
29 | steps: 1000
30 |
--------------------------------------------------------------------------------
/dygraph/configs/fcos/fcos_dcn_r50_fpn_1x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_detection.yml',
3 | '../runtime.yml',
4 | '_base_/fcos_r50_fpn.yml',
5 | '_base_/optimizer_1x.yml',
6 | '_base_/fcos_reader.yml',
7 | ]
8 |
9 | weights: output/fcos_dcn_r50_fpn_1x_coco/model_final
10 |
11 | ResNet:
12 | depth: 50
13 | norm_type: bn
14 | freeze_at: 0
15 | return_idx: [1,2,3]
16 | num_stages: 4
17 | dcn_v2_stages: [1,2,3]
18 |
19 | FCOSHead:
20 | fcos_feat:
21 | name: FCOSFeat
22 | feat_in: 256
23 | feat_out: 256
24 | num_convs: 4
25 | norm_type: "gn"
26 | use_dcn: true
27 | num_classes: 80
28 | fpn_stride: [8, 16, 32, 64, 128]
29 | prior_prob: 0.01
30 | fcos_loss: FCOSLoss
31 | norm_reg_targets: true
32 | centerness_on_reg: true
33 |
--------------------------------------------------------------------------------
/ppdet/ext_op/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from . import cornerpool_lib
16 | from .cornerpool_lib import *
17 |
18 | __all__ = cornerpool_lib.__all__
19 |
--------------------------------------------------------------------------------
/dygraph/ppdet/slim/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from . import prune
16 | from . import quant
17 |
18 | from .prune import *
19 | from .quant import *
20 |
--------------------------------------------------------------------------------
/dygraph/ppdet/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from . import (core, data, engine, modeling, model_zoo, optimizer, metrics,
16 | py_op, utils, slim)
17 |
--------------------------------------------------------------------------------
/dygraph/configs/ttfnet/_base_/ttfnet_darknet53.yml:
--------------------------------------------------------------------------------
1 | architecture: TTFNet
2 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/DarkNet53_pretrained.tar
3 | load_static_weights: True
4 |
5 | TTFNet:
6 | backbone: DarkNet
7 | neck: TTFFPN
8 | ttf_head: TTFHead
9 | post_process: BBoxPostProcess
10 |
11 | DarkNet:
12 | depth: 53
13 | freeze_at: 0
14 | return_idx: [1, 2, 3, 4]
15 | norm_type: bn
16 | norm_decay: 0.0004
17 |
18 | # use default config
19 | # TTFFPN:
20 |
21 | TTFHead:
22 | hm_loss:
23 | name: CTFocalLoss
24 | loss_weight: 1.
25 | wh_loss:
26 | name: GIoULoss
27 | loss_weight: 5.
28 | reduction: sum
29 |
30 | BBoxPostProcess:
31 | decode:
32 | name: TTFBox
33 | max_per_img: 100
34 | score_thresh: 0.01
35 | down_ratio: 4
36 |
--------------------------------------------------------------------------------
/ppdet/data/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from __future__ import absolute_import
16 |
17 | from .reader import *
18 | from .source import *
19 | from .transform import *
20 |
--------------------------------------------------------------------------------
/ppdet/modeling/mask_head/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from __future__ import absolute_import
16 |
17 | from . import solo_mask_head
18 |
19 | from .solo_mask_head import *
20 |
--------------------------------------------------------------------------------
/ppdet/modeling/roi_extractors/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from __future__ import absolute_import
16 |
17 | from . import roi_extractor
18 | from .roi_extractor import *
19 |
--------------------------------------------------------------------------------
/dygraph/ppdet/model_zoo/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from . import model_zoo
16 | from .model_zoo import *
17 |
18 | __all__ = model_zoo.__all__
19 |
--------------------------------------------------------------------------------
/demo/infer_cfg.yml:
--------------------------------------------------------------------------------
1 | draw_threshold: 0.5
2 | use_python_inference: false
3 | mode: fluid
4 | metric: VOC
5 | arch: YOLO
6 | min_subgraph_size: 3
7 | with_background: false
8 | Preprocess:
9 | - interp: 2
10 | max_size: 0
11 | target_size: 608
12 | type: Resize
13 | use_cv2: true
14 | - is_channel_first: false
15 | is_scale: true
16 | mean:
17 | - 0.485
18 | - 0.456
19 | - 0.406
20 | std:
21 | - 0.229
22 | - 0.224
23 | - 0.225
24 | type: Normalize
25 | - channel_first: true
26 | to_bgr: false
27 | type: Permute
28 | label_list:
29 | - aeroplane
30 | - bicycle
31 | - bird
32 | - boat
33 | - bottle
34 | - bus
35 | - car
36 | - cat
37 | - chair
38 | - cow
39 | - diningtable
40 | - dog
41 | - horse
42 | - motorbike
43 | - person
44 | - pottedplant
45 | - sheep
46 | - sofa
47 | - train
48 | - tvmonitor
49 |
--------------------------------------------------------------------------------
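`demo/infer_cfg.yml` spells out the deployment-side preprocessing for a YOLO model: cubic resize to 608, scaling to [0, 1], ImageNet mean/std normalization, and an HWC-to-CHW permute. A hedged sketch of that pipeline with OpenCV and NumPy (field semantics assumed from their names; the code under `deploy/` is authoritative):

```python
import cv2
import numpy as np

# Sketch of the preprocessing described by demo/infer_cfg.yml: resize to
# 608x608 (interp 2 == cv2.INTER_CUBIC, max_size 0 means no size cap), scale
# to [0, 1], normalize with the listed mean/std, then permute HWC -> CHW.
MEAN = np.array([0.485, 0.456, 0.406], dtype=np.float32)
STD = np.array([0.229, 0.224, 0.225], dtype=np.float32)

def preprocess(image_path, target_size=608, interp=cv2.INTER_CUBIC):
    img = cv2.imread(image_path)                  # BGR, HWC, uint8
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)    # to_bgr: false -> keep RGB
    img = cv2.resize(img, (target_size, target_size), interpolation=interp)
    img = img.astype(np.float32) / 255.0          # is_scale: true
    img = (img - MEAN) / STD                      # Normalize
    img = img.transpose((2, 0, 1))                # Permute: channel_first
    return img[np.newaxis, ...]                   # add a batch dimension

# Example (hypothetical filename):
# blob = preprocess("some_image.jpg")
# print(blob.shape)   # (1, 3, 608, 608)
```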
/dataset/wider_face/download.sh:
--------------------------------------------------------------------------------
1 | # All rights `PaddleDetection` reserved
2 | # References:
3 | # @inproceedings{yang2016wider,
4 | # Author = {Yang, Shuo and Luo, Ping and Loy, Chen Change and Tang, Xiaoou},
5 | # Booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
6 | # Title = {WIDER FACE: A Face Detection Benchmark},
7 | # Year = {2016}}
8 |
9 | DIR="$( cd "$(dirname "$0")" ; pwd -P )"
10 | cd "$DIR"
11 |
12 | # Download the data.
13 | echo "Downloading..."
14 | wget https://dataset.bj.bcebos.com/wider_face/WIDER_train.zip
15 | wget https://dataset.bj.bcebos.com/wider_face/WIDER_val.zip
16 | wget https://dataset.bj.bcebos.com/wider_face/wider_face_split.zip
17 | # Extract the data.
18 | echo "Extracting..."
19 | unzip -q WIDER_train.zip
20 | unzip -q WIDER_val.zip
21 | unzip -q wider_face_split.zip
22 |
--------------------------------------------------------------------------------
/ppdet/data/source/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from . import coco
16 | from . import voc
17 | from . import widerface
18 |
19 | from .coco import *
20 | from .voc import *
21 | from .widerface import *
22 |
--------------------------------------------------------------------------------
/ppdet/experimental/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from __future__ import absolute_import
16 |
17 | from .mixed_precision import *
18 | from . import mixed_precision
19 |
20 | __all__ = mixed_precision.__all__
21 |
--------------------------------------------------------------------------------
/dygraph/ppdet/modeling/architectures/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | from . import meta_arch
9 | from . import faster_rcnn
10 | from . import mask_rcnn
11 | from . import yolo
12 | from . import cascade_rcnn
13 | from . import ssd
14 | from . import fcos
15 | from . import solov2
16 | from . import ttfnet
17 |
18 | from .meta_arch import *
19 | from .faster_rcnn import *
20 | from .mask_rcnn import *
21 | from .yolo import *
22 | from .cascade_rcnn import *
23 | from .ssd import *
24 | from .fcos import *
25 | from .solov2 import *
26 | from .ttfnet import *
27 |
--------------------------------------------------------------------------------
/deploy/android_demo/app/proguard-rules.pro:
--------------------------------------------------------------------------------
1 | # Add project specific ProGuard rules here.
2 | # You can control the set of applied configuration files using the
3 | # proguardFiles setting in build.gradle.
4 | #
5 | # For more details, see
6 | # http://developer.android.com/guide/developing/tools/proguard.html
7 |
8 | # If your project uses WebView with JS, uncomment the following
9 | # and specify the fully qualified class name to the JavaScript interface
10 | # class:
11 | #-keepclassmembers class fqcn.of.javascript.interface.for.webview {
12 | # public *;
13 | #}
14 |
15 | # Uncomment this to preserve the line number information for
16 | # debugging stack traces.
17 | #-keepattributes SourceFile,LineNumberTable
18 |
19 | # If you keep the line number information, uncomment this to
20 | # hide the original source file name.
21 | #-renamesourcefileattribute SourceFile
22 |
--------------------------------------------------------------------------------
/dygraph/ppdet/metrics/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from . import metrics
16 | from .metrics import *
17 |
18 | from . import category
19 | from .category import *
20 |
21 | __all__ = metrics.__all__ \
22 | + category.__all__
23 |
--------------------------------------------------------------------------------
/ppdet/data/transform/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from . import operators
16 | from . import batch_operators
17 |
18 | from .operators import *
19 | from .batch_operators import *
20 |
21 | __all__ = []
22 | __all__ += registered_ops
23 |
--------------------------------------------------------------------------------
/dygraph/ppdet/data/transform/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from . import operators
16 | from . import batch_operators
17 |
18 | from .operators import *
19 | from .batch_operators import *
20 |
21 | __all__ = []
22 | __all__ += registered_ops
23 |
--------------------------------------------------------------------------------
/dygraph/ppdet/data/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from . import source
16 | from . import transform
17 | from . import reader
18 |
19 | from .source import *
20 | from .transform import *
21 | from .reader import *
22 |
--------------------------------------------------------------------------------
/dygraph/ppdet/data/source/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from . import coco
16 | # TODO add voc and widerface dataset
17 | from . import voc
18 | #from . import widerface
19 |
20 | from .coco import *
21 | from .voc import *
22 | #from .widerface import *
23 |
--------------------------------------------------------------------------------
/dygraph/configs/gn/cascade_rcnn_r50_fpn_gn_2x.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_detection.yml',
3 | '../runtime.yml',
4 | '../cascade_rcnn/_base_/optimizer_1x.yml',
5 | '../cascade_rcnn/_base_/cascade_rcnn_r50_fpn.yml',
6 | '../cascade_rcnn/_base_/cascade_fpn_reader.yml',
7 | ]
8 | weights: output/cascade_rcnn_r50_fpn_gn_2x/model_final
9 |
10 | FPN:
11 | out_channel: 256
12 | norm_type: gn
13 |
14 | CascadeHead:
15 | head: CascadeXConvNormHead
16 | roi_extractor:
17 | resolution: 7
18 | sampling_ratio: 0
19 | aligned: True
20 | bbox_assigner: BBoxAssigner
21 |
22 | CascadeXConvNormHead:
23 | num_convs: 4
24 | mlp_dim: 1024
25 | norm_type: gn
26 |
27 |
28 | epoch: 24
29 | LearningRate:
30 | base_lr: 0.01
31 | schedulers:
32 | - !PiecewiseDecay
33 | gamma: 0.1
34 | milestones: [16, 22]
35 | - !LinearWarmup
36 | start_factor: 0.1
37 | steps: 1000
38 |
--------------------------------------------------------------------------------
/dygraph/configs/pedestrian/pedestrian_yolov3_darknet.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_detection.yml',
3 | '../runtime.yml',
4 | '../yolov3/_base_/optimizer_270e.yml',
5 | '../yolov3/_base_/yolov3_darknet53.yml',
6 | '../yolov3/_base_/yolov3_reader.yml',
7 | ]
8 |
9 | snapshot_epoch: 5
10 | weights: https://paddledet.bj.bcebos.com/models/pedestrian_yolov3_darknet.pdparams
11 |
12 | num_classes: 1
13 |
14 | TrainDataset:
15 | !COCODataSet
16 | dataset_dir: dataset/pedestrian
17 | anno_path: annotations/instances_train2017.json
18 | image_dir: train2017
19 | data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd']
20 |
21 | EvalDataset:
22 | !COCODataSet
23 | dataset_dir: dataset/pedestrian
24 | anno_path: annotations/instances_val2017.json
25 | image_dir: val2017
26 |
27 | TestDataset:
28 | !ImageFolder
29 | anno_path: configs/pedestrian/pedestrian.json
30 |
--------------------------------------------------------------------------------
/dygraph/ppdet/modeling/necks/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from . import fpn
16 | from . import yolo_fpn
17 | from . import hrfpn
18 | from . import ttf_fpn
19 |
20 | from .fpn import *
21 | from .yolo_fpn import *
22 | from .hrfpn import *
23 | from .ttf_fpn import *
24 |
--------------------------------------------------------------------------------
/dygraph/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | - repo: https://github.com/PaddlePaddle/mirrors-yapf.git
2 | sha: 0d79c0c469bab64f7229c9aca2b1186ef47f0e37
3 | hooks:
4 | - id: yapf
5 | files: \.py$
6 | - repo: https://github.com/pre-commit/pre-commit-hooks
7 | sha: a11d9314b22d8f8c7556443875b731ef05965464
8 | hooks:
9 | - id: check-merge-conflict
10 | - id: check-symlinks
11 | - id: detect-private-key
12 | files: (?!.*paddle)^.*$
13 | - id: end-of-file-fixer
14 | files: \.(md|yml)$
15 | - id: trailing-whitespace
16 | files: \.(md|yml)$
17 | - repo: https://github.com/Lucas-C/pre-commit-hooks
18 | sha: v1.0.1
19 | hooks:
20 | - id: forbid-crlf
21 | files: \.(md|yml)$
22 | - id: remove-crlf
23 | files: \.(md|yml)$
24 | - id: forbid-tabs
25 | files: \.(md|yml)$
26 | - id: remove-tabs
27 | files: \.(md|yml)$
28 |
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/values/dimens.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | 26dp
4 | 36dp
5 | 34dp
6 | 60dp
7 | 16dp
8 | 67dp
9 | 67dp
10 | 56dp
11 | 56dp
12 | 46dp
13 | 46dp
14 | 32dp
15 | 24dp
16 | 16dp
17 | 16dp
18 |
19 |
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/androidTest/java/com/baidu/paddledetection/detection/ExampleInstrumentedTest.java:
--------------------------------------------------------------------------------
1 | package com.baidu.paddledetection.detection;
2 |
3 | import android.content.Context;
4 | import android.support.test.InstrumentationRegistry;
5 | import android.support.test.runner.AndroidJUnit4;
6 |
7 | import org.junit.Test;
8 | import org.junit.runner.RunWith;
9 |
10 | import static org.junit.Assert.*;
11 |
12 | /**
13 | * Instrumented test, which will execute on an Android device.
14 | *
15 | * @see Testing documentation
16 | */
17 | @RunWith(AndroidJUnit4.class)
18 | public class ExampleInstrumentedTest {
19 | @Test
20 | public void useAppContext() {
21 | // Context of the app under test.
22 | Context appContext = InstrumentationRegistry.getTargetContext();
23 |
24 | assertEquals("com.baidu.paddle.lite.demo", appContext.getPackageName());
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/deploy/android_demo/paddledetection_demo.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
--------------------------------------------------------------------------------
/dygraph/configs/solov2/_base_/solov2_r50_fpn.yml:
--------------------------------------------------------------------------------
1 | architecture: SOLOv2
2 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_cos_pretrained.tar
3 | load_static_weights: True
4 |
5 | SOLOv2:
6 | backbone: ResNet
7 | neck: FPN
8 | solov2_head: SOLOv2Head
9 | mask_head: SOLOv2MaskHead
10 |
11 | ResNet:
12 | depth: 50
13 | norm_type: bn
14 | freeze_at: 0
15 | return_idx: [0,1,2,3]
16 | num_stages: 4
17 |
18 | FPN:
19 | out_channel: 256
20 |
21 | SOLOv2Head:
22 | seg_feat_channels: 512
23 | stacked_convs: 4
24 | num_grids: [40, 36, 24, 16, 12]
25 | kernel_out_channels: 256
26 | solov2_loss: SOLOv2Loss
27 | mask_nms: MaskMatrixNMS
28 |
29 | SOLOv2MaskHead:
30 | mid_channels: 128
31 | out_channels: 256
32 | start_level: 0
33 | end_level: 3
34 |
35 | SOLOv2Loss:
36 | ins_loss_weight: 3.0
37 | focal_loss_gamma: 2.0
38 | focal_loss_alpha: 0.25
39 |
40 | MaskMatrixNMS:
41 | pre_nms_top_n: 500
42 | post_nms_top_n: 100
43 |
--------------------------------------------------------------------------------
/dygraph/ppdet/engine/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from . import trainer
16 | from .trainer import *
17 |
18 | from . import callbacks
19 | from .callbacks import *
20 |
21 | from . import env
22 | from .env import *
23 |
24 | __all__ = trainer.__all__ \
25 | + callbacks.__all__ \
26 | + env.__all__
27 |
--------------------------------------------------------------------------------
/ppdet/ext_op/src/make.sh:
--------------------------------------------------------------------------------
1 | include_dir=$( python -c 'import paddle; print(paddle.sysconfig.get_include())' )
2 | lib_dir=$( python -c 'import paddle; print(paddle.sysconfig.get_lib())' )
3 |
4 | echo $include_dir
5 | echo $lib_dir
6 |
7 | OPS='bottom_pool_op top_pool_op right_pool_op left_pool_op'
8 | for op in ${OPS}
9 | do
10 | nvcc ${op}.cu -c -o ${op}.cu.o -ccbin cc -DPADDLE_WITH_CUDA -DEIGEN_USE_GPU -DPADDLE_USE_DSO -DPADDLE_WITH_MKLDNN -Xcompiler -fPIC -std=c++11 -Xcompiler -fPIC -w --expt-relaxed-constexpr -O0 -g -DNVCC \
11 | -I ${include_dir}/third_party/ \
12 | -I ${include_dir}
13 | done
14 |
15 | g++ bottom_pool_op.cc bottom_pool_op.cu.o top_pool_op.cc top_pool_op.cu.o right_pool_op.cc right_pool_op.cu.o left_pool_op.cc left_pool_op.cu.o -o cornerpool_lib.so -DPADDLE_WITH_MKLDNN -shared -fPIC -std=c++11 -O0 -g \
16 | -I ${include_dir}/third_party/ \
17 | -I ${include_dir} \
18 | -L ${lib_dir} \
19 | -L /usr/local/cuda/lib64 -lpaddle_framework -lcudart
20 |
21 | rm *.cu.o
22 |
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/layout/content_main.xml:
--------------------------------------------------------------------------------
1 |
2 |
7 |
8 |
19 |
20 |
--------------------------------------------------------------------------------
/deploy/README.md:
--------------------------------------------------------------------------------
1 | # PaddleDetection Inference Deployment
2 |
3 | `PaddleDetection` currently supports:
4 | - Deployment with `Python` and `C++` on `Windows` and `Linux`
5 | - [Online serving deployment](./serving/README.md)
6 | - [Mobile deployment](https://github.com/PaddlePaddle/Paddle-Lite-Demo)
7 |
8 | ## Model Export
9 | Once training produces a model that meets your requirements, export it with `tools/export_model.py` before plugging it into the C++ server-side inference library or the mobile inference library.
10 |
11 | - [Export tutorial](https://github.com/PaddlePaddle/PaddleDetection/blob/master/docs/advanced_tutorials/deploy/EXPORT_MODEL.md)
12 |
13 | After export, the directory structure looks as follows (using `yolov3_darknet` as an example):
14 | ```
15 | yolov3_darknet # model directory
16 | ├── infer_cfg.yml # model configuration
17 | ├── __model__ # model file
18 | └── __params__ # parameter file
19 | ```
20 |
21 | At inference time, the path of this directory is passed to the program as an input argument.
22 |
23 | ## Inference Deployment
24 | - [1. Python inference (Linux and Windows)](https://github.com/PaddlePaddle/PaddleDetection/blob/master/deploy/python)
25 | - [2. C++ inference (Linux and Windows)](https://github.com/PaddlePaddle/PaddleDetection/blob/master/deploy/cpp)
26 | - [3. Online serving deployment](./serving/README.md)
27 | - [4. Mobile deployment](https://github.com/PaddlePaddle/Paddle-Lite-Demo)
28 | - [5. Deployment on Jetson devices](./cpp/docs/Jetson_build.md)
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/assets/labels/coco-labels-2014_2017.txt:
--------------------------------------------------------------------------------
1 | person
2 | bicycle
3 | car
4 | motorcycle
5 | airplane
6 | bus
7 | train
8 | truck
9 | boat
10 | traffic light
11 | fire hydrant
12 | stop sign
13 | parking meter
14 | bench
15 | bird
16 | cat
17 | dog
18 | horse
19 | sheep
20 | cow
21 | elephant
22 | bear
23 | zebra
24 | giraffe
25 | backpack
26 | umbrella
27 | handbag
28 | tie
29 | suitcase
30 | frisbee
31 | skis
32 | snowboard
33 | sports ball
34 | kite
35 | baseball bat
36 | baseball glove
37 | skateboard
38 | surfboard
39 | tennis racket
40 | bottle
41 | wine glass
42 | cup
43 | fork
44 | knife
45 | spoon
46 | bowl
47 | banana
48 | apple
49 | sandwich
50 | orange
51 | broccoli
52 | carrot
53 | hot dog
54 | pizza
55 | donut
56 | cake
57 | chair
58 | couch
59 | potted plant
60 | bed
61 | dining table
62 | toilet
63 | tv
64 | laptop
65 | mouse
66 | remote
67 | keyboard
68 | cell phone
69 | microwave
70 | oven
71 | toaster
72 | sink
73 | refrigerator
74 | book
75 | clock
76 | vase
77 | scissors
78 | teddy bear
79 | hair drier
80 | toothbrush
--------------------------------------------------------------------------------
/dygraph/configs/ssd/_base_/ssd_vgg16_300.yml:
--------------------------------------------------------------------------------
1 | architecture: SSD
2 | pretrain_weights: https://paddlemodels.bj.bcebos.com/object_detection/dygraph/VGG16_caffe_pretrained.pdparams
3 |
4 | # Model Architecture
5 | SSD:
6 | # model feat info flow
7 | backbone: VGG
8 | ssd_head: SSDHead
9 | # post process
10 | post_process: BBoxPostProcess
11 |
12 | VGG:
13 | depth: 16
14 | normalizations: [20., -1, -1, -1, -1, -1]
15 |
16 | SSDHead:
17 | anchor_generator:
18 | steps: [8, 16, 32, 64, 100, 300]
19 | aspect_ratios: [[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]]
20 | min_ratio: 20
21 | max_ratio: 90
22 | min_sizes: [30.0, 60.0, 111.0, 162.0, 213.0, 264.0]
23 | max_sizes: [60.0, 111.0, 162.0, 213.0, 264.0, 315.0]
24 | offset: 0.5
25 | flip: true
26 | min_max_aspect_ratios_order: true
27 |
28 | BBoxPostProcess:
29 | decode:
30 | name: SSDBox
31 | nms:
32 | name: MultiClassNMS
33 | keep_top_k: 200
34 | score_threshold: 0.01
35 | nms_threshold: 0.45
36 | nms_top_k: 400
37 | nms_eta: 1.0
38 |
--------------------------------------------------------------------------------
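The explicit `min_sizes`/`max_sizes` above follow the classic Caffe-SSD sizing rule driven by `min_ratio`, `max_ratio` and the 300-pixel input (the SSDLite configs below leave the lists empty and let the head derive them). A standalone sketch of that rule, illustrative only and not the ppdet implementation; it reproduces exactly the values listed in this config:

```
import math

def ssd_prior_sizes(min_ratio=20, max_ratio=90, base_size=300, num_layers=6):
    # spread the ratios evenly over the remaining feature maps (Caffe-SSD convention)
    step = int(math.floor((max_ratio - min_ratio) / (num_layers - 2)))
    min_sizes, max_sizes = [], []
    for ratio in range(min_ratio, max_ratio + 1, step):
        min_sizes.append(base_size * ratio / 100.0)
        max_sizes.append(base_size * (ratio + step) / 100.0)
    # the first feature map gets an extra, smaller prior (10% / 20% of the input)
    return [base_size * 10 / 100.0] + min_sizes, [base_size * 20 / 100.0] + max_sizes

print(ssd_prior_sizes())
# ([30.0, 60.0, 111.0, 162.0, 213.0, 264.0],
#  [60.0, 111.0, 162.0, 213.0, 264.0, 315.0])
```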
/deploy/android_demo/app/src/main/assets/labels/coco-labels-background.txt:
--------------------------------------------------------------------------------
1 | background
2 | person
3 | bicycle
4 | car
5 | motorcycle
6 | airplane
7 | bus
8 | train
9 | truck
10 | boat
11 | traffic light
12 | fire hydrant
13 | stop sign
14 | parking meter
15 | bench
16 | bird
17 | cat
18 | dog
19 | horse
20 | sheep
21 | cow
22 | elephant
23 | bear
24 | zebra
25 | giraffe
26 | backpack
27 | umbrella
28 | handbag
29 | tie
30 | suitcase
31 | frisbee
32 | skis
33 | snowboard
34 | sports ball
35 | kite
36 | baseball bat
37 | baseball glove
38 | skateboard
39 | surfboard
40 | tennis racket
41 | bottle
42 | wine glass
43 | cup
44 | fork
45 | knife
46 | spoon
47 | bowl
48 | banana
49 | apple
50 | sandwich
51 | orange
52 | broccoli
53 | carrot
54 | hot dog
55 | pizza
56 | donut
57 | cake
58 | chair
59 | couch
60 | potted plant
61 | bed
62 | dining table
63 | toilet
64 | tv
65 | laptop
66 | mouse
67 | remote
68 | keyboard
69 | cell phone
70 | microwave
71 | oven
72 | toaster
73 | sink
74 | refrigerator
75 | book
76 | clock
77 | vase
78 | scissors
79 | teddy bear
80 | hair drier
81 | toothbrush
--------------------------------------------------------------------------------
/dygraph/configs/gn/faster_rcnn_r50_fpn_gn_2x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_detection.yml',
3 | '../runtime.yml',
4 | '../faster_rcnn/_base_/optimizer_1x.yml',
5 | '../faster_rcnn/_base_/faster_rcnn_r50_fpn.yml',
6 | '../faster_rcnn/_base_/faster_fpn_reader.yml',
7 | ]
8 | weights: output/faster_rcnn_r50_fpn_gn_2x_coco/model_final
9 |
10 | FasterRCNN:
11 | backbone: ResNet
12 | neck: FPN
13 | rpn_head: RPNHead
14 | bbox_head: BBoxHead
15 | # post process
16 | bbox_post_process: BBoxPostProcess
17 |
18 | FPN:
19 | out_channel: 256
20 | norm_type: gn
21 |
22 | BBoxHead:
23 | head: XConvNormHead
24 | roi_extractor:
25 | resolution: 7
26 | sampling_ratio: 0
27 | aligned: True
28 | bbox_assigner: BBoxAssigner
29 |
30 | XConvNormHead:
31 | num_convs: 4
32 | mlp_dim: 1024
33 | norm_type: gn
34 |
35 |
36 | epoch: 24
37 | LearningRate:
38 | base_lr: 0.01
39 | schedulers:
40 | - !PiecewiseDecay
41 | gamma: 0.1
42 | milestones: [16, 22]
43 | - !LinearWarmup
44 | start_factor: 0.1
45 | steps: 1000
46 |
--------------------------------------------------------------------------------
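The `_BASE_` list is how these dygraph configs are composed: keys from the listed base files are merged in order, and anything set in this file (for example `FPN.norm_type: gn` or `epoch: 24`) overrides the base value. Below is a minimal sketch of loading such a composed config and building the architecture through the component registry, assuming the `load_config`/`create` helpers exposed by `ppdet.core.workspace` (the same ones the dygraph tools use); the path is illustrative:

```
from ppdet.core.workspace import load_config, create

# illustrative path; any composed config under dygraph/configs works the same way
cfg = load_config('dygraph/configs/gn/faster_rcnn_r50_fpn_gn_2x_coco.yml')

print(cfg['architecture'])  # 'FasterRCNN', merged in from the _BASE_ files
print(cfg['epoch'])         # 24, set directly in this file

# create() resolves the architecture name against the classes decorated with @register
model = create(cfg['architecture'])
```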
/deploy/android_demo/app/src/main/res/values/styles.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
11 |
12 |
15 |
16 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
--------------------------------------------------------------------------------
/dygraph/configs/ppyolo/ppyolo_r50vd_dcn_1x_minicoco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_detection.yml',
3 | '../runtime.yml',
4 | './_base_/ppyolo_r50vd_dcn.yml',
5 | './_base_/optimizer_1x.yml',
6 | './_base_/ppyolo_reader.yml',
7 | ]
8 |
9 | snapshot_epoch: 8
10 | use_ema: true
11 | weights: output/ppyolo_r50vd_dcn_1x_minicoco/model_final
12 |
13 | TrainReader:
14 | batch_size: 12
15 |
16 | TrainDataset:
17 | !COCODataSet
18 | image_dir: train2017
19 | # refer to https://github.com/giddyyupp/coco-minitrain
20 | anno_path: annotations/instances_minitrain2017.json
21 | dataset_dir: dataset/coco
22 | data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd']
23 |
24 | epoch: 192
25 |
26 | LearningRate:
27 | base_lr: 0.005
28 | schedulers:
29 | - !PiecewiseDecay
30 | gamma: 0.1
31 | milestones:
32 | - 153
33 | - 173
34 | - !LinearWarmup
35 | start_factor: 0.
36 | steps: 4000
37 |
38 | OptimizerBuilder:
39 | optimizer:
40 | momentum: 0.9
41 | type: Momentum
42 | regularizer:
43 | factor: 0.0005
44 | type: L2
45 |
--------------------------------------------------------------------------------
/configs/gridmask/README.md:
--------------------------------------------------------------------------------
1 | # GridMask Data Augmentation
2 |
3 | ## Introduction
4 |
5 | - GridMask Data Augmentation
6 | : [https://arxiv.org/abs/2001.04086](https://arxiv.org/abs/2001.04086)
7 |
8 | ```
9 | @article{chen2020gridmask,
10 | title={GridMask data augmentation},
11 | author={Chen, Pengguang},
12 | journal={arXiv preprint arXiv:2001.04086},
13 | year={2020}
14 | }
15 | ```
16 |
17 |
18 | ## Model Zoo
19 |
20 | | Backbone | Type | Image/gpu | Lr schd | Inf time (fps) | Box AP | Mask AP | Download | Configs |
21 | | :---------------------- | :-------------: | :-------: | :-----: | :------------: | :----: | :-----: | :----------------------------------------------------------: | :-----: |
22 | | ResNet50-vd-FPN | Faster | 2 | 4x | 21.847 | 39.1% | - | [model](https://paddlemodels.bj.bcebos.com/object_detection/faster_rcnn_r50_vd_fpn_gridmask_4x.tar) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/master/configs/gridmask/faster_rcnn_r50_vd_fpn_gridmask_4x.yml) |
23 |
--------------------------------------------------------------------------------
/ppdet/data/shared_queue/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from __future__ import absolute_import
16 | from __future__ import division
17 | from __future__ import print_function
18 | from __future__ import unicode_literals
19 |
20 | __all__ = ['SharedBuffer', 'SharedMemoryMgr', 'SharedQueue']
21 |
22 | from .sharedmemory import SharedBuffer
23 | from .sharedmemory import SharedMemoryMgr
24 | from .sharedmemory import SharedMemoryError
25 | from .queue import SharedQueue
26 |
--------------------------------------------------------------------------------
/deploy/cpp/cmake/yaml-cpp.cmake:
--------------------------------------------------------------------------------
1 |
2 | find_package(Git REQUIRED)
3 |
4 | include(ExternalProject)
5 |
6 | message("${CMAKE_BUILD_TYPE}")
7 |
8 | ExternalProject_Add(
9 | ext-yaml-cpp
10 | URL https://bj.bcebos.com/paddlex/deploy/deps/yaml-cpp.zip
11 | URL_MD5 9542d6de397d1fbd649ed468cb5850e6
12 | CMAKE_ARGS
13 | -DYAML_CPP_BUILD_TESTS=OFF
14 | -DYAML_CPP_BUILD_TOOLS=OFF
15 | -DYAML_CPP_INSTALL=OFF
16 | -DYAML_CPP_BUILD_CONTRIB=OFF
17 | -DMSVC_SHARED_RT=OFF
18 | -DBUILD_SHARED_LIBS=OFF
19 | -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
20 | -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
21 | -DCMAKE_CXX_FLAGS_DEBUG=${CMAKE_CXX_FLAGS_DEBUG}
22 | -DCMAKE_CXX_FLAGS_RELEASE=${CMAKE_CXX_FLAGS_RELEASE}
23 | -DCMAKE_LIBRARY_OUTPUT_DIRECTORY=${CMAKE_BINARY_DIR}/ext/yaml-cpp/lib
24 | -DCMAKE_ARCHIVE_OUTPUT_DIRECTORY=${CMAKE_BINARY_DIR}/ext/yaml-cpp/lib
25 | PREFIX "${CMAKE_BINARY_DIR}/ext/yaml-cpp"
26 | # Disable install step
27 | INSTALL_COMMAND ""
28 | LOG_DOWNLOAD ON
29 | LOG_BUILD 1
30 | )
31 |
--------------------------------------------------------------------------------
/dygraph/deploy/cpp/cmake/yaml-cpp.cmake:
--------------------------------------------------------------------------------
1 |
2 | find_package(Git REQUIRED)
3 |
4 | include(ExternalProject)
5 |
6 | message("${CMAKE_BUILD_TYPE}")
7 |
8 | ExternalProject_Add(
9 | ext-yaml-cpp
10 | URL https://bj.bcebos.com/paddlex/deploy/deps/yaml-cpp.zip
11 | URL_MD5 9542d6de397d1fbd649ed468cb5850e6
12 | CMAKE_ARGS
13 | -DYAML_CPP_BUILD_TESTS=OFF
14 | -DYAML_CPP_BUILD_TOOLS=OFF
15 | -DYAML_CPP_INSTALL=OFF
16 | -DYAML_CPP_BUILD_CONTRIB=OFF
17 | -DMSVC_SHARED_RT=OFF
18 | -DBUILD_SHARED_LIBS=OFF
19 | -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
20 | -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
21 | -DCMAKE_CXX_FLAGS_DEBUG=${CMAKE_CXX_FLAGS_DEBUG}
22 | -DCMAKE_CXX_FLAGS_RELEASE=${CMAKE_CXX_FLAGS_RELEASE}
23 | -DCMAKE_LIBRARY_OUTPUT_DIRECTORY=${CMAKE_BINARY_DIR}/ext/yaml-cpp/lib
24 | -DCMAKE_ARCHIVE_OUTPUT_DIRECTORY=${CMAKE_BINARY_DIR}/ext/yaml-cpp/lib
25 | PREFIX "${CMAKE_BINARY_DIR}/ext/yaml-cpp"
26 | # Disable install step
27 | INSTALL_COMMAND ""
28 | LOG_DOWNLOAD ON
29 | LOG_BUILD 1
30 | )
31 |
--------------------------------------------------------------------------------
/dygraph/ppdet/modeling/losses/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from . import yolo_loss
16 | from . import iou_aware_loss
17 | from . import iou_loss
18 | from . import ssd_loss
19 | from . import fcos_loss
20 | from . import solov2_loss
21 | from . import ctfocal_loss
22 |
23 | from .yolo_loss import *
24 | from .iou_aware_loss import *
25 | from .iou_loss import *
26 | from .ssd_loss import *
27 | from .fcos_loss import *
28 | from .solov2_loss import *
29 | from .ctfocal_loss import *
30 |
--------------------------------------------------------------------------------
/dygraph/configs/yolov3/_base_/yolov3_darknet53.yml:
--------------------------------------------------------------------------------
1 | architecture: YOLOv3
2 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/DarkNet53_pretrained.tar
3 | load_static_weights: True
4 | norm_type: sync_bn
5 |
6 | YOLOv3:
7 | backbone: DarkNet
8 | neck: YOLOv3FPN
9 | yolo_head: YOLOv3Head
10 | post_process: BBoxPostProcess
11 |
12 | DarkNet:
13 | depth: 53
14 | return_idx: [2, 3, 4]
15 |
16 | # use default config
17 | # YOLOv3FPN:
18 |
19 | YOLOv3Head:
20 | anchors: [[10, 13], [16, 30], [33, 23],
21 | [30, 61], [62, 45], [59, 119],
22 | [116, 90], [156, 198], [373, 326]]
23 | anchor_masks: [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
24 | loss: YOLOv3Loss
25 |
26 | YOLOv3Loss:
27 | ignore_thresh: 0.7
28 | downsample: [32, 16, 8]
29 | label_smooth: false
30 |
31 | BBoxPostProcess:
32 | decode:
33 | name: YOLOBox
34 | conf_thresh: 0.005
35 | downsample_ratio: 32
36 | clip_bbox: true
37 | nms:
38 | name: MultiClassNMS
39 | keep_top_k: 100
40 | score_threshold: 0.01
41 | nms_threshold: 0.45
42 | nms_top_k: 1000
43 | normalized: false
44 |
--------------------------------------------------------------------------------
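`anchor_masks` are plain indices into `anchors`: each output level of the head takes the three anchor boxes its mask selects, and the ordering lines up with `downsample: [32, 16, 8]`, so the coarsest level predicts with the largest boxes. A small standalone sketch of that lookup (values copied from this config):

```
anchors = [[10, 13], [16, 30], [33, 23],
           [30, 61], [62, 45], [59, 119],
           [116, 90], [156, 198], [373, 326]]
anchor_masks = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
downsample = [32, 16, 8]

# print which anchor boxes each output stride uses
for stride, mask in zip(downsample, anchor_masks):
    print(stride, [anchors[i] for i in mask])
# 32 [[116, 90], [156, 198], [373, 326]]
# 16 [[30, 61], [62, 45], [59, 119]]
# 8 [[10, 13], [16, 30], [33, 23]]
```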
/ppdet/modeling/roi_heads/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from __future__ import absolute_import
16 |
17 | from . import bbox_head
18 | from . import mask_head
19 | from . import cascade_head
20 | from . import htc_bbox_head
21 | from . import htc_mask_head
22 | from . import htc_semantic_head
23 |
24 | from .bbox_head import *
25 | from .mask_head import *
26 | from .cascade_head import *
27 | from .htc_bbox_head import *
28 | from .htc_mask_head import *
29 | from .htc_semantic_head import *
30 |
--------------------------------------------------------------------------------
/configs/acfpn/README.md:
--------------------------------------------------------------------------------
1 | # Attention-guided Context Feature Pyramid Network for Object Detection
2 |
3 | ## Introduction
4 |
5 | - Attention-guided Context Feature Pyramid Network for Object Detection: [https://arxiv.org/abs/2005.11475](https://arxiv.org/abs/2005.11475)
6 |
7 | ```
8 | Cao J, Chen Q, Guo J, et al. Attention-guided Context Feature Pyramid Network for Object Detection[J]. arXiv preprint arXiv:2005.11475, 2020.
9 | ```
10 |
11 |
12 | ## Model Zoo
13 |
14 | | Backbone | Type | Image/gpu | Lr schd | Inf time (fps) | Box AP | Mask AP | Download | Configs |
15 | | :---------------------- | :-------------: | :-------: | :-----: | :------------: | :----: | :-----: | :----------------------------------------------------------: | :-----: |
16 | | ResNet50-vd-ACFPN | Faster | 2 | 1x | 23.432 | 39.6 | - | [model](https://paddlemodels.bj.bcebos.com/object_detection/faster_rcnn_r50_vd_acfpn_1x.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/master/configs/acfpn/faster_rcnn_r50_vd_acfpn_1x.yml) |
17 |
--------------------------------------------------------------------------------
/dygraph/configs/slim/prune/yolov3_prune_fpgm.yml:
--------------------------------------------------------------------------------
1 | # Weights of yolov3_mobilenet_v1_voc
2 | pretrain_weights: https://paddlemodels.bj.bcebos.com/object_detection/dygraph/yolov3_mobilenet_v1_270e_voc.pdparams
3 | load_static_weights: False
4 | weight_type: resume
5 | slim: Pruner
6 |
7 | Pruner:
8 | criterion: fpgm
9 | pruned_params: ['yolo_block.0.0.0.conv.weights', 'yolo_block.0.0.1.conv.weights', 'yolo_block.0.1.0.conv.weights',
10 | 'yolo_block.0.1.1.conv.weights', 'yolo_block.0.2.conv.weights', 'yolo_block.0.tip.conv.weights',
11 | 'yolo_block.1.0.0.conv.weights', 'yolo_block.1.0.1.conv.weights', 'yolo_block.1.1.0.conv.weights',
12 | 'yolo_block.1.1.1.conv.weights', 'yolo_block.1.2.conv.weights', 'yolo_block.1.tip.conv.weights',
13 | 'yolo_block.2.0.0.conv.weights', 'yolo_block.2.0.1.conv.weights', 'yolo_block.2.1.0.conv.weights',
14 | 'yolo_block.2.1.1.conv.weights', 'yolo_block.2.2.conv.weights', 'yolo_block.2.tip.conv.weights']
15 | pruned_ratios: [0.1,0.2,0.2,0.2,0.2,0.1,0.2,0.3,0.3,0.3,0.2,0.1,0.3,0.4,0.4,0.4,0.4,0.3]
16 | print_params: False
17 |
--------------------------------------------------------------------------------
/dygraph/dataset/voc/create_list.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import sys
16 | import os.path as osp
17 | import logging
18 | # add python path of PaddleDetection to sys.path
19 | parent_path = osp.abspath(osp.join(__file__, *(['..'] * 3)))
20 | if parent_path not in sys.path:
21 | sys.path.append(parent_path)
22 |
23 | from ppdet.utils.download import create_voc_list
24 |
25 | logging.basicConfig(level=logging.INFO)
26 |
27 | voc_path = osp.split(osp.realpath(sys.argv[0]))[0]
28 | create_voc_list(voc_path)
29 |
--------------------------------------------------------------------------------
/dataset/fddb/download.sh:
--------------------------------------------------------------------------------
1 | # All rights `PaddleDetection` reserved
2 | # References:
3 | # @TechReport{fddbTech,
4 | # author = {Vidit Jain and Erik Learned-Miller},
5 | # title = {FDDB: A Benchmark for Face Detection in Unconstrained Settings},
6 | # institution = {University of Massachusetts, Amherst},
7 | # year = {2010},
8 | # number = {UM-CS-2010-009}
9 | # }
10 |
11 | DIR="$( cd "$(dirname "$0")" ; pwd -P )"
12 | cd "$DIR"
13 |
14 | # Download the data.
15 | echo "Downloading..."
16 | # external link to the Faces in the Wild data set and annotations file
17 | wget http://tamaraberg.com/faceDataset/originalPics.tar.gz
18 | wget http://vis-www.cs.umass.edu/fddb/FDDB-folds.tgz
19 | wget http://vis-www.cs.umass.edu/fddb/evaluation.tgz
20 |
21 | # Extract the data.
22 | echo "Extracting..."
23 | tar -zxf originalPics.tar.gz
24 | tar -zxf FDDB-folds.tgz
25 | tar -zxf evaluation.tgz
26 |
27 | # Generate full image path list and groundtruth in FDDB-folds:
28 | cd FDDB-folds
29 | cat `ls | grep -v "ellipse"` > filePath.txt && cat *ellipse* > fddb_annotFile.txt
30 | cd ..
31 | echo "------------- All done! --------------"
32 |
--------------------------------------------------------------------------------
/dygraph/configs/slim/prune/yolov3_prune_l1_norm.yml:
--------------------------------------------------------------------------------
1 | # Weights of yolov3_mobilenet_v1_voc
2 | pretrain_weights: https://paddlemodels.bj.bcebos.com/object_detection/dygraph/yolov3_mobilenet_v1_270e_voc.pdparams
3 | load_static_weights: False
4 | weight_type: resume
5 | slim: Pruner
6 |
7 | Pruner:
8 | criterion: l1_norm
9 | pruned_params: ['yolo_block.0.0.0.conv.weights', 'yolo_block.0.0.1.conv.weights', 'yolo_block.0.1.0.conv.weights',
10 | 'yolo_block.0.1.1.conv.weights', 'yolo_block.0.2.conv.weights', 'yolo_block.0.tip.conv.weights',
11 | 'yolo_block.1.0.0.conv.weights', 'yolo_block.1.0.1.conv.weights', 'yolo_block.1.1.0.conv.weights',
12 | 'yolo_block.1.1.1.conv.weights', 'yolo_block.1.2.conv.weights', 'yolo_block.1.tip.conv.weights',
13 | 'yolo_block.2.0.0.conv.weights', 'yolo_block.2.0.1.conv.weights', 'yolo_block.2.1.0.conv.weights',
14 | 'yolo_block.2.1.1.conv.weights', 'yolo_block.2.2.conv.weights', 'yolo_block.2.tip.conv.weights']
15 | pruned_ratios: [0.1,0.2,0.2,0.2,0.2,0.1,0.2,0.3,0.3,0.3,0.2,0.1,0.3,0.4,0.4,0.4,0.4,0.3]
16 | print_params: False
17 |
--------------------------------------------------------------------------------
/dataset/coco/download_coco.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import sys
16 | import os.path as osp
17 | import logging
18 | # add python path of PaddleDetection to sys.path
19 | parent_path = osp.abspath(osp.join(__file__, *(['..'] * 3)))
20 | if parent_path not in sys.path:
21 | sys.path.append(parent_path)
22 |
23 | from ppdet.utils.download import download_dataset
24 |
25 | logging.basicConfig(level=logging.INFO)
26 |
27 | download_path = osp.split(osp.realpath(sys.argv[0]))[0]
28 | download_dataset(download_path, 'coco')
29 |
--------------------------------------------------------------------------------
/dataset/fruit/download_fruit.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import sys
16 | import os.path as osp
17 | import logging
18 | # add python path of PaddleDetection to sys.path
19 | parent_path = osp.abspath(osp.join(__file__, *(['..'] * 3)))
20 | if parent_path not in sys.path:
21 | sys.path.append(parent_path)
22 |
23 | from ppdet.utils.download import download_dataset
24 |
25 | logging.basicConfig(level=logging.INFO)
26 |
27 | download_path = osp.split(osp.realpath(sys.argv[0]))[0]
28 | download_dataset(download_path, 'fruit')
29 |
--------------------------------------------------------------------------------
/dygraph/dataset/voc/download_voc.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import sys
16 | import os.path as osp
17 | import logging
18 | # add python path of PaddleDetection to sys.path
19 | parent_path = osp.abspath(osp.join(__file__, *(['..'] * 3)))
20 | if parent_path not in sys.path:
21 | sys.path.append(parent_path)
22 |
23 | from ppdet.utils.download import download_dataset
24 |
25 | logging.basicConfig(level=logging.INFO)
26 |
27 | download_path = osp.split(osp.realpath(sys.argv[0]))[0]
28 | download_dataset(download_path, 'voc')
29 |
--------------------------------------------------------------------------------
/deploy/android_demo/gradle.properties:
--------------------------------------------------------------------------------
1 | # Project-wide Gradle settings.
2 | # IDE (e.g. Android Studio) users:
3 | # Gradle settings configured through the IDE *will override*
4 | # any settings specified in this file.
5 | # For more details on how to configure your build environment visit
6 | # http://www.gradle.org/docs/current/userguide/build_environment.html
7 | # Specifies the JVM arguments used for the daemon process.
8 | # The setting is particularly useful for tweaking memory settings.
9 | org.gradle.jvmargs=-Xmx2048m
10 | # When configured, Gradle will run in incubating parallel mode.
11 | # This option should only be used with decoupled projects. More details, visit
12 | # http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:decoupled_projects
13 | # org.gradle.parallel=true
14 | # AndroidX package structure to make it clearer which packages are bundled with the
15 | # Android operating system, and which are packaged with your app's APK
16 | # https://developer.android.com/topic/libraries/support-library/androidx-rn
17 | android.useAndroidX=true
18 | # Automatically convert third-party libraries to use AndroidX
19 | android.enableJetifier=true
--------------------------------------------------------------------------------
/dygraph/dataset/coco/download_coco.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import sys
16 | import os.path as osp
17 | import logging
18 | # add python path of PaddleDetection to sys.path
19 | parent_path = osp.abspath(osp.join(__file__, *(['..'] * 3)))
20 | if parent_path not in sys.path:
21 | sys.path.append(parent_path)
22 |
23 | from ppdet.utils.download import download_dataset
24 |
25 | logging.basicConfig(level=logging.INFO)
26 |
27 | download_path = osp.split(osp.realpath(sys.argv[0]))[0]
28 | download_dataset(download_path, 'coco')
29 |
--------------------------------------------------------------------------------
/dygraph/ppdet/modeling/architectures/meta_arch.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import numpy as np
6 | import paddle
7 | import paddle.nn as nn
8 | from ppdet.core.workspace import register
9 |
10 | __all__ = ['BaseArch']
11 |
12 |
13 | @register
14 | class BaseArch(nn.Layer):
15 | def __init__(self):
16 | super(BaseArch, self).__init__()
17 |
18 | def forward(self, inputs):
19 | self.inputs = inputs
20 | self.model_arch()
21 |
22 | if self.training:
23 | out = self.get_loss()
24 | else:
25 | out = self.get_pred()
26 | return out
27 |
28 | def build_inputs(self, data, input_def):
29 | inputs = {}
30 | for i, k in enumerate(input_def):
31 | inputs[k] = data[i]
32 | return inputs
33 |
34 | def model_arch(self, ):
35 | pass
36 |
37 | def get_loss(self, ):
38 | raise NotImplementedError("Should implement get_loss method!")
39 |
40 | def get_pred(self, ):
41 | raise NotImplementedError("Should implement get_pred method!")
42 |
--------------------------------------------------------------------------------
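`BaseArch` fixes the control flow for every dygraph architecture: `forward` stores the feed dict, runs `model_arch`, then dispatches to `get_loss` during training or `get_pred` at inference. A toy sketch of how a concrete model might subclass it (hypothetical `ToyDetector`, not one of the ppdet architectures; it only illustrates the hook points):

```
import paddle
import paddle.nn as nn
from ppdet.core.workspace import register
from ppdet.modeling.architectures.meta_arch import BaseArch


@register
class ToyDetector(BaseArch):
    def __init__(self, num_classes=80):
        super(ToyDetector, self).__init__()
        self.head = nn.Conv2D(3, num_classes, kernel_size=1)

    def model_arch(self):
        # self.inputs was stored by BaseArch.forward before this hook runs
        self.logits = self.head(self.inputs['image'])

    def get_loss(self):
        # training branch: return a dict of named losses
        return {'loss': paddle.mean(self.logits ** 2)}

    def get_pred(self):
        # inference branch: return the raw predictions
        return {'logits': self.logits}
```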
/deploy/android_demo/app/src/main/res/drawable/btn_shutter_default.xml:
--------------------------------------------------------------------------------
1 |
2 |
7 |
12 |
17 |
18 |
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/drawable/btn_shutter_pressed.xml:
--------------------------------------------------------------------------------
1 |
2 |
7 |
12 |
17 |
18 |
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/layout/activity_main.xml:
--------------------------------------------------------------------------------
1 |
2 |
8 |
9 |
13 |
14 |
20 |
21 |
22 |
23 |
24 |
25 |
--------------------------------------------------------------------------------
/dygraph/configs/ssd/_base_/ssdlite_mobilenet_v1_300.yml:
--------------------------------------------------------------------------------
1 | architecture: SSD
2 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV1_ssld_pretrained.tar
3 | load_static_weights: True
4 |
5 | SSD:
6 | backbone: MobileNet
7 | ssd_head: SSDHead
8 | post_process: BBoxPostProcess
9 |
10 | MobileNet:
11 | conv_decay: 0.00004
12 | scale: 1
13 | extra_block_filters: [[256, 512], [128, 256], [128, 256], [64, 128]]
14 | with_extra_blocks: true
15 | feature_maps: [11, 13, 14, 15, 16, 17]
16 |
17 | SSDHead:
18 | use_sepconv: True
19 | conv_decay: 0.00004
20 | anchor_generator:
21 | steps: [16, 32, 64, 100, 150, 300]
22 | aspect_ratios: [[2.], [2., 3.], [2., 3.], [2., 3.], [2., 3.], [2., 3.]]
23 | min_ratio: 20
24 | max_ratio: 95
25 | base_size: 300
26 | min_sizes: []
27 | max_sizes: []
28 | offset: 0.5
29 | flip: true
30 | clip: true
31 | min_max_aspect_ratios_order: False
32 |
33 | BBoxPostProcess:
34 | decode:
35 | name: SSDBox
36 | nms:
37 | name: MultiClassNMS
38 | keep_top_k: 200
39 | score_threshold: 0.01
40 | nms_threshold: 0.45
41 | nms_top_k: 400
42 | nms_eta: 1.0
43 |
--------------------------------------------------------------------------------
/configs/random_erasing/README.md:
--------------------------------------------------------------------------------
1 | # Random Erasing Data Augmentation
2 |
3 | ## Introduction
4 |
5 | - Random Erasing Data Augmentation
6 | : [https://arxiv.org/abs/1708.04896](https://arxiv.org/abs/1708.04896)
7 |
8 | ```
9 | @article{zhong1708random,
10 | title={Random erasing data augmentation. arXiv 2017},
11 | author={Zhong, Z and Zheng, L and Kang, G and Li, S and Yang, Y},
12 | journal={arXiv preprint arXiv:1708.04896}
13 | }
14 | ```
15 |
16 |
17 | ## Model Zoo
18 |
19 | | Backbone | Type | Image/gpu | Lr schd | Inf time (fps) | Box AP | Mask AP | Download | Configs |
20 | | :---------------------- | :-------------: | :-------: | :-----: | :------------: | :----: | :-----: | :----------------------------------------------------------: | :-----: |
21 | | ResNet50-vd-FPN | Faster | 2 | 4x | 21.847 | 39.0% | - | [model](https://paddlemodels.bj.bcebos.com/object_detection/faster_rcnn_r50_vd_fpn_random_erasing_4x.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/master/configs/random_erasing/faster_rcnn_r50_vd_fpn_random_erasing_4x.yml) |
22 |
--------------------------------------------------------------------------------
/deploy/android_demo/app/src/main/res/values/arrays.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | - 1 threads
5 | - 2 threads
6 | - 4 threads
7 | - 8 threads
8 |
9 |
10 | - 1
11 | - 2
12 | - 4
13 | - 8
14 |
15 |
16 | - HIGH(only big cores)
17 | - LOW(only LITTLE cores)
18 | - FULL(all cores)
19 | - NO_BIND(depends on system)
20 | - RAND_HIGH
21 | - RAND_LOW
22 |
23 |
24 | - LITE_POWER_HIGH
25 | - LITE_POWER_LOW
26 | - LITE_POWER_FULL
27 | - LITE_POWER_NO_BIND
28 | - LITE_POWER_RAND_HIGH
29 | - LITE_POWER_RAND_LOW
30 |
31 |
--------------------------------------------------------------------------------
/dygraph/configs/ttfnet/_base_/ttfnet_reader.yml:
--------------------------------------------------------------------------------
1 | worker_num: 2
2 | TrainReader:
3 | sample_transforms:
4 | - Decode: {}
5 | - RandomFlip: {prob: 0.5}
6 | - Resize: {interp: 1, target_size: [512, 512], keep_ratio: False}
7 | - NormalizeImage: {mean: [123.675, 116.28, 103.53], std: [58.395, 57.12, 57.375], is_scale: false}
8 | - Permute: {}
9 | batch_transforms:
10 | - Gt2TTFTarget: {down_ratio: 4}
11 | - PadBatch: {pad_to_stride: 32, pad_gt: true}
12 | batch_size: 12
13 | shuffle: true
14 | drop_last: true
15 |
16 | EvalReader:
17 | sample_transforms:
18 | - Decode: {}
19 | - Resize: {interp: 1, target_size: [512, 512], keep_ratio: False}
20 | - NormalizeImage: {is_scale: false, mean: [123.675, 116.28, 103.53], std: [58.395, 57.12, 57.375]}
21 | - Permute: {}
22 | batch_size: 1
23 | drop_last: false
24 | drop_empty: false
25 |
26 | TestReader:
27 | sample_transforms:
28 | - Decode: {}
29 | - Resize: {interp: 1, target_size: [512, 512], keep_ratio: False}
30 | - NormalizeImage: {is_scale: false, mean: [123.675, 116.28, 103.53], std: [58.395, 57.12, 57.375]}
31 | - Permute: {}
32 | batch_size: 1
33 | drop_last: false
34 | drop_empty: false
35 |
--------------------------------------------------------------------------------
/dygraph/dataset/roadsign_voc/download_roadsign_voc.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import sys
16 | import os.path as osp
17 | import logging
18 | # add python path of PaddleDetection to sys.path
19 | parent_path = osp.abspath(osp.join(__file__, *(['..'] * 3)))
20 | if parent_path not in sys.path:
21 | sys.path.append(parent_path)
22 |
23 | from ppdet.utils.download import download_dataset
24 |
25 | logging.basicConfig(level=logging.INFO)
26 |
27 | download_path = osp.split(osp.realpath(sys.argv[0]))[0]
28 | download_dataset(download_path, 'roadsign_voc')
29 |
--------------------------------------------------------------------------------
/dygraph/configs/yolov3/_base_/yolov3_mobilenet_v1.yml:
--------------------------------------------------------------------------------
1 | architecture: YOLOv3
2 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV1_pretrained.tar
3 | load_static_weights: True
4 | norm_type: sync_bn
5 |
6 | YOLOv3:
7 | backbone: MobileNet
8 | neck: YOLOv3FPN
9 | yolo_head: YOLOv3Head
10 | post_process: BBoxPostProcess
11 |
12 | MobileNet:
13 | scale: 1
14 | feature_maps: [4, 6, 13]
15 | with_extra_blocks: false
16 | extra_block_filters: []
17 |
18 | # use default config
19 | # YOLOv3FPN:
20 |
21 | YOLOv3Head:
22 | anchors: [[10, 13], [16, 30], [33, 23],
23 | [30, 61], [62, 45], [59, 119],
24 | [116, 90], [156, 198], [373, 326]]
25 | anchor_masks: [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
26 | loss: YOLOv3Loss
27 |
28 | YOLOv3Loss:
29 | ignore_thresh: 0.7
30 | downsample: [32, 16, 8]
31 | label_smooth: false
32 |
33 | BBoxPostProcess:
34 | decode:
35 | name: YOLOBox
36 | conf_thresh: 0.005
37 | downsample_ratio: 32
38 | clip_bbox: true
39 | nms:
40 | name: MultiClassNMS
41 | keep_top_k: 100
42 | score_threshold: 0.01
43 | nms_threshold: 0.45
44 | nms_top_k: 1000
45 | normalized: false
46 |
--------------------------------------------------------------------------------
/dataset/voc/download_voc.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import sys
16 | import os.path as osp
17 | import logging
18 | # add python path of PaddleDetection to sys.path
19 | parent_path = osp.abspath(osp.join(__file__, *(['..'] * 3)))
20 | if parent_path not in sys.path:
21 | sys.path.append(parent_path)
22 |
23 | from ppdet.utils.download import download_dataset, create_voc_list
24 |
25 | logging.basicConfig(level=logging.INFO)
26 |
27 | download_path = osp.split(osp.realpath(sys.argv[0]))[0]
28 | download_dataset(download_path, 'voc')
29 | create_voc_list(download_path)
30 |
--------------------------------------------------------------------------------
/dygraph/configs/yolov3/_base_/yolov3_r50vd_dcn.yml:
--------------------------------------------------------------------------------
1 | architecture: YOLOv3
2 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_vd_ssld_pretrained.tar
3 | load_static_weights: True
4 | norm_type: sync_bn
5 |
6 | YOLOv3:
7 | backbone: ResNet
8 | neck: YOLOv3FPN
9 | yolo_head: YOLOv3Head
10 | post_process: BBoxPostProcess
11 |
12 | ResNet:
13 | depth: 50
14 | variant: d
15 | return_idx: [1, 2, 3]
16 | dcn_v2_stages: [3]
17 | freeze_at: -1
18 | freeze_norm: false
19 | norm_decay: 0.
20 |
21 | # YOLOv3FPN:
22 |
23 | YOLOv3Head:
24 | anchors: [[10, 13], [16, 30], [33, 23],
25 | [30, 61], [62, 45], [59, 119],
26 | [116, 90], [156, 198], [373, 326]]
27 | anchor_masks: [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
28 | loss: YOLOv3Loss
29 |
30 | YOLOv3Loss:
31 | ignore_thresh: 0.7
32 | downsample: [32, 16, 8]
33 | label_smooth: false
34 |
35 | BBoxPostProcess:
36 | decode:
37 | name: YOLOBox
38 | conf_thresh: 0.005
39 | downsample_ratio: 32
40 | clip_bbox: true
41 | nms:
42 | name: MultiClassNMS
43 | keep_top_k: 100
44 | score_threshold: 0.01
45 | nms_threshold: 0.45
46 | nms_top_k: 1000
47 | normalized: false
48 |
--------------------------------------------------------------------------------
/dygraph/configs/fcos/fcos_r50_fpn_multiscale_2x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_detection.yml',
3 | '../runtime.yml',
4 | '_base_/fcos_r50_fpn.yml',
5 | '_base_/optimizer_1x.yml',
6 | '_base_/fcos_reader.yml',
7 | ]
8 |
9 | weights: output/fcos_r50_fpn_multiscale_2x_coco/model_final
10 |
11 | TrainReader:
12 | sample_transforms:
13 | - Decode: {}
14 | - RandomFlip: {prob: 0.5}
15 | - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
16 | - RandomResize: {target_size: [[640, 1333], [672, 1333], [704, 1333], [736, 1333], [768, 1333], [800, 1333]], keep_ratio: true, interp: 1}
17 | - Permute: {}
18 | batch_transforms:
19 | - PadBatch: {pad_to_stride: 128}
20 | - Gt2FCOSTarget:
21 | object_sizes_boundary: [64, 128, 256, 512]
22 | center_sampling_radius: 1.5
23 | downsample_ratios: [8, 16, 32, 64, 128]
24 | norm_reg_targets: True
25 | batch_size: 2
26 | shuffle: true
27 | drop_last: true
28 |
29 | epoch: 24
30 |
31 | LearningRate:
32 | base_lr: 0.01
33 | schedulers:
34 | - !PiecewiseDecay
35 | gamma: 0.1
36 | milestones: [16, 22]
37 | - !LinearWarmup
38 | start_factor: 0.3333333333333333
39 | steps: 500
40 |
--------------------------------------------------------------------------------
/dygraph/configs/ttfnet/README.md:
--------------------------------------------------------------------------------
1 | # TTFNet
2 |
3 | ## Introduction
4 |
5 | TTFNet is a training-time-friendly network for real-time object detection. It addresses the slow convergence of CenterNet by generating training samples with Gaussian kernels, which effectively removes the ambiguity in the anchor-free head. Its simple, lightweight structure also makes it easy to extend to other tasks.
6 |
7 | **Highlights:**
8 |
9 | - Simple structure: only two heads are needed to predict object location and size, and the time-consuming post-processing steps are removed.
10 | - Short training time: with a DarkNet53 backbone, about 2 hours of training on 8 V100 GPUs is enough to reach a good model.
11 |
12 | ## Model Zoo
13 |
14 | | Backbone | Type | Images/GPU | Lr schd | Inf time (fps) | Box AP | Download | Configs |
15 | | :-------------- | :------------- | :-----: | :-----: | :------------: | :-----: | :-----------------------------------------------------: | :-----: |
16 | | DarkNet53 | TTFNet | 12 | 1x | ---- | 33.5 | [model](https://paddledet.bj.bcebos.com/models/ttfnet_darknet53_1x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/master/dygraph/configs/ttfnet/ttfnet_darknet53_1x_coco.yml) |
17 |
18 | ## Citations
19 | ```
20 | @article{liu2019training,
21 |   title = {Training-Time-Friendly Network for Real-Time Object Detection},
22 |   author = {Zili Liu, Tu Zheng, Guodong Xu, Zheng Yang, Haifeng Liu, Deng Cai},
23 |   journal = {arXiv preprint arXiv:1909.00700},
24 |   year = {2019}
25 | }
26 | ```
27 |
--------------------------------------------------------------------------------
/ppdet/modeling/anchor_heads/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from __future__ import absolute_import
16 |
17 | from . import rpn_head
18 | from . import yolo_head
19 | from . import retina_head
20 | from . import fcos_head
21 | from . import corner_head
22 | from . import efficient_head
23 | from . import ttf_head
24 | from . import solov2_head
25 |
26 | from .rpn_head import *
27 | from .yolo_head import *
28 | from .retina_head import *
29 | from .fcos_head import *
30 | from .corner_head import *
31 | from .efficient_head import *
32 | from .ttf_head import *
33 | from .solov2_head import *
34 |
--------------------------------------------------------------------------------
/configs/htc/README.md:
--------------------------------------------------------------------------------
1 | # Hybrid Task Cascade for Instance Segmentation
2 |
3 | ## Introduction
4 |
5 | We provide config files to reproduce the results in the CVPR 2019 paper for [Hybrid Task Cascade](https://arxiv.org/abs/1901.07518).
6 |
7 | ```
8 | @inproceedings{chen2019hybrid,
9 | title={Hybrid task cascade for instance segmentation},
10 | author={Chen, Kai and Pang, Jiangmiao and Wang, Jiaqi and Xiong, Yu and Li, Xiaoxiao and Sun, Shuyang and Feng, Wansen and Liu, Ziwei and Shi, Jianping and Ouyang, Wanli and Chen Change Loy and Dahua Lin},
11 | booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
12 | year={2019}
13 | }
14 | ```
15 |
16 | ## Dataset
17 |
18 | HTC requires COCO and COCO-stuff dataset for training.
19 |
20 | ## Results and Models
21 |
22 | The results on COCO 2017val are shown in the table below. (Results on test-dev are usually slightly higher than on val.)
23 |
24 | | Backbone | Lr schd | Inf time (fps) | box AP | mask AP | Download |
25 | |:---------:|:-------:|:--------------:|:------:|:-------:|:--------:|
26 | | R-50-FPN | 1x | 11 | 42.9 | 37.0 | [model](https://paddlemodels.bj.bcebos.com/object_detection/htc_r50_fpn_1x.pdparams ) |
27 |
--------------------------------------------------------------------------------
/dygraph/ppdet/modeling/heads/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from . import bbox_head
16 | from . import mask_head
17 | from . import yolo_head
18 | from . import roi_extractor
19 | from . import ssd_head
20 | from . import fcos_head
21 | from . import solov2_head
22 | from . import ttf_head
23 | from . import cascade_head
24 |
25 | from .bbox_head import *
26 | from .mask_head import *
27 | from .yolo_head import *
28 | from .roi_extractor import *
29 | from .ssd_head import *
30 | from .fcos_head import *
31 | from .solov2_head import *
32 | from .ttf_head import *
33 | from .cascade_head import *
34 |
--------------------------------------------------------------------------------
/dygraph/configs/ssd/_base_/ssd_mobilenet_v1_300.yml:
--------------------------------------------------------------------------------
1 | architecture: SSD
2 | pretrain_weights: https://paddlemodels.bj.bcebos.com/object_detection/ssd_mobilenet_v1_coco_pretrained.tar
3 | load_static_weights: True
4 |
5 | SSD:
6 | backbone: MobileNet
7 | ssd_head: SSDHead
8 | post_process: BBoxPostProcess
9 |
10 | MobileNet:
11 | norm_decay: 0.
12 | scale: 1
13 | conv_learning_rate: 0.1
14 | extra_block_filters: [[256, 512], [128, 256], [128, 256], [64, 128]]
15 | with_extra_blocks: true
16 | feature_maps: [11, 13, 14, 15, 16, 17]
17 |
18 | SSDHead:
19 | kernel_size: 1
20 | padding: 0
21 | anchor_generator:
22 | steps: [0, 0, 0, 0, 0, 0]
23 | aspect_ratios: [[2.], [2., 3.], [2., 3.], [2., 3.], [2., 3.], [2., 3.]]
24 | min_ratio: 20
25 | max_ratio: 90
26 | base_size: 300
27 | min_sizes: [60.0, 105.0, 150.0, 195.0, 240.0, 285.0]
28 | max_sizes: [[], 150.0, 195.0, 240.0, 285.0, 300.0]
29 | offset: 0.5
30 | flip: true
31 | min_max_aspect_ratios_order: false
32 |
33 | BBoxPostProcess:
34 | decode:
35 | name: SSDBox
36 | nms:
37 | name: MultiClassNMS
38 | keep_top_k: 200
39 | score_threshold: 0.01
40 | nms_threshold: 0.45
41 | nms_top_k: 400
42 | nms_eta: 1.0
43 |
--------------------------------------------------------------------------------
/dygraph/configs/yolov3/_base_/yolov3_mobilenet_v3_small.yml:
--------------------------------------------------------------------------------
1 | architecture: YOLOv3
2 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV3_small_x1_0_pretrained.tar
3 | load_static_weights: True
4 | norm_type: sync_bn
5 |
6 | YOLOv3:
7 | backbone: MobileNetV3
8 | neck: YOLOv3FPN
9 | yolo_head: YOLOv3Head
10 | post_process: BBoxPostProcess
11 |
12 | MobileNetV3:
13 | model_name: small
14 | scale: 1.
15 | with_extra_blocks: false
16 | extra_block_filters: []
17 | feature_maps: [4, 9, 12]
18 |
19 | # use default config
20 | # YOLOv3FPN:
21 |
22 | YOLOv3Head:
23 | anchors: [[10, 13], [16, 30], [33, 23],
24 | [30, 61], [62, 45], [59, 119],
25 | [116, 90], [156, 198], [373, 326]]
26 | anchor_masks: [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
27 | loss: YOLOv3Loss
28 |
29 | YOLOv3Loss:
30 | ignore_thresh: 0.7
31 | downsample: [32, 16, 8]
32 | label_smooth: false
33 |
34 | BBoxPostProcess:
35 | decode:
36 | name: YOLOBox
37 | conf_thresh: 0.005
38 | downsample_ratio: 32
39 | clip_bbox: true
40 | nms:
41 | name: MultiClassNMS
42 | keep_top_k: 100
43 | score_threshold: 0.01
44 | nms_threshold: 0.45
45 | nms_top_k: 1000
46 | normalized: false
47 |
--------------------------------------------------------------------------------
/dygraph/configs/vehicle/vehicle_yolov3_darknet.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_detection.yml',
3 | '../runtime.yml',
4 | '../yolov3/_base_/optimizer_270e.yml',
5 | '../yolov3/_base_/yolov3_darknet53.yml',
6 | '../yolov3/_base_/yolov3_reader.yml',
7 | ]
8 |
9 | snapshot_epoch: 5
10 | weights: https://paddledet.bj.bcebos.com/models/vehicle_yolov3_darknet.pdparams
11 |
12 | YOLOv3Head:
13 | anchors: [[8, 9], [10, 23], [19, 15],
14 | [23, 33], [40, 25], [54, 50],
15 | [101, 80], [139, 145], [253, 224]]
16 |
17 | BBoxPostProcess:
18 | nms:
19 | name: MultiClassNMS
20 | keep_top_k: 100
21 | score_threshold: 0.005
22 | nms_threshold: 0.45
23 | nms_top_k: 400
24 | normalized: false
25 |
26 | num_classes: 6
27 |
28 | TrainDataset:
29 | !COCODataSet
30 | dataset_dir: dataset/vehicle
31 | anno_path: annotations/instances_train2017.json
32 | image_dir: train2017
33 | data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd']
34 |
35 | EvalDataset:
36 | !COCODataSet
37 | dataset_dir: dataset/vehicle
38 | anno_path: annotations/instances_val2017.json
39 | image_dir: val2017
40 |
41 | TestDataset:
42 | !ImageFolder
43 | anno_path: configs/vehicle/vehicle.json
44 |
--------------------------------------------------------------------------------
/dygraph/configs/yolov3/_base_/yolov3_mobilenet_v3_large.yml:
--------------------------------------------------------------------------------
1 | architecture: YOLOv3
2 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV3_large_x1_0_pretrained.tar
3 | load_static_weights: True
4 | norm_type: sync_bn
5 |
6 | YOLOv3:
7 | backbone: MobileNetV3
8 | neck: YOLOv3FPN
9 | yolo_head: YOLOv3Head
10 | post_process: BBoxPostProcess
11 |
12 | MobileNetV3:
13 | model_name: large
14 | scale: 1.
15 | with_extra_blocks: false
16 | extra_block_filters: []
17 | feature_maps: [7, 13, 16]
18 |
19 | # use default config
20 | # YOLOv3FPN:
21 |
22 | YOLOv3Head:
23 | anchors: [[10, 13], [16, 30], [33, 23],
24 | [30, 61], [62, 45], [59, 119],
25 | [116, 90], [156, 198], [373, 326]]
26 | anchor_masks: [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
27 | loss: YOLOv3Loss
28 |
29 | YOLOv3Loss:
30 | ignore_thresh: 0.7
31 | downsample: [32, 16, 8]
32 | label_smooth: false
33 |
34 | BBoxPostProcess:
35 | decode:
36 | name: YOLOBox
37 | conf_thresh: 0.005
38 | downsample_ratio: 32
39 | clip_bbox: true
40 | nms:
41 | name: MultiClassNMS
42 | keep_top_k: 100
43 | score_threshold: 0.01
44 | nms_threshold: 0.45
45 | nms_top_k: 1000
46 | normalized: false
47 |
--------------------------------------------------------------------------------
/dygraph/configs/solov2/solov2_r50_fpn_3x_coco.yml:
--------------------------------------------------------------------------------
1 | _BASE_: [
2 | '../datasets/coco_instance.yml',
3 | '../runtime.yml',
4 | '_base_/solov2_r50_fpn.yml',
5 | '_base_/optimizer_1x.yml',
6 | '_base_/solov2_reader.yml',
7 | ]
8 | weights: output/solov2_r50_fpn_3x_coco/model_final
9 | epoch: 36
10 |
11 | LearningRate:
12 | base_lr: 0.01
13 | schedulers:
14 | - !PiecewiseDecay
15 | gamma: 0.1
16 | milestones: [24, 33]
17 | - !LinearWarmup
18 | start_factor: 0.
19 | steps: 1000
20 |
21 | TrainReader:
22 | sample_transforms:
23 | - Decode: {}
24 | - Poly2Mask: {}
25 | - RandomResize: {interp: 1,
26 | target_size: [[640, 1333], [672, 1333], [704, 1333], [736, 1333], [768, 1333], [800, 1333]],
27 | keep_ratio: True}
28 | - RandomFlip: {}
29 | - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
30 | - Permute: {}
31 | batch_transforms:
32 | - PadBatch: {pad_to_stride: 32}
33 | - Gt2Solov2Target: {num_grids: [40, 36, 24, 16, 12],
34 | scale_ranges: [[1, 96], [48, 192], [96, 384], [192, 768], [384, 2048]],
35 | coord_sigma: 0.2}
36 | batch_size: 2
37 | shuffle: true
38 | drop_last: true
39 |
--------------------------------------------------------------------------------
/ppdet/modeling/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from __future__ import absolute_import
16 |
17 | # XXX for triggering decorators
18 | from . import anchor_heads
19 | from . import architectures
20 | from . import backbones
21 | from . import roi_extractors
22 | from . import roi_heads
23 | from . import ops
24 | from . import target_assigners
25 | from . import mask_head
26 |
27 | from .anchor_heads import *
28 | from .architectures import *
29 | from .backbones import *
30 | from .roi_extractors import *
31 | from .roi_heads import *
32 | from .ops import *
33 | from .target_assigners import *
34 | from .mask_head import *
35 |
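The submodule imports above exist mainly to execute module-level decorators that register components (hence the `# XXX for triggering decorators` comment), after which the wildcard re-exports expose them. A generic sketch of that registration pattern, with hypothetical names (`register`, `REGISTRY`), not the actual ppdet registry:

```python
# Generic sketch of decorator-based registration (hypothetical names):
# merely importing a module containing @register classes fills the registry,
# which is why __init__.py imports every submodule before re-exporting it.
REGISTRY = {}

def register(cls):
    REGISTRY[cls.__name__] = cls
    return cls

@register
class YOLOv3Head:
    pass

print(REGISTRY)  # {'YOLOv3Head': <class '...YOLOv3Head'>}
```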
--------------------------------------------------------------------------------
/ppdet/modeling/tests/decorator_helper.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import paddle.fluid as fluid
16 |
17 | __all__ = ['prog_scope']
18 |
19 |
20 | def prog_scope():
21 | def __impl__(fn):
22 | def __fn__(*args, **kwargs):
23 | prog = fluid.Program()
24 | startup_prog = fluid.Program()
25 | scope = fluid.core.Scope()
26 | with fluid.scope_guard(scope):
27 | with fluid.program_guard(prog, startup_prog):
28 | with fluid.unique_name.guard():
29 | fn(*args, **kwargs)
30 |
31 | return __fn__
32 |
33 | return __impl__
34 |
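`prog_scope()` returns a decorator that runs the wrapped test inside fresh `fluid.Program` objects, a fresh `Scope`, and a unique-name guard, so graph state does not leak between test functions. A hedged usage sketch (the test body is hypothetical and assumes a static-graph PaddlePaddle install with `decorator_helper.py` importable):

```python
# Usage sketch (hypothetical test; assumes a fluid/static-graph PaddlePaddle
# environment): each decorated call builds its graph in a fresh program/scope.
import paddle.fluid as fluid
from decorator_helper import prog_scope

@prog_scope()
def test_build_input():
    # Anything built here lives only in this call's program and scope.
    fluid.data(name='x', shape=[None, 3, 224, 224], dtype='float32')

test_build_input()  # state from this call does not leak into the next
test_build_input()
```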
--------------------------------------------------------------------------------
/dygraph/configs/ssd/_base_/ssdlite_mobilenet_v3_large_320.yml:
--------------------------------------------------------------------------------
1 | architecture: SSD
2 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV3_large_x1_0_ssld_pretrained.tar
3 | load_static_weights: True
4 |
5 | SSD:
6 | backbone: MobileNetV3
7 | ssd_head: SSDHead
8 | post_process: BBoxPostProcess
9 |
10 | MobileNetV3:
11 | scale: 1.0
12 | model_name: large
13 | conv_decay: 0.00004
14 | with_extra_blocks: true
15 | extra_block_filters: [[256, 512], [128, 256], [128, 256], [64, 128]]
16 | feature_maps: [14, 17, 18, 19, 20, 21]
17 | lr_mult_list: [0.25, 0.25, 0.5, 0.5, 0.75]
18 | multiplier: 0.5
19 |
20 | SSDHead:
21 | use_sepconv: True
22 | conv_decay: 0.00004
23 | anchor_generator:
24 | steps: [16, 32, 64, 107, 160, 320]
25 | aspect_ratios: [[2.], [2., 3.], [2., 3.], [2., 3.], [2., 3.], [2., 3.]]
26 | min_ratio: 20
27 | max_ratio: 95
28 | base_size: 320
29 | min_sizes: []
30 | max_sizes: []
31 | offset: 0.5
32 | flip: true
33 | clip: true
34 | min_max_aspect_ratios_order: false
35 |
36 | BBoxPostProcess:
37 | decode:
38 | name: SSDBox
39 | nms:
40 | name: MultiClassNMS
41 | keep_top_k: 200
42 | score_threshold: 0.01
43 | nms_threshold: 0.45
44 | nms_top_k: 400
45 | nms_eta: 1.0
46 |
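With `min_sizes`/`max_sizes` left empty, prior-box sizes are typically derived from `min_ratio`, `max_ratio`, and `base_size` in the usual SSD fashion, spacing the ratios evenly across the feature maps and giving the first map extra-small priors. A hedged sketch of that common derivation (`derive_sizes` is illustrative and may differ from PaddleDetection's exact rounding):

```python
# Hedged sketch of the conventional SSD prior-size derivation used when
# min_sizes / max_sizes are empty; not guaranteed to match ppdet exactly.
import math

def derive_sizes(base_size=320, min_ratio=20, max_ratio=95, num_layers=6):
    step = int(math.floor((max_ratio - min_ratio) / (num_layers - 2)))
    min_sizes, max_sizes = [], []
    for ratio in range(min_ratio, max_ratio + 1, step):
        min_sizes.append(base_size * ratio / 100.0)
        max_sizes.append(base_size * (ratio + step) / 100.0)
    # The first feature map conventionally gets extra-small priors.
    return [base_size * 0.1] + min_sizes, [base_size * 0.2] + max_sizes

print(derive_sizes())  # six min/max sizes for the six feature maps
```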
--------------------------------------------------------------------------------
/dygraph/configs/ssd/_base_/ssdlite_mobilenet_v3_small_320.yml:
--------------------------------------------------------------------------------
1 | architecture: SSD
2 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV3_small_x1_0_ssld_pretrained.tar
3 | load_static_weights: True
4 |
5 | SSD:
6 | backbone: MobileNetV3
7 | ssd_head: SSDHead
8 | post_process: BBoxPostProcess
9 |
10 | MobileNetV3:
11 | scale: 1.0
12 | model_name: small
13 | conv_decay: 0.00004
14 | with_extra_blocks: true
15 | extra_block_filters: [[256, 512], [128, 256], [128, 256], [64, 128]]
16 | feature_maps: [10, 13, 14, 15, 16, 17]
17 | lr_mult_list: [0.25, 0.25, 0.5, 0.5, 0.75]
18 | multiplier: 0.5
19 |
20 | SSDHead:
21 | use_sepconv: True
22 | conv_decay: 0.00004
23 | anchor_generator:
24 | steps: [16, 32, 64, 107, 160, 320]
25 | aspect_ratios: [[2.], [2., 3.], [2., 3.], [2., 3.], [2., 3.], [2., 3.]]
26 | min_ratio: 20
27 | max_ratio: 95
28 | base_size: 320
29 | min_sizes: []
30 | max_sizes: []
31 | offset: 0.5
32 | flip: true
33 | clip: true
34 | min_max_aspect_ratios_order: false
35 |
36 | BBoxPostProcess:
37 | decode:
38 | name: SSDBox
39 | nms:
40 | name: MultiClassNMS
41 | keep_top_k: 200
42 | score_threshold: 0.01
43 | nms_threshold: 0.45
44 | nms_top_k: 400
45 | nms_eta: 1.0
46 |
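The `MultiClassNMS` block above keeps at most `nms_top_k` candidates scoring above `score_threshold`, suppresses boxes whose IoU with an already-kept box exceeds `nms_threshold`, and returns at most `keep_top_k` detections. A simplified single-class sketch of that greedy procedure (illustrative only; the real op also handles per-class processing, `nms_eta`, and coordinate normalization):

```python
# Simplified single-class greedy NMS mirroring the thresholds in the config
# above. Boxes are [x1, y1, x2, y2]; illustrative only, not the Paddle op.
def iou(a, b):
    ax1, ay1, ax2, ay2 = a; bx1, by1, bx2, by2 = b
    ix1, iy1 = max(ax1, bx1), max(ay1, by1)
    ix2, iy2 = min(ax2, bx2), min(ay2, by2)
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    union = (ax2 - ax1) * (ay2 - ay1) + (bx2 - bx1) * (by2 - by1) - inter
    return inter / union if union > 0 else 0.0

def nms(boxes, scores, score_threshold=0.01, nms_threshold=0.45,
        nms_top_k=400, keep_top_k=200):
    cands = sorted((s, b) for b, s in zip(boxes, scores) if s > score_threshold)
    cands = list(reversed(cands))[:nms_top_k]   # highest scores first
    kept = []
    for s, b in cands:
        if all(iou(b, kb) <= nms_threshold for _, kb in kept):
            kept.append((s, b))
        if len(kept) == keep_top_k:
            break
    return kept

print(nms([[0, 0, 10, 10], [1, 1, 11, 11], [50, 50, 60, 60]], [0.9, 0.8, 0.7]))
```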
--------------------------------------------------------------------------------
/dygraph/configs/gn/README.md:
--------------------------------------------------------------------------------
1 | # Group Normalization
2 |
3 | ## Model Zoo
4 |
5 | | Backbone | Model type | Images per GPU | LR schedule | Inference time (fps) | Box AP | Mask AP | Download | Config |
6 | | :------------- | :------------- | :-----------: | :------: | :--------: |:-----: | :-----: | :----: | :----: |
7 | | ResNet50-FPN | Faster | 1 | 2x | - | 41.9 | - | [download](https://paddledet.bj.bcebos.com/models/faster_rcnn_r50_fpn_gn_2x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/master/dygraph/configs/gn/faster_rcnn_r50_fpn_gn_2x_coco.yml) |
8 | | ResNet50-FPN | Mask | 1 | 2x | - | 42.3 | 38.4 | [download](https://paddledet.bj.bcebos.com/models/mask_rcnn_r50_fpn_gn_2x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/master/dygraph/configs/gn/mask_rcnn_r50_fpn_gn_2x_coco.yml) |
9 |
10 | **Note:** The Faster R-CNN baseline uses only a `2fc` box head; here the [`4conv1fc` head](https://arxiv.org/abs/1803.08494) is used instead, with GN applied between its 4 conv layers, and GN is also used in the FPN. For Mask R-CNN, GN is additionally applied between the 4 conv layers of the mask head.
11 |
12 | ## Citations
13 | ```
14 | @inproceedings{wu2018group,
15 | title={Group Normalization},
16 | author={Wu, Yuxin and He, Kaiming},
17 | booktitle={Proceedings of the European Conference on Computer Vision (ECCV)},
18 | year={2018}
19 | }
20 | ```
21 |
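As a reminder of what GN computes, here is a small numpy sketch of group normalization over an NCHW tensor (illustrative math only; the learnable per-channel scale/shift parameters are omitted):

```python
# Minimal numpy sketch of Group Normalization (Wu & He, 2018): channels are
# split into groups and each group is normalized with its own mean/variance,
# independently of the batch dimension. Gamma/beta omitted for brevity.
import numpy as np

def group_norm(x, num_groups=32, eps=1e-5):
    n, c, h, w = x.shape
    assert c % num_groups == 0
    g = x.reshape(n, num_groups, c // num_groups, h, w)
    mean = g.mean(axis=(2, 3, 4), keepdims=True)
    var = g.var(axis=(2, 3, 4), keepdims=True)
    return ((g - mean) / np.sqrt(var + eps)).reshape(n, c, h, w)

y = group_norm(np.random.randn(2, 256, 7, 7).astype("float32"))
print(y.shape, y.mean(), y.std())  # roughly zero mean, unit std
```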
--------------------------------------------------------------------------------
/dygraph/configs/fcos/_base_/fcos_r50_fpn.yml:
--------------------------------------------------------------------------------
1 | architecture: FCOS
2 | pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_cos_pretrained.tar
3 | load_static_weights: True
4 |
5 | FCOS:
6 | backbone: ResNet
7 | neck: FPN
8 | fcos_head: FCOSHead
9 | fcos_post_process: FCOSPostProcess
10 |
11 | ResNet:
12 | # index 0 stands for res2
13 | depth: 50
14 | norm_type: bn
15 | freeze_at: 0
16 | return_idx: [1,2,3]
17 | num_stages: 4
18 |
19 | FPN:
20 | out_channel: 256
21 | spatial_scales: [0.125, 0.0625, 0.03125]
22 | extra_stage: 2
23 | has_extra_convs: true
24 | use_c5: false
25 |
26 | FCOSHead:
27 | fcos_feat:
28 | name: FCOSFeat
29 | feat_in: 256
30 | feat_out: 256
31 | num_convs: 4
32 | norm_type: "gn"
33 | use_dcn: false
34 | num_classes: 80
35 | fpn_stride: [8, 16, 32, 64, 128]
36 | prior_prob: 0.01
37 | fcos_loss: FCOSLoss
38 | norm_reg_targets: true
39 | centerness_on_reg: true
40 |
41 | FCOSLoss:
42 | loss_alpha: 0.25
43 | loss_gamma: 2.0
44 | iou_loss_type: "giou"
45 | reg_weights: 1.0
46 |
47 | FCOSPostProcess:
48 | decode:
49 | name: FCOSBox
50 | num_classes: 80
51 | batch_size: 1
52 | nms:
53 | name: MultiClassNMS
54 | nms_top_k: 1000
55 | keep_top_k: 100
56 | score_threshold: 0.025
57 | nms_threshold: 0.6
58 |
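In the config above, the FPN `spatial_scales` of [0.125, 0.0625, 0.03125] correspond to strides 8/16/32, and `extra_stage: 2` with `has_extra_convs: true` appends two coarser levels, which is how the head's `fpn_stride: [8, 16, 32, 64, 128]` is covered. A tiny consistency-check sketch (illustrative only):

```python
# Illustrative check for the config above: FPN spatial_scales plus two extra
# stages should yield the five strides listed under FCOSHead.fpn_stride.
spatial_scales = [0.125, 0.0625, 0.03125]
extra_stage = 2

strides = [int(round(1.0 / s)) for s in spatial_scales]
for _ in range(extra_stage):
    strides.append(strides[-1] * 2)   # each extra level halves the resolution

assert strides == [8, 16, 32, 64, 128]
print(strides)
```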
--------------------------------------------------------------------------------
/dygraph/configs/ssd/_base_/ssd_reader.yml:
--------------------------------------------------------------------------------
1 | worker_num: 2
2 | TrainReader:
3 | inputs_def:
4 | num_max_boxes: 90
5 |
6 | sample_transforms:
7 | - Decode: {}
8 | - RandomDistort: {brightness: [0.5, 1.125, 0.875], random_apply: False}
9 | - RandomExpand: {fill_value: [104., 117., 123.]}
10 | - RandomCrop: {allow_no_crop: true}
11 | - RandomFlip: {}
12 | - Resize: {target_size: [300, 300], keep_ratio: False, interp: 1}
13 | - NormalizeBox: {}
14 | - PadBox: {num_max_boxes: 90}
15 |
16 | batch_transforms:
17 | - NormalizeImage: {mean: [104., 117., 123.], std: [1., 1., 1.], is_scale: false}
18 | - Permute: {}
19 |
20 | batch_size: 8
21 | shuffle: true
22 | drop_last: true
23 |
24 |
25 | EvalReader:
26 | sample_transforms:
27 | - Decode: {}
28 | - Resize: {target_size: [300, 300], keep_ratio: False, interp: 1}
29 | - NormalizeImage: {mean: [104., 117., 123.], std: [1., 1., 1.], is_scale: false}
30 | - Permute: {}
31 | batch_size: 1
32 | drop_empty: false
33 |
34 | TestReader:
35 | inputs_def:
36 | image_shape: [3, 300, 300]
37 | sample_transforms:
38 | - Decode: {}
39 | - Resize: {target_size: [300, 300], keep_ratio: False, interp: 1}
40 | - NormalizeImage: {mean: [104., 117., 123.], std: [1., 1., 1.], is_scale: false}
41 | - Permute: {}
42 | batch_size: 1
43 |
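With `is_scale: false` and a `std` of all ones, `NormalizeImage` in this reader amounts to plain per-channel mean subtraction on 0–255 pixel values (the classic VGG/Caffe preprocessing), matching the `fill_value` used by `RandomExpand`. A numpy sketch of the same arithmetic, illustrative only:

```python
# Illustrative numpy equivalent of the NormalizeImage settings above
# (is_scale: false, std of 1): per-channel mean subtraction on 0-255 HWC
# pixels, followed by the HWC -> CHW Permute step.
import numpy as np

mean = np.array([104., 117., 123.], dtype="float32")
img = np.random.randint(0, 256, size=(300, 300, 3)).astype("float32")

normalized = img - mean          # no /255 scaling, std is all ones
chw = normalized.transpose(2, 0, 1)
print(chw.shape)                 # (3, 300, 300)
```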
--------------------------------------------------------------------------------
/dygraph/.gitignore:
--------------------------------------------------------------------------------
1 | # Virtualenv
2 | /.venv/
3 | /venv/
4 |
5 | # Byte-compiled / optimized / DLL files
6 | __pycache__/
7 | .ipynb_checkpoints/
8 | *.py[cod]
9 |
10 | # C extensions
11 | *.so
12 |
13 | # json file
14 | *.json
15 |
16 | # Distribution / packaging
17 | /bin/
18 | /build/
19 | /develop-eggs/
20 | /dist/
21 | /eggs/
22 | /lib/
23 | /lib64/
24 | /output/
25 | /inference_model/
26 | /parts/
27 | /sdist/
28 | /var/
29 | /*.egg-info/
30 | /.installed.cfg
31 | /*.egg
32 | /.eggs
33 |
34 | # AUTHORS and ChangeLog will be generated while packaging
35 | /AUTHORS
36 | /ChangeLog
37 |
38 | # BCloud / BuildSubmitter
39 | /build_submitter.*
40 | /logger_client_log
41 |
42 | # Installer logs
43 | pip-log.txt
44 | pip-delete-this-directory.txt
45 |
46 | # Unit test / coverage reports
47 | .tox/
48 | .coverage
49 | .cache
50 | .pytest_cache
51 | nosetests.xml
52 | coverage.xml
53 |
54 | # Translations
55 | *.mo
56 |
57 | # Sphinx documentation
58 | /docs/_build/
59 |
60 | *.json
61 | *.tar
62 | *.pyc
63 |
64 | .idea/
65 |
66 | dataset/coco/annotations
67 | dataset/coco/train2017
68 | dataset/coco/val2017
69 | dataset/voc/VOCdevkit
70 | dataset/fruit/fruit-detection/
71 | dataset/voc/test.txt
72 | dataset/voc/trainval.txt
73 | dataset/wider_face/WIDER_test
74 | dataset/wider_face/WIDER_train
75 | dataset/wider_face/WIDER_val
76 | dataset/wider_face/wider_face_split
77 |
--------------------------------------------------------------------------------
/dygraph/configs/mask_rcnn/_base_/mask_fpn_reader.yml:
--------------------------------------------------------------------------------
1 | worker_num: 2
2 | TrainReader:
3 | sample_transforms:
4 | - Decode: {}
5 | - RandomResize: {target_size: [[640, 1333], [672, 1333], [704, 1333], [736, 1333], [768, 1333], [800, 1333]], interp: 2, keep_ratio: True}
6 | - RandomFlip: {prob: 0.5}
7 | - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
8 | - Permute: {}
9 | batch_transforms:
10 | - PadBatch: {pad_to_stride: 32, pad_gt: true}
11 | batch_size: 1
12 | shuffle: true
13 | drop_last: true
14 |
15 | EvalReader:
16 | sample_transforms:
17 | - Decode: {}
18 | - Resize: {interp: 2, target_size: [800, 1333], keep_ratio: True}
19 | - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
20 | - Permute: {}
21 | batch_transforms:
22 | - PadBatch: {pad_to_stride: 32, pad_gt: false}
23 | batch_size: 1
24 | shuffle: false
25 | drop_last: false
26 | drop_empty: false
27 |
28 |
29 | TestReader:
30 | sample_transforms:
31 | - Decode: {}
32 | - Resize: {interp: 2, target_size: [800, 1333], keep_ratio: True}
33 | - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
34 | - Permute: {}
35 | batch_transforms:
36 | - PadBatch: {pad_to_stride: 32, pad_gt: false}
37 | batch_size: 1
38 | shuffle: false
39 | drop_last: false
40 |
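`PadBatch: {pad_to_stride: 32}` in the reader above pads every image in a batch to the largest height and width in that batch, each rounded up to a multiple of 32 so all FPN levels get integer spatial sizes. A small sketch of that rounding (the `padded_hw` helper is hypothetical, illustrative only):

```python
# Illustrative sketch of the pad_to_stride: 32 rounding performed by PadBatch:
# pad to the batch-wide max H/W, rounded up to a multiple of the stride.
import math

def padded_hw(shapes, stride=32):
    max_h = max(h for h, _ in shapes)
    max_w = max(w for _, w in shapes)
    return (int(math.ceil(max_h / stride)) * stride,
            int(math.ceil(max_w / stride)) * stride)

print(padded_hw([(800, 1201), (736, 1333)]))  # -> (800, 1344)
```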
--------------------------------------------------------------------------------
/dygraph/configs/cascade_rcnn/_base_/cascade_fpn_reader.yml:
--------------------------------------------------------------------------------
1 | worker_num: 2
2 | TrainReader:
3 | sample_transforms:
4 | - Decode: {}
5 | - RandomResize: {target_size: [[640, 1333], [672, 1333], [704, 1333], [736, 1333], [768, 1333], [800, 1333]], interp: 2, keep_ratio: True}
6 | - RandomFlip: {prob: 0.5}
7 | - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
8 | - Permute: {}
9 | batch_transforms:
10 | - PadBatch: {pad_to_stride: 32, pad_gt: true}
11 | batch_size: 1
12 | shuffle: true
13 | drop_last: true
14 |
15 |
16 | EvalReader:
17 | sample_transforms:
18 | - Decode: {}
19 | - Resize: {interp: 2, target_size: [800, 1333], keep_ratio: True}
20 | - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
21 | - Permute: {}
22 | batch_transforms:
23 | - PadBatch: {pad_to_stride: 32, pad_gt: false}
24 | batch_size: 1
25 | shuffle: false
26 | drop_last: false
27 | drop_empty: false
28 |
29 |
30 | TestReader:
31 | sample_transforms:
32 | - Decode: {}
33 | - Resize: {interp: 2, target_size: [800, 1333], keep_ratio: True}
34 | - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
35 | - Permute: {}
36 | batch_transforms:
37 | - PadBatch: {pad_to_stride: 32, pad_gt: false}
38 | batch_size: 1
39 | shuffle: false
40 | drop_last: false
41 |
--------------------------------------------------------------------------------
/dygraph/configs/faster_rcnn/_base_/faster_fpn_reader.yml:
--------------------------------------------------------------------------------
1 | worker_num: 2
2 | TrainReader:
3 | sample_transforms:
4 | - Decode: {}
5 | - RandomResize: {target_size: [[640, 1333], [672, 1333], [704, 1333], [736, 1333], [768, 1333], [800, 1333]], interp: 2, keep_ratio: True}
6 | - RandomFlip: {prob: 0.5}
7 | - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
8 | - Permute: {}
9 | batch_transforms:
10 | - PadBatch: {pad_to_stride: 32, pad_gt: true}
11 | batch_size: 1
12 | shuffle: true
13 | drop_last: true
14 |
15 |
16 | EvalReader:
17 | sample_transforms:
18 | - Decode: {}
19 | - Resize: {interp: 2, target_size: [800, 1333], keep_ratio: True}
20 | - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
21 | - Permute: {}
22 | batch_transforms:
23 | - PadBatch: {pad_to_stride: 32, pad_gt: false}
24 | batch_size: 1
25 | shuffle: false
26 | drop_last: false
27 | drop_empty: false
28 |
29 |
30 | TestReader:
31 | sample_transforms:
32 | - Decode: {}
33 | - Resize: {interp: 2, target_size: [800, 1333], keep_ratio: True}
34 | - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
35 | - Permute: {}
36 | batch_transforms:
37 | - PadBatch: {pad_to_stride: 32, pad_gt: false}
38 | batch_size: 1
39 | shuffle: false
40 | drop_last: false
41 |
--------------------------------------------------------------------------------