├── .copyright.hook
├── .gitattributes
├── .github
└── ISSUE_TEMPLATE
│ ├── ---bug-report---bug--.md
│ ├── ---feature-request--------.md
│ └── ---general-issue-------.md
├── .gitignore
├── .pre-commit-config.yaml
├── .style.yapf
├── .travis.yml
├── EISeg
├── LICENSE
├── MANIFEST.in
├── README.md
├── README_AR.md
├── README_EN.md
├── docs
│ ├── image.md
│ ├── image_en.md
│ ├── install.md
│ ├── install_en.md
│ ├── medical.md
│ ├── medical_en.md
│ ├── remote_sensing.md
│ ├── remote_sensing_en.md
│ ├── tools.md
│ ├── video.md
│ └── video_en.md
├── eiseg
│ ├── __init__.py
│ ├── __main__.py
│ ├── app.py
│ ├── config
│ │ ├── colormap.txt
│ │ └── config.yaml
│ ├── controller.py
│ ├── exe.py
│ ├── inference
│ │ ├── __init__.py
│ │ ├── clicker.py
│ │ ├── predictor
│ │ │ ├── __init__.py
│ │ │ ├── base.py
│ │ │ └── ops.py
│ │ └── transforms
│ │ │ ├── __init__.py
│ │ │ ├── base.py
│ │ │ ├── crops.py
│ │ │ ├── flip.py
│ │ │ ├── limit_longest_side.py
│ │ │ └── zoom_in.py
│ ├── models.py
│ ├── plugin
│ │ ├── __init__.py
│ │ ├── medical
│ │ │ ├── __init__.py
│ │ │ └── med.py
│ │ ├── n2grid
│ │ │ ├── __init__.py
│ │ │ ├── grid.py
│ │ │ └── rs_grid.py
│ │ ├── remotesensing
│ │ │ ├── __init__.py
│ │ │ ├── imgtools.py
│ │ │ ├── raster.py
│ │ │ └── shape.py
│ │ └── video
│ │ │ ├── __init__.py
│ │ │ ├── inference_core.py
│ │ │ ├── load_model.py
│ │ │ ├── util
│ │ │ ├── __init__.py
│ │ │ ├── range_transform.py
│ │ │ └── tensor_util.py
│ │ │ └── video_tools.py
│ ├── resource
│ │ ├── 3D.png
│ │ ├── About.png
│ │ ├── AutoSave.png
│ │ ├── ChangeOutputDir.png
│ │ ├── Clear.png
│ │ ├── ClearLabel.png
│ │ ├── ClearRecent.png
│ │ ├── Close.png
│ │ ├── Data.png
│ │ ├── DeleteAllPolygon.png
│ │ ├── DeletePolygon.png
│ │ ├── Egypt.png
│ │ ├── English.png
│ │ ├── ExportLabel.png
│ │ ├── File.png
│ │ ├── ImportLabel.png
│ │ ├── Label.png
│ │ ├── Language.png
│ │ ├── Log.png
│ │ ├── MedicalImaging.png
│ │ ├── Model.png
│ │ ├── N2.png
│ │ ├── Net.png
│ │ ├── Next.png
│ │ ├── Ok.png
│ │ ├── OpenFolder.png
│ │ ├── OpenImage.png
│ │ ├── Paddle.png
│ │ ├── Play.png
│ │ ├── Prev.png
│ │ ├── Propagate.png
│ │ ├── Qt.png
│ │ ├── Quit.png
│ │ ├── Redo.png
│ │ ├── RemoteSensing.png
│ │ ├── ReportBug.png
│ │ ├── Same.png
│ │ ├── Save.png
│ │ ├── SaveAs.png
│ │ ├── SaveCOCO.png
│ │ ├── SaveCutout.png
│ │ ├── SaveGrayScale.png
│ │ ├── SaveJson.png
│ │ ├── SaveLargestCC.png
│ │ ├── SavePseudoColor.png
│ │ ├── Setting.png
│ │ ├── Shortcut.png
│ │ ├── Show.png
│ │ ├── ShowRSPoly.png
│ │ ├── Stop.png
│ │ ├── Undo.png
│ │ ├── Use.png
│ │ ├── Video.png
│ │ ├── VideoAnno.png
│ │ ├── loading.gif
│ │ └── 中文.png
│ ├── run.py
│ ├── ui.py
│ ├── util
│ │ ├── __init__.py
│ │ ├── coco.py.bk
│ │ ├── coco
│ │ │ ├── __init__.py
│ │ │ ├── _mask.pyx
│ │ │ ├── coco.py
│ │ │ ├── cocoeval.py
│ │ │ ├── common
│ │ │ │ ├── gason.cpp
│ │ │ │ ├── gason.h
│ │ │ │ ├── maskApi.c
│ │ │ │ └── maskApi.h
│ │ │ └── mask.py
│ │ ├── colormap.py
│ │ ├── config.py
│ │ ├── exp_imports
│ │ │ └── default.py
│ │ ├── label.py
│ │ ├── language.py
│ │ ├── manager.py
│ │ ├── misc.py
│ │ ├── opath.py
│ │ ├── palette.py
│ │ ├── polygon.py
│ │ ├── qt.py
│ │ ├── regularization
│ │ │ ├── __init__.py
│ │ │ ├── cal_line.py
│ │ │ ├── cal_point.py
│ │ │ ├── rdp_alg.py
│ │ │ ├── rotate_ang.py
│ │ │ └── rs_regularization.py
│ │ ├── serialization.py
│ │ ├── translate
│ │ │ ├── Arabic.qm
│ │ │ └── English.qm
│ │ └── vis.py
│ └── widget
│ │ ├── __init__.py
│ │ ├── bbox.py
│ │ ├── create.py
│ │ ├── grip.py
│ │ ├── line.py
│ │ ├── loading.py
│ │ ├── polygon.py
│ │ ├── scene.py
│ │ ├── shortcut.py
│ │ ├── table.py
│ │ ├── view.py
│ │ └── vtk.py
├── init.sh
├── requirements-med.txt
├── requirements-rs.txt
├── requirements-video.txt
├── requirements.txt
├── setup.py
└── tool
│ ├── baidu_translate.py
│ ├── cut_video.py
│ ├── medical2video.py
│ ├── pypi.sh
│ ├── semantic2instance.py
│ ├── translate.pro
│ ├── translateUI.py
│ ├── ts
│ ├── Arabic.ts
│ └── English.ts
│ └── update_md5.py
├── LICENSE
├── Matting
├── README.md
├── README_CN.md
├── configs
│ ├── benchmarks
│ │ ├── Composition-1k
│ │ │ └── closeform_composition1k.yml
│ │ ├── Distinctions-646
│ │ │ └── closeform_distinctions646.yml
│ │ └── PPM
│ │ │ ├── README.md
│ │ │ ├── closeform.yml
│ │ │ ├── fast.yml
│ │ │ ├── knn.yml
│ │ │ ├── learningbased.yml
│ │ │ └── randomwalks.yml
│ ├── dim
│ │ └── dim-vgg16.yml
│ ├── human_matting
│ │ └── human_matting-resnet34_vd.yml
│ ├── modnet
│ │ ├── modnet-hrnet_w18.yml
│ │ ├── modnet-mobilenetv2.yml
│ │ └── modnet-resnet50_vd.yml
│ ├── ppmatting
│ │ ├── README.md
│ │ ├── ppmatting-hrnet_w18-human_1024.yml
│ │ ├── ppmatting-hrnet_w18-human_512.yml
│ │ ├── ppmatting-hrnet_w48-composition.yml
│ │ └── ppmatting-hrnet_w48-distinctions.yml
│ └── quick_start
│ │ └── modnet-mobilenetv2.yml
├── deploy
│ ├── human_matting_android_demo
│ │ ├── .gitignore
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── README_CN.md
│ │ ├── app
│ │ │ ├── .gitignore
│ │ │ ├── build.gradle
│ │ │ ├── gradlew
│ │ │ ├── gradlew.bat
│ │ │ ├── local.properties
│ │ │ ├── proguard-rules.pro
│ │ │ └── src
│ │ │ │ ├── androidTest
│ │ │ │ └── java
│ │ │ │ │ └── com
│ │ │ │ │ └── baidu
│ │ │ │ │ └── paddle
│ │ │ │ │ └── lite
│ │ │ │ │ └── demo
│ │ │ │ │ └── ExampleInstrumentedTest.java
│ │ │ │ ├── main
│ │ │ │ ├── AndroidManifest.xml
│ │ │ │ ├── assets
│ │ │ │ │ └── image_matting
│ │ │ │ │ │ ├── images
│ │ │ │ │ │ ├── bg.jpg
│ │ │ │ │ │ └── human.jpg
│ │ │ │ │ │ └── labels
│ │ │ │ │ │ └── label_list
│ │ │ │ ├── java
│ │ │ │ │ └── com
│ │ │ │ │ │ └── paddle
│ │ │ │ │ │ └── demo
│ │ │ │ │ │ └── matting
│ │ │ │ │ │ ├── AppCompatPreferenceActivity.java
│ │ │ │ │ │ ├── MainActivity.java
│ │ │ │ │ │ ├── Predictor.java
│ │ │ │ │ │ ├── SettingsActivity.java
│ │ │ │ │ │ ├── Utils.java
│ │ │ │ │ │ ├── config
│ │ │ │ │ │ └── Config.java
│ │ │ │ │ │ ├── preprocess
│ │ │ │ │ │ └── Preprocess.java
│ │ │ │ │ │ └── visual
│ │ │ │ │ │ └── Visualize.java
│ │ │ │ └── res
│ │ │ │ │ ├── drawable-v24
│ │ │ │ │ └── ic_launcher_foreground.xml
│ │ │ │ │ ├── drawable
│ │ │ │ │ ├── ic_launcher_background.xml
│ │ │ │ │ └── paddle_logo.png
│ │ │ │ │ ├── layout
│ │ │ │ │ └── activity_main.xml
│ │ │ │ │ ├── menu
│ │ │ │ │ └── menu_action_options.xml
│ │ │ │ │ ├── mipmap-anydpi-v26
│ │ │ │ │ ├── ic_launcher.xml
│ │ │ │ │ └── ic_launcher_round.xml
│ │ │ │ │ ├── mipmap-hdpi
│ │ │ │ │ ├── ic_launcher.png
│ │ │ │ │ └── ic_launcher_round.png
│ │ │ │ │ ├── mipmap-mdpi
│ │ │ │ │ ├── ic_launcher.png
│ │ │ │ │ └── ic_launcher_round.png
│ │ │ │ │ ├── mipmap-xhdpi
│ │ │ │ │ ├── ic_launcher.png
│ │ │ │ │ └── ic_launcher_round.png
│ │ │ │ │ ├── mipmap-xxhdpi
│ │ │ │ │ ├── ic_launcher.png
│ │ │ │ │ └── ic_launcher_round.png
│ │ │ │ │ ├── mipmap-xxxhdpi
│ │ │ │ │ ├── ic_launcher.png
│ │ │ │ │ └── ic_launcher_round.png
│ │ │ │ │ ├── values
│ │ │ │ │ ├── arrays.xml
│ │ │ │ │ ├── colors.xml
│ │ │ │ │ ├── strings.xml
│ │ │ │ │ └── styles.xml
│ │ │ │ │ └── xml
│ │ │ │ │ └── settings.xml
│ │ │ │ └── test
│ │ │ │ └── java
│ │ │ │ └── com
│ │ │ │ └── baidu
│ │ │ │ └── paddle
│ │ │ │ └── lite
│ │ │ │ └── demo
│ │ │ │ └── ExampleUnitTest.java
│ │ ├── build.gradle
│ │ ├── gradle.properties
│ │ ├── gradle
│ │ │ └── wrapper
│ │ │ │ ├── gradle-wrapper.jar
│ │ │ │ └── gradle-wrapper.properties
│ │ ├── gradlew
│ │ ├── gradlew.bat
│ │ └── settings.gradle
│ └── python
│ │ └── infer.py
├── ppmatting
│ ├── __init__.py
│ ├── core
│ │ ├── __init__.py
│ │ ├── predict.py
│ │ ├── train.py
│ │ ├── val.py
│ │ └── val_ml.py
│ ├── datasets
│ │ ├── __init__.py
│ │ ├── composition_1k.py
│ │ ├── distinctions_646.py
│ │ └── matting_dataset.py
│ ├── metrics
│ │ ├── __init__.py
│ │ └── metric.py
│ ├── ml
│ │ ├── __init__.py
│ │ └── methods.py
│ ├── models
│ │ ├── __init__.py
│ │ ├── backbone
│ │ │ ├── __init__.py
│ │ │ ├── gca_enc.py
│ │ │ ├── hrnet.py
│ │ │ ├── mobilenet_v2.py
│ │ │ ├── resnet_vd.py
│ │ │ └── vgg.py
│ │ ├── dim.py
│ │ ├── gca.py
│ │ ├── human_matting.py
│ │ ├── layers
│ │ │ ├── __init__.py
│ │ │ └── gca_module.py
│ │ ├── losses
│ │ │ ├── __init__.py
│ │ │ └── loss.py
│ │ ├── modnet.py
│ │ └── ppmatting.py
│ ├── transforms
│ │ ├── __init__.py
│ │ └── transforms.py
│ └── utils
│ │ ├── __init__.py
│ │ ├── estimate_foreground_ml.py
│ │ └── utils.py
├── requirements.txt
└── tools
│ ├── bg_replace.py
│ ├── export.py
│ ├── predict.py
│ ├── train.py
│ ├── update_vgg16_params.py
│ └── val.py
├── README.md
├── benchmark
├── README.md
├── README_CN.md
├── configs
│ ├── cityscapes_30imgs.yml
│ ├── fastscnn.yml
│ ├── ocrnet_hrnetw48.yml
│ └── segformer_b0.yml
├── deeplabv3p.yml
├── hrnet.yml
├── hrnet48.yml
├── run_all.sh
├── run_benchmark.sh
├── run_fp16.sh
└── run_fp32.sh
├── configs
├── README.md
├── README_cn.md
├── _base_
│ ├── ade20k.yml
│ ├── autonue.yml
│ ├── chase_db1.yml
│ ├── cityscapes.yml
│ ├── cityscapes_1024x1024.yml
│ ├── cityscapes_769x769.yml
│ ├── cityscapes_769x769_setr.yml
│ ├── coco_stuff.yml
│ ├── drive.yml
│ ├── hrf.yml
│ ├── pascal_context.yml
│ ├── pascal_voc12.yml
│ ├── pascal_voc12aug.yml
│ └── stare.yml
├── ann
│ ├── README.md
│ ├── ann_resnet101_os8_cityscapes_1024x512_80k.yml
│ ├── ann_resnet101_os8_voc12aug_512x512_40k.yml
│ ├── ann_resnet50_os8_cityscapes_1024x512_80k.yml
│ └── ann_resnet50_os8_voc12aug_512x512_40k.yml
├── attention_unet
│ ├── README.md
│ └── attention_unet_cityscapes_1024x512_80k.yml
├── bisenet
│ ├── README.md
│ └── bisenet_cityscapes_1024x1024_160k.yml
├── bisenetv1
│ ├── README.md
│ └── bisenetv1_resnet18_os8_cityscapes_1024x512_160k.yml
├── ccnet
│ ├── README.md
│ └── ccnet_resnet101_os8_cityscapes_769x769_60k.yml
├── danet
│ ├── README.md
│ ├── danet_resnet101_os8_cityscapes_1024x512_80k.yml
│ ├── danet_resnet50_os8_cityscapes_1024x512_80k.yml
│ └── danet_resnet50_os8_voc12aug_512x512_40k.yml
├── ddrnet
│ ├── README.md
│ └── ddrnet23_cityscapes_1024x1024_120k.yml
├── decoupled_segnet
│ ├── README.md
│ ├── decoupledsegnet_resnet50_os8_cityscapes_1024x512_80k.yml
│ └── decoupledsegnet_resnet50_os8_cityscapes_832x832_80k.yml
├── deeplabv3
│ ├── README.md
│ ├── deeplabv3_resnet101_os8_cityscapes_1024x512_80k.yml
│ ├── deeplabv3_resnet101_os8_voc12aug_512x512_40k.yml
│ ├── deeplabv3_resnet50_os8_cityscapes_1024x512_80k.yml
│ └── deeplabv3_resnet50_os8_voc12aug_512x512_40k.yml
├── deeplabv3p
│ ├── README.md
│ ├── deeplabv3p_resnet101_os8_cityscapes_1024x512_80k.yml
│ ├── deeplabv3p_resnet101_os8_cityscapes_769x769_80k.yml
│ ├── deeplabv3p_resnet101_os8_voc12aug_512x512_40k.yml
│ ├── deeplabv3p_resnet50_os8_cityscapes_1024x512_80k.yml
│ ├── deeplabv3p_resnet50_os8_cityscapes_1024x512_80k_rmiloss.yml
│ └── deeplabv3p_resnet50_os8_voc12aug_512x512_40k.yml
├── dmnet
│ ├── README.md
│ └── dmnet_resnet101_os8_cityscapes_1024x512_80k.yml
├── dnlnet
│ ├── README.md
│ ├── dnlnet_resnet101_os8_cityscapes_1024x512_80k.yml
│ ├── dnlnet_resnet101_os8_voc12aug_512x512_40k.yml
│ ├── dnlnet_resnet50_os8_cityscapes_1024x512_80k.yml
│ └── dnlnet_resnet50_os8_voc12aug_512x512_40k.yml
├── emanet
│ ├── README.md
│ ├── emanet_resnet101_os8_cityscapes_1024x512_80k.yml
│ ├── emanet_resnet101_os8_voc12aug_512x512_40k.yml
│ ├── emanet_resnet50_os8_cityscapes_1024x512_80k.yml
│ └── emanet_resnet50_os8_voc12aug_512x512_40k.yml
├── encnet
│ ├── README.md
│ └── encnet_resnet101_os8_cityscapes_1024x512_80k.yml
├── enet
│ ├── README.md
│ └── enet_cityscapes_1024x512_80k.yml
├── espnet
│ ├── README.md
│ └── espnet_cityscapes_1024x512_120k.yml
├── espnetv1
│ ├── README.md
│ └── espnetv1_cityscapes_1024x512_120k.yml
├── fastfcn
│ ├── README.md
│ └── fastfcn_resnet50_os8_ade20k_480x480_120k.yml
├── fastscnn
│ ├── README.md
│ ├── fastscnn_cityscapes_1024x1024_160k.yml
│ ├── fastscnn_cityscapes_1024x1024_40k.yml
│ └── fastscnn_cityscapes_1024x1024_40k_SCL.yml
├── fcn
│ ├── README.md
│ ├── fcn_hrnetw18_cityscapes_1024x512_80k.yml
│ ├── fcn_hrnetw18_cityscapes_1024x512_80k_bs4.yml
│ ├── fcn_hrnetw18_cityscapes_1024x512_80k_bs4_SCL.yml
│ ├── fcn_hrnetw18_pphumanseg14k.yml
│ ├── fcn_hrnetw18_voc12aug_512x512_40k.yml
│ ├── fcn_hrnetw48_cityscapes_1024x512_80k.yml
│ └── fcn_hrnetw48_voc12aug_512x512_40k.yml
├── gcnet
│ ├── README.md
│ ├── gcnet_resnet101_os8_cityscapes_1024x512_80k.yml
│ ├── gcnet_resnet101_os8_voc12aug_512x512_40k.yml
│ ├── gcnet_resnet50_os8_cityscapes_1024x512_80k.yml
│ └── gcnet_resnet50_os8_voc12aug_512x512_40k.yml
├── ginet
│ ├── README.md
│ ├── ginet_resnet101_os8_ade20k_520x520_150k.yml
│ ├── ginet_resnet101_os8_cityscapes_1024x512_80k.yml
│ ├── ginet_resnet101_os8_voc12aug_512x512_40k.yml
│ ├── ginet_resnet50_os8_ade20k_520x520_150k.yml
│ ├── ginet_resnet50_os8_cityscapes_1024x512_80k.yml
│ └── ginet_resnet50_os8_voc12aug_512x512_40k.yml
├── glore
│ ├── README.md
│ ├── glore_resnet50_os8_cityscapes_1024x512_80k.yml
│ └── glore_resnet50_os8_voc12aug_512x512_40k.yml
├── gscnn
│ ├── README.md
│ └── gscnn_resnet50_os8_cityscapes_1024x512_80k.yml
├── hardnet
│ ├── README.md
│ └── hardnet_cityscapes_1024x1024_160k.yml
├── hrnet_w48_contrast
│ ├── HRNet_W48_contrast_cityscapes_1024x512_60k.yml
│ └── README.md
├── isanet
│ ├── README.md
│ ├── isanet_resnet101_os8_cityscapes_769x769_80k.yml
│ ├── isanet_resnet101_os8_voc12aug_512x512_40k.yml
│ ├── isanet_resnet50_os8_cityscapes_769x769_80k.yml
│ └── isanet_resnet50_os8_voc12aug_512x512_40k.yml
├── lraspp
│ ├── README.md
│ ├── lraspp_mobilenetv3_cityscapes_1024x512_80k.yml
│ ├── lraspp_mobilenetv3_cityscapes_1024x512_80k_large_kernel.yml
│ └── lraspp_mobilenetv3_cityscapes_1024x512_80k_os32.yml
├── mobileseg
│ ├── README.md
│ ├── README_cn.md
│ ├── mobileseg_ghostnet_cityscapes_1024x512_80k.yml
│ ├── mobileseg_litehrnet18_cityscapes_1024x512_80k.yml
│ ├── mobileseg_mobilenetv2_cityscapes_1024x512_80k.yml
│ ├── mobileseg_mobilenetv3_cityscapes_1024x512_80k.yml
│ └── mobileseg_shufflenetv2_cityscapes_1024x512_80k.yml
├── ocrnet
│ ├── README.md
│ ├── ocrnet_hrnetw18_cityscapes_1024x512_160k.yml
│ ├── ocrnet_hrnetw18_cityscapes_1024x512_160k_lovasz_softmax.yml
│ ├── ocrnet_hrnetw18_road_extraction_768x768_15k.yml
│ ├── ocrnet_hrnetw18_road_extraction_768x768_15k_lovasz_hinge.yml
│ ├── ocrnet_hrnetw18_voc12aug_512x512_40k.yml
│ ├── ocrnet_hrnetw48_cityscapes_1024x512_160k.yml
│ ├── ocrnet_hrnetw48_cityscapes_1024x512_40k.yml
│ ├── ocrnet_hrnetw48_cityscapes_1024x512_40k_SCL.yml
│ └── ocrnet_hrnetw48_voc12aug_512x512_40k.yml
├── pfpn
│ ├── README.md
│ └── pfpn_resnet101_os8_cityscapes_512x1024_40k.yml
├── pointrend
│ ├── README.md
│ ├── pointrend_resnet101_os8_cityscapes_1024x512_80k.yml
│ ├── pointrend_resnet101_os8_voc12aug_512x512_40k.yml
│ ├── pointrend_resnet50_os8_cityscapes_1024x512_80k.yml
│ └── pointrend_resnet50_os8_voc12aug_512x512_40k.yml
├── portraitnet
│ ├── README.md
│ ├── portraitnet_eg1800_224x224_46k.yml
│ └── portraitnet_supervisely_224x224_60k.yml
├── pp_humanseg_lite
│ ├── README.md
│ ├── pp_humanseg_lite_export_398x224.yml
│ ├── pp_humanseg_lite_mini_supervisely.yml
│ └── pphumanseg_lite.png
├── pp_liteseg
│ ├── README.md
│ ├── pp_liteseg_stdc1_camvid_960x720_10k.yml
│ ├── pp_liteseg_stdc1_camvid_960x720_10k_for_test.yml
│ ├── pp_liteseg_stdc1_cityscapes_1024x512_scale0.5_160k.yml
│ ├── pp_liteseg_stdc1_cityscapes_1024x512_scale0.75_160k.yml
│ ├── pp_liteseg_stdc1_cityscapes_1024x512_scale1.0_160k.yml
│ ├── pp_liteseg_stdc2_camvid_960x720_10k.yml
│ ├── pp_liteseg_stdc2_camvid_960x720_10k_for_test.yml
│ ├── pp_liteseg_stdc2_cityscapes_1024x512_scale0.5_160k.yml
│ ├── pp_liteseg_stdc2_cityscapes_1024x512_scale0.75_160k.yml
│ └── pp_liteseg_stdc2_cityscapes_1024x512_scale1.0_160k.yml
├── pspnet
│ ├── README.md
│ ├── pspnet_resnet101_os8_cityscapes_1024x512_80k.yml
│ ├── pspnet_resnet101_os8_voc12aug_512x512_40k.yml
│ ├── pspnet_resnet50_os8_cityscapes_1024x512_80k.yml
│ └── pspnet_resnet50_os8_voc12aug_512x512_40k.yml
├── pssl
│ ├── README.md
│ ├── pp_liteseg_stdc1_pssl.yml
│ ├── pp_liteseg_stdc2_cityscapes_1024x512_scale1.0_160k_pssl.yml
│ ├── pp_liteseg_stdc2_pssl.yml
│ ├── stdc1_seg_pssl.yml
│ └── stdc2_seg_pssl.yml
├── quick_start
│ ├── bisenet_optic_disc_512x512_1k.yml
│ ├── deeplabv3p_resnet18_os8_optic_disc_512x512_1k_student.yml
│ ├── deeplabv3p_resnet50_os8_optic_disc_512x512_1k_teacher.yml
│ └── pp_liteseg_optic_disc_512x512_1k.yml
├── segformer
│ ├── README.md
│ ├── segformer_b0_cityscapes_1024x1024_160k.yml
│ ├── segformer_b0_cityscapes_1024x512_160k.yml
│ ├── segformer_b1_cityscapes_1024x1024_160k.yml
│ ├── segformer_b1_cityscapes_1024x512_160k.yml
│ ├── segformer_b2_cityscapes_1024x1024_160k.yml
│ ├── segformer_b2_cityscapes_1024x512_160k.yml
│ ├── segformer_b3_cityscapes_1024x1024_160k.yml
│ ├── segformer_b3_cityscapes_1024x512_160k.yml
│ ├── segformer_b4_cityscapes_1024x1024_160k.yml
│ ├── segformer_b4_cityscapes_1024x512_160k.yml
│ ├── segformer_b5_cityscapes_1024x1024_160k.yml
│ └── segformer_b5_cityscapes_1024x512_160k.yml
├── segmenter
│ ├── README.md
│ ├── segmenter_vit_base_linear_ade20k_512x512_160k.yml
│ ├── segmenter_vit_base_mask_ade20k_512x512_160k.yml
│ ├── segmenter_vit_small_linear_ade20k_512x512_160k.yml
│ └── segmenter_vit_small_mask_ade20k_512x512_160k.yml
├── segnet
│ ├── README.md
│ └── segnet_cityscapes_1024x512_80k.yml
├── setr
│ ├── README.md
│ ├── setr_mla_large_cityscapes_769x769_40k.yml
│ ├── setr_naive_large_cityscapes_769x769_40k.yml
│ └── setr_pup_large_cityscapes_769x769_40k.yml
├── sfnet
│ ├── README.md
│ ├── sfnet_resnet18_os8_cityscapes_1024x1024_80k.yml
│ └── sfnet_resnet50_os8_cityscapes_1024x1024_80k.yml
├── smrt
│ ├── README.md
│ ├── base_cfg.yml
│ ├── bisenetv2.yml
│ ├── deeplabv3p_resnet50_os8.yml
│ ├── ocrnet_hrnetw18.yml
│ ├── pp_liteseg_stdc1.yml
│ ├── pp_liteseg_stdc2.yml
│ └── sfnet_resnet18_os8.yml
├── stdcseg
│ ├── README.md
│ ├── stdc1_seg_cityscapes_1024x512_80k.yml
│ ├── stdc1_seg_voc12aug_512x512_40k.yml
│ ├── stdc2_seg_cityscapes_1024x512_80k.yml
│ └── stdc2_seg_voc12aug_512x512_40k.yml
├── u2net
│ ├── README.md
│ ├── u2net_cityscapes_1024x512_160k.yml
│ └── u2netp_cityscapes_1024x512_160k.yml
├── unet
│ ├── README.md
│ ├── unet_chasedb1_128x128_40k.yml
│ ├── unet_cityscapes_1024x512_160k.yml
│ ├── unet_drive_128x128_40k.yml
│ ├── unet_hrf_256x256_40k.yml
│ └── unet_stare_128x128_40k.yml
├── unet_3plus
│ ├── README.md
│ └── unet_3plus_cityscapes_1024x512_160k.yml
├── unet_plusplus
│ ├── README.md
│ └── unet_plusplus_cityscapes_1024x512_160k.yml
└── upernet
│ ├── README.md
│ └── upernet_resnet101_os8_cityscapes_512x1024_40k.yml
├── contrib
├── AutoNUE
│ ├── README.md
│ ├── configs
│ │ ├── auto_nue_auto_label.yml
│ │ ├── auto_nue_map+city_crop.yml
│ │ ├── mscale_auto_nue_map+city@1920.yml
│ │ ├── sscale_auto_nue_map+city@1920.yml
│ │ └── swin_transformer_mla_base_patch4_window7_160k_autonue.yml
│ ├── core
│ │ ├── __init__.py
│ │ ├── infer.py
│ │ ├── infer_crop.py
│ │ ├── infer_ensemble.py
│ │ ├── infer_ensemble_three.py
│ │ ├── infer_generate_autolabel.py
│ │ ├── predict_ensemble.py
│ │ ├── predict_ensemble_three.py
│ │ ├── predict_generate_autolabel.py
│ │ ├── val.py
│ │ └── val_crop.py
│ ├── datasets
│ │ ├── __init__.py
│ │ ├── auto_nue.py
│ │ ├── auto_nue_autolabel.py
│ │ └── auto_nue_crop.py
│ ├── models
│ │ ├── __init__.py
│ │ ├── backbones
│ │ │ └── hrnet_nv.py
│ │ ├── mscale_ocrnet.py
│ │ └── ocrnet_nv.py
│ ├── predict.py
│ ├── predict_ensemble.py
│ ├── predict_ensemble_three.py
│ ├── scripts
│ │ └── train.py
│ ├── tools
│ │ └── IDD_labeling.py
│ ├── train.py
│ └── val.py
├── CityscapesSOTA
│ ├── README.md
│ ├── configs
│ │ ├── README.md
│ │ ├── mscale_ocr_cityscapes_autolabel_mapillary.yml
│ │ └── mscale_ocr_cityscapes_autolabel_mapillary_ms_val.yml
│ ├── datasets
│ │ ├── __init__.py
│ │ └── cityscapes_autolabeling.py
│ ├── models
│ │ ├── __init__.py
│ │ ├── backbones
│ │ │ └── hrnet_nv.py
│ │ ├── mscale_ocrnet.py
│ │ └── ocrnet_nv.py
│ ├── predict.py
│ ├── scripts
│ │ └── train.py
│ ├── tools
│ │ ├── cityscapes_labels.py
│ │ └── convert_cityscapes_autolabeling.py
│ ├── train.py
│ └── val.py
├── DomainAdaptation
│ ├── README.md
│ ├── configs
│ │ └── deeplabv2
│ │ │ ├── deeplabv2_resnet101_os8_gta5cityscapes_1280x640_160k_newds_edge_rec.yml
│ │ │ ├── deeplabv2_resnet101_os8_gta5cityscapes_1280x640_160k_newds_edgestream.yml
│ │ │ ├── deeplabv2_resnet101_os8_gta5cityscapes_1280x640_160k_newds_featpullin.yml
│ │ │ ├── deeplabv2_resnet101_os8_gta5cityscapes_1280x640_160k_newds_gta5src.yml
│ │ │ └── deeplabv2_resnet101_os8_gta5cityscapes_1280x640_160k_newds_sfnet.yml
│ ├── cvlibs
│ │ ├── __init__.py
│ │ └── config.py
│ ├── datasets
│ │ ├── __init__.py
│ │ ├── cityscapes_noconfig.py
│ │ ├── gta5_noconfig.py
│ │ └── synthia.py
│ ├── models
│ │ ├── __init__.py
│ │ ├── backbones
│ │ │ ├── __init__.py
│ │ │ └── resnet.py
│ │ ├── deeplabv2.py
│ │ ├── ema.py
│ │ └── gscnn.py
│ ├── requirements.txt
│ ├── run-DA_src.sh
│ ├── script
│ │ ├── __init__.py
│ │ ├── train.py
│ │ └── val.py
│ ├── train.py
│ ├── utils
│ │ ├── __init__.py
│ │ ├── augmentation.py
│ │ ├── config_check.py
│ │ └── utils.py
│ └── val.py
├── LaneSeg
│ ├── README.md
│ ├── README_CN.md
│ ├── configs
│ │ ├── bisenetV2_tusimple_640x368_300k.yml
│ │ └── fastscnn_tusimple_640x368_300k.yml
│ ├── core
│ │ ├── __init__.py
│ │ ├── infer.py
│ │ ├── predict.py
│ │ ├── train.py
│ │ └── val.py
│ ├── data
│ │ ├── images
│ │ │ ├── added_prediction
│ │ │ │ └── 3.jpg
│ │ │ ├── points
│ │ │ │ └── 3.jpg
│ │ │ └── pseudo_color_prediction
│ │ │ │ └── 3.png
│ │ └── test_images
│ │ │ └── 3.jpg
│ ├── datasets
│ │ ├── __init__.py
│ │ └── tusimple.py
│ ├── deploy
│ │ ├── cpp
│ │ │ ├── CMakeLists.txt
│ │ │ ├── README.md
│ │ │ ├── README_CN.md
│ │ │ ├── run_seg_cpu.sh
│ │ │ ├── run_seg_gpu.sh
│ │ │ └── src
│ │ │ │ ├── lane_postprocess.cpp
│ │ │ │ ├── lane_postprocess.hpp
│ │ │ │ └── test_seg.cc
│ │ └── python
│ │ │ └── infer.py
│ ├── export.py
│ ├── losses
│ │ ├── __init__.py
│ │ └── lane_cross_entropy_loss.py
│ ├── predict.py
│ ├── third_party
│ │ ├── __init__.py
│ │ ├── generate_tusimple_dataset.py
│ │ ├── get_lane_coords.py
│ │ ├── lane.py
│ │ └── tusimple_processor.py
│ ├── train.py
│ ├── transforms
│ │ ├── __init__.py
│ │ └── lane_transforms.py
│ └── val.py
├── MedicalSeg
│ ├── .gitignore
│ ├── .pre-commit-config.yaml
│ ├── LICENSE
│ ├── README.md
│ ├── README_CN.md
│ ├── configs
│ │ ├── _base_
│ │ │ └── global_configs.yml
│ │ ├── lung_coronavirus
│ │ │ ├── README.md
│ │ │ ├── lung_coronavirus.yml
│ │ │ └── vnet_lung_coronavirus_128_128_128_15k.yml
│ │ ├── mri_spine_seg
│ │ │ ├── README.md
│ │ │ ├── mri_spine_seg_1e-1_big_rmresizecrop.yml
│ │ │ ├── mri_spine_seg_1e-1_big_rmresizecrop_class20.yml
│ │ │ ├── vnet_mri_spine_seg_512_512_12_15k.yml
│ │ │ └── vnetdeepsup_mri_spine_seg_512_512_12_15k.yml
│ │ ├── msd_brain_seg
│ │ │ ├── README.md
│ │ │ ├── msd_brain_seg_1e-4.yml
│ │ │ └── unetr_msd_brain_seg_1e-4.yml
│ │ └── schedulers
│ │ │ └── two_stage_coarseseg_fineseg.yml
│ ├── deploy
│ │ └── python
│ │ │ ├── README.md
│ │ │ └── infer.py
│ ├── documentation
│ │ ├── tutorial.md
│ │ └── tutorial_cn.md
│ ├── export.py
│ ├── medicalseg
│ │ ├── __init__.py
│ │ ├── core
│ │ │ ├── __init__.py
│ │ │ ├── infer.py
│ │ │ ├── train.py
│ │ │ └── val.py
│ │ ├── cvlibs
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ └── manager.py
│ │ ├── datasets
│ │ │ ├── __init__.py
│ │ │ ├── dataset.py
│ │ │ ├── lung_coronavirus.py
│ │ │ ├── mri_spine_seg.py
│ │ │ └── msd_brain_seg.py
│ │ ├── models
│ │ │ ├── __init__.py
│ │ │ ├── losses
│ │ │ │ ├── __init__.py
│ │ │ │ ├── binary_cross_entropy_loss.py
│ │ │ │ ├── cross_entropy_loss.py
│ │ │ │ ├── dice_loss.py
│ │ │ │ ├── loss_utils.py
│ │ │ │ └── mixes_losses.py
│ │ │ ├── unetr.py
│ │ │ ├── vnet.py
│ │ │ └── vnet_deepsup.py
│ │ ├── transforms
│ │ │ ├── __init__.py
│ │ │ ├── functional.py
│ │ │ └── transform.py
│ │ └── utils
│ │ │ ├── __init__.py
│ │ │ ├── config_check.py
│ │ │ ├── download.py
│ │ │ ├── env_util
│ │ │ ├── __init__.py
│ │ │ ├── seg_env.py
│ │ │ └── sys_env.py
│ │ │ ├── logger.py
│ │ │ ├── loss_utils.py
│ │ │ ├── metric.py
│ │ │ ├── op_flops_run.py
│ │ │ ├── progbar.py
│ │ │ ├── timer.py
│ │ │ ├── train_profiler.py
│ │ │ ├── utils.py
│ │ │ └── visualize.py
│ ├── requirements.txt
│ ├── run-vnet-mri.sh
│ ├── run-vnet.sh
│ ├── test.py
│ ├── test_tipc
│ │ ├── README.md
│ │ ├── common_func.sh
│ │ ├── configs
│ │ │ └── unetr
│ │ │ │ ├── msd_brain_test.yml
│ │ │ │ └── train_infer_python.txt
│ │ ├── prepare.sh
│ │ └── test_train_inference_python.sh
│ ├── tools
│ │ ├── __init__.py
│ │ ├── prepare.py
│ │ ├── prepare_lung_coronavirus.py
│ │ ├── prepare_mri_spine_seg.py
│ │ ├── prepare_msd.py
│ │ ├── prepare_msd_brain_seg.py
│ │ ├── prepare_prostate.py
│ │ ├── preprocess_globals.yml
│ │ └── preprocess_utils
│ │ │ ├── __init__.py
│ │ │ ├── dataset_json.py
│ │ │ ├── geometry.py
│ │ │ ├── global_var.py
│ │ │ ├── load_image.py
│ │ │ ├── uncompress.py
│ │ │ └── values.py
│ ├── train.py
│ ├── val.py
│ └── visualize.ipynb
├── PP-HumanSeg
│ ├── README.md
│ ├── README_cn.md
│ ├── configs
│ │ ├── human_pp_humansegv1_lite.yml
│ │ ├── human_pp_humansegv1_mobile.yml
│ │ ├── human_pp_humansegv1_server.yml
│ │ ├── human_pp_humansegv2_lite.yml
│ │ ├── human_pp_humansegv2_mobile.yml
│ │ ├── portrait_pp_humansegv1_lite.yml
│ │ └── portrait_pp_humansegv2_lite.yml
│ ├── paper.md
│ └── src
│ │ ├── __init__.py
│ │ ├── download_data.py
│ │ ├── download_inference_models.py
│ │ ├── download_pretrained_models.py
│ │ ├── infer.py
│ │ ├── optic_flow_process.py
│ │ └── seg_demo.py
└── PanopticDeepLab
│ ├── README.md
│ ├── README_CN.md
│ ├── configs
│ ├── _base_
│ │ └── cityscapes_panoptic.yml
│ └── panoptic_deeplab
│ │ ├── panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005.yml
│ │ └── panoptic_deeplab_resnet50_os32_cityscapes_2049x1025_bs1_90k_lr00005.yml
│ ├── core
│ ├── __init__.py
│ ├── infer.py
│ ├── predict.py
│ ├── train.py
│ └── val.py
│ ├── datasets
│ ├── __init__.py
│ └── cityscapes_panoptic.py
│ ├── docs
│ ├── panoptic_deeplab.jpg
│ ├── visualization_instance.png
│ ├── visualization_panoptic.png
│ └── visualization_semantic.png
│ ├── models
│ ├── __init__.py
│ └── panoptic_deeplab.py
│ ├── predict.py
│ ├── train.py
│ ├── transforms
│ ├── __init__.py
│ └── target_transforms.py
│ ├── utils
│ ├── __init__.py
│ ├── evaluation
│ │ ├── __init__.py
│ │ ├── instance.py
│ │ ├── panoptic.py
│ │ └── semantic.py
│ └── visualize.py
│ └── val.py
├── deploy
├── cpp
│ ├── CMakeLists.txt
│ ├── README.md
│ ├── README_cn.md
│ ├── run_seg_cpu.sh
│ ├── run_seg_gpu.sh
│ ├── run_seg_gpu_trt.sh
│ ├── run_seg_gpu_trt_dynamic_shape.sh
│ └── src
│ │ └── test_seg.cc
├── lite
│ ├── README.md
│ ├── example
│ │ ├── human_1.png
│ │ ├── human_2.png
│ │ └── human_3.png
│ └── human_segmentation_demo
│ │ ├── .gitignore
│ │ ├── app
│ │ ├── .gitignore
│ │ ├── build.gradle
│ │ ├── gradlew
│ │ ├── gradlew.bat
│ │ ├── local.properties
│ │ ├── proguard-rules.pro
│ │ └── src
│ │ │ ├── androidTest
│ │ │ └── java
│ │ │ │ └── com
│ │ │ │ └── baidu
│ │ │ │ └── paddle
│ │ │ │ └── lite
│ │ │ │ └── demo
│ │ │ │ └── ExampleInstrumentedTest.java
│ │ │ ├── main
│ │ │ ├── AndroidManifest.xml
│ │ │ ├── assets
│ │ │ │ └── image_segmentation
│ │ │ │ │ ├── images
│ │ │ │ │ └── human.jpg
│ │ │ │ │ └── labels
│ │ │ │ │ └── label_list
│ │ │ ├── java
│ │ │ │ └── com
│ │ │ │ │ └── baidu
│ │ │ │ │ └── paddle
│ │ │ │ │ └── lite
│ │ │ │ │ └── demo
│ │ │ │ │ └── segmentation
│ │ │ │ │ ├── AppCompatPreferenceActivity.java
│ │ │ │ │ ├── MainActivity.java
│ │ │ │ │ ├── Predictor.java
│ │ │ │ │ ├── SettingsActivity.java
│ │ │ │ │ ├── Utils.java
│ │ │ │ │ ├── config
│ │ │ │ │ └── Config.java
│ │ │ │ │ ├── preprocess
│ │ │ │ │ └── Preprocess.java
│ │ │ │ │ └── visual
│ │ │ │ │ └── Visualize.java
│ │ │ └── res
│ │ │ │ ├── drawable-v24
│ │ │ │ └── ic_launcher_foreground.xml
│ │ │ │ ├── drawable
│ │ │ │ └── ic_launcher_background.xml
│ │ │ │ ├── layout
│ │ │ │ └── activity_main.xml
│ │ │ │ ├── menu
│ │ │ │ └── menu_action_options.xml
│ │ │ │ ├── mipmap-anydpi-v26
│ │ │ │ ├── ic_launcher.xml
│ │ │ │ └── ic_launcher_round.xml
│ │ │ │ ├── mipmap-hdpi
│ │ │ │ ├── ic_launcher.png
│ │ │ │ └── ic_launcher_round.png
│ │ │ │ ├── mipmap-mdpi
│ │ │ │ ├── ic_launcher.png
│ │ │ │ └── ic_launcher_round.png
│ │ │ │ ├── mipmap-xhdpi
│ │ │ │ ├── ic_launcher.png
│ │ │ │ └── ic_launcher_round.png
│ │ │ │ ├── mipmap-xxhdpi
│ │ │ │ ├── ic_launcher.png
│ │ │ │ └── ic_launcher_round.png
│ │ │ │ ├── mipmap-xxxhdpi
│ │ │ │ ├── ic_launcher.png
│ │ │ │ └── ic_launcher_round.png
│ │ │ │ ├── values
│ │ │ │ ├── arrays.xml
│ │ │ │ ├── colors.xml
│ │ │ │ ├── strings.xml
│ │ │ │ └── styles.xml
│ │ │ │ └── xml
│ │ │ │ └── settings.xml
│ │ │ └── test
│ │ │ └── java
│ │ │ └── com
│ │ │ └── baidu
│ │ │ └── paddle
│ │ │ └── lite
│ │ │ └── demo
│ │ │ └── ExampleUnitTest.java
│ │ ├── build.gradle
│ │ ├── gradle.properties
│ │ ├── gradle
│ │ └── wrapper
│ │ │ ├── gradle-wrapper.jar
│ │ │ └── gradle-wrapper.properties
│ │ ├── gradlew
│ │ ├── gradlew.bat
│ │ └── settings.gradle
├── onnxruntime_cpp
│ ├── CMakeLists.txt
│ ├── README.md
│ └── src
│ │ ├── ort_session_handler.cpp
│ │ ├── ort_session_handler.hpp
│ │ └── test_seg.cpp
├── python
│ ├── README.md
│ ├── collect_dynamic_shape.py
│ ├── infer.py
│ ├── infer_benchmark.py
│ ├── infer_dataset.py
│ ├── infer_onnx.py
│ └── infer_onnx_trt.py
├── serving
│ ├── README.md
│ └── test_serving.py
└── web
│ ├── README.md
│ └── example
│ ├── bg
│ └── bg.jpg
│ ├── index.html
│ ├── index.ts
│ ├── package.json
│ ├── tsconfig.json
│ └── webpack.config.js
├── docs
├── Makefile
├── README.md
├── add_new_model.md
├── api_example.md
├── api_example_cn.md
├── apis
│ ├── README.md
│ ├── README_CN.md
│ ├── backbones.md
│ ├── backbones
│ │ ├── backbones.md
│ │ ├── backbones_cn.md
│ │ └── index.rst
│ ├── core.md
│ ├── core
│ │ ├── core.md
│ │ ├── core_cn.md
│ │ └── index.rst
│ ├── cvlibs.md
│ ├── cvlibs
│ │ ├── cvlibs.md
│ │ ├── cvlibs_cn.md
│ │ └── index.rst
│ ├── datasets.md
│ ├── datasets
│ │ ├── datasets.md
│ │ └── datasets_cn.md
│ ├── index.rst
│ ├── losses.md
│ ├── losses
│ │ ├── index.rst
│ │ ├── losses.md
│ │ └── losses_cn.md
│ ├── models.md
│ ├── models
│ │ ├── index.rst
│ │ ├── models.md
│ │ └── models_cn.md
│ ├── transforms.md
│ └── transforms
│ │ ├── index.rst
│ │ ├── transforms.md
│ │ └── transforms_cn.md
├── conf.py
├── data
│ ├── README.md
│ ├── custom
│ │ ├── data_prepare.md
│ │ ├── data_prepare_cn.md
│ │ └── index.rst
│ ├── image
│ │ ├── ITK-SNAP.png
│ │ ├── LabelMeing.png
│ │ ├── file_list.png
│ │ ├── file_list2.png
│ │ ├── image-1.png
│ │ ├── image-10.jpg
│ │ ├── image-11.png
│ │ ├── image-2.png
│ │ ├── image-3.png
│ │ ├── image-4-1.png
│ │ ├── image-4-2.png
│ │ ├── image-5.png
│ │ ├── image-6-2.png
│ │ ├── image-6.png
│ │ ├── image-7.png
│ │ ├── jingling-1.png
│ │ ├── jingling-2.png
│ │ ├── jingling-3.png
│ │ ├── jingling-4.png
│ │ ├── jingling-5.png
│ │ └── labelme_polygons.jpg
│ ├── marker
│ │ ├── LabelMe.md
│ │ ├── LabelMe_cn.md
│ │ ├── index.rst
│ │ ├── marker.md
│ │ └── marker_cn.md
│ ├── pre_data.md
│ ├── pre_data_cn.md
│ └── transform
│ │ ├── index.rst
│ │ ├── transform.md
│ │ └── transform_cn.md
├── deployment
│ ├── inference
│ │ ├── cpp_inference.md
│ │ ├── cpp_inference_cn.md
│ │ ├── index.rst
│ │ ├── infer_benchmark.md
│ │ ├── infer_benchmark_cn.md
│ │ ├── inference.md
│ │ ├── inference_cn.md
│ │ ├── python_inference.md
│ │ └── python_inference_cn.md
│ ├── lite
│ │ ├── example
│ │ │ ├── human.png
│ │ │ ├── human_1.png
│ │ │ ├── human_2.png
│ │ │ └── human_3.png
│ │ ├── index.rst
│ │ ├── lite.md
│ │ └── lite_cn.md
│ ├── serving
│ │ ├── index.rst
│ │ ├── serving.md
│ │ └── serving_cn.md
│ └── web
│ │ ├── image
│ │ └── figure1.png
│ │ ├── index.rst
│ │ ├── web.md
│ │ └── web_cn.md
├── design
│ ├── create
│ │ ├── add_new_model.md
│ │ ├── add_new_model_cn.md
│ │ └── index.rst
│ └── use
│ │ ├── index.rst
│ │ ├── use.md
│ │ └── use_cn.md
├── evaluation
│ ├── evaluate.md
│ ├── evaluate_cn.md
│ └── index.rst
├── faq
│ └── faq
│ │ ├── faq.md
│ │ ├── faq_cn.md
│ │ ├── faq_imgs
│ │ └── ann_config.png
│ │ └── index.rst
├── images
│ ├── Lovasz_Hinge_Evaluate_mIoU.png
│ ├── Lovasz_Softmax_Evaluate_mIoU.png
│ ├── QQ_chat.png
│ ├── activate.png
│ ├── anli.png
│ ├── api_fig1.png
│ ├── api_fig2.png
│ ├── chat.png
│ ├── cityscapes_predict_demo.png
│ ├── deepglobe.png
│ ├── eiseg_demo.gif
│ ├── f1.png
│ ├── f2.png
│ ├── f3.png
│ ├── feature.png
│ ├── fig1.png
│ ├── fig2.png
│ ├── fig3.png
│ ├── fig4.png
│ ├── fig5.png
│ ├── human.png
│ ├── interactive.gif
│ ├── love.png
│ ├── model.png
│ ├── optic_test_image.jpg
│ ├── paddleseg_logo.png
│ ├── quick_start_predict.jpg
│ ├── quick_start_vdl.jpg
│ ├── readme
│ │ ├── 二次元.gif
│ │ ├── 人体解析.gif
│ │ ├── 人像分割-0.gif
│ │ └── 人像分割.gif
│ ├── seg_news_icon.png
│ ├── teach.png
│ └── yinyong.png
├── index.rst
├── install.md
├── install_cn.md
├── loss_usage.md
├── make.bat
├── model_export.md
├── model_export_cn.md
├── model_export_onnx.md
├── model_export_onnx_cn.md
├── model_zoo_overview.md
├── model_zoo_overview_cn.md
├── models
│ ├── deeplabv3.md
│ ├── deeplabv3_cn.md
│ ├── fascnn.md
│ ├── fascnn_cn.md
│ ├── images
│ │ ├── Fast-SCNN.png
│ │ ├── OCRNet.png
│ │ ├── UNet.png
│ │ ├── convolution.png
│ │ └── deeplabv3+.png
│ ├── index.rst
│ ├── ocrnet.md
│ ├── ocrnet_cn.md
│ ├── unet.md
│ └── unet_cn.md
├── module
│ ├── data
│ │ ├── data.md
│ │ ├── data_cn.md
│ │ └── index.rst
│ ├── images
│ │ ├── Lovasz_Hinge_Evaluate_mIoU.png
│ │ ├── Lovasz_Softmax_Evaluate_mIoU.png
│ │ ├── VOC2012.png
│ │ ├── annotation
│ │ │ ├── image-1.png
│ │ │ ├── image-10.jpg
│ │ │ ├── image-11.png
│ │ │ ├── image-2.png
│ │ │ ├── image-3.png
│ │ │ ├── image-4-1.png
│ │ │ ├── image-4-2.png
│ │ │ ├── image-5.png
│ │ │ ├── image-6-2.png
│ │ │ ├── image-6.png
│ │ │ ├── image-7.png
│ │ │ ├── jingling-1.png
│ │ │ ├── jingling-2.png
│ │ │ ├── jingling-3.png
│ │ │ ├── jingling-4.png
│ │ │ └── jingling-5.png
│ │ ├── aug_method.png
│ │ ├── cityscapes.png
│ │ ├── cityscapes_predict_demo.png
│ │ ├── cosine_decay_example.png
│ │ ├── data_aug_example.png
│ │ ├── data_aug_flip_mirror.png
│ │ ├── data_aug_flow.png
│ │ ├── deepglobe.png
│ │ ├── deeplabv3p.png
│ │ ├── dice.png
│ │ ├── dice2.png
│ │ ├── dice3.png
│ │ ├── fast-scnn.png
│ │ ├── file_list.png
│ │ ├── file_list2.png
│ │ ├── gn.png
│ │ ├── hrnet.png
│ │ ├── icnet.png
│ │ ├── image-10.jpg
│ │ ├── loss_comparison.png
│ │ ├── lovasz-hinge-vis.png
│ │ ├── lovasz-hinge.png
│ │ ├── lovasz-softmax.png
│ │ ├── optic_test_image.jpg
│ │ ├── piecewise_decay_example.png
│ │ ├── poly_decay_example.png
│ │ ├── pspnet.png
│ │ ├── pspnet2.png
│ │ ├── qq_group2.png
│ │ ├── quick_start_predict.jpg
│ │ ├── quick_start_vdl.jpg
│ │ ├── rangescale.png
│ │ ├── seg_news_icon.png
│ │ ├── softmax_loss.png
│ │ ├── unet.png
│ │ ├── usage_vis_demo.jpg
│ │ ├── visualdl_image.png
│ │ ├── visualdl_scalar.png
│ │ └── warmup_with_poly_decay_example.png
│ ├── index.rst
│ ├── loss
│ │ ├── BCELoss_cn.md
│ │ ├── BCELoss_en.md
│ │ ├── BootstrappedCrossEntropyLoss_cn.md
│ │ ├── BootstrappedCrossEntropyLoss_en.md
│ │ ├── CrossEntropyLoss_cn.md
│ │ ├── CrossEntropyLoss_en.md
│ │ ├── DiceLoss_cn.md
│ │ ├── DiceLoss_en.md
│ │ ├── DualTaskLoss_cn.md
│ │ ├── DualTaskLoss_en.md
│ │ ├── EdgeAttentionLoss_cn.md
│ │ ├── EdgeAttentionLoss_en.md
│ │ ├── L1Loss_cn.md
│ │ ├── L1Loss_en.md
│ │ ├── LovaszHingeLoss_cn.md
│ │ ├── LovaszHingeLoss_en.md
│ │ ├── LovaszSoftmaxLoss_cn.md
│ │ ├── LovaszSoftmaxLoss_en.md
│ │ ├── MSELoss_cn.md
│ │ ├── MSELoss_en.md
│ │ ├── MixedLoss_cn.md
│ │ ├── MixedLoss_en.md
│ │ ├── OhemCrossEntropyLoss_cn.md
│ │ ├── OhemCrossEntropyLoss_en.md
│ │ ├── OhemEdgeAttentionLoss_cn.md
│ │ ├── OhemEdgeAttentionLoss_en.md
│ │ ├── RelaxBoundaryLoss_cn.md
│ │ ├── RelaxBoundaryLoss_en.md
│ │ ├── SemanticConnectivityLoss_cn.md
│ │ ├── SemanticConnectivityLoss_en.md
│ │ ├── index.rst
│ │ ├── losses_cn.md
│ │ ├── losses_en.md
│ │ ├── lovasz_loss_cn.md
│ │ └── lovasz_loss_en.md
│ └── tricks
│ │ ├── index.rst
│ │ └── tricks.md
├── paddleseg.png
├── pr
│ ├── images
│ │ ├── 001_fork.png
│ │ ├── 002_clone.png
│ │ ├── 003_precommit_pass.png
│ │ └── 004_create_pr.png
│ └── pr
│ │ ├── index.rst
│ │ ├── pr.md
│ │ ├── pr_cn.md
│ │ ├── style.md
│ │ └── style_cn.md
├── predict
│ ├── color_map
│ │ ├── after_mapped.jpeg
│ │ └── before_mapped.jpeg
│ ├── predict.md
│ └── predict_cn.md
├── quick_start.md
├── quick_start_cn.md
├── release_notes.md
├── release_notes_cn.md
├── requirements.txt
├── slim
│ ├── distill
│ │ ├── distill.md
│ │ ├── distill_cn.md
│ │ └── index.rst
│ ├── prune
│ │ ├── index.rst
│ │ ├── prune.md
│ │ └── prune_cn.md
│ └── quant
│ │ ├── index.rst
│ │ ├── quant.md
│ │ └── quant_cn.md
├── static
│ ├── static.md
│ └── static_cn.md
├── train
│ ├── index.rst
│ ├── train.md
│ └── train_cn.md
├── whole_process.md
└── whole_process_cn.md
├── export.py
├── myconfig
├── rs.yml
└── segformer-b2-rs.yml
├── paddleseg
├── __init__.py
├── core
│ ├── __init__.py
│ ├── infer.py
│ ├── predict.py
│ ├── train.py
│ └── val.py
├── cvlibs
│ ├── __init__.py
│ ├── callbacks.py
│ ├── config.py
│ ├── manager.py
│ └── param_init.py
├── datasets
│ ├── __init__.py
│ ├── ade.py
│ ├── chase_db1.py
│ ├── cityscapes.py
│ ├── cocostuff.py
│ ├── dataset.py
│ ├── drive.py
│ ├── eg1800.py
│ ├── hrf.py
│ ├── mini_deep_globe_road_extraction.py
│ ├── mydataset.py
│ ├── optic_disc_seg.py
│ ├── pascal_context.py
│ ├── pp_humanseg14k.py
│ ├── pssl.py
│ ├── stare.py
│ ├── supervisely.py
│ └── voc.py
├── models
│ ├── __init__.py
│ ├── ann.py
│ ├── attention_unet.py
│ ├── backbones
│ │ ├── __init__.py
│ │ ├── ghostnet.py
│ │ ├── hrnet.py
│ │ ├── lite_hrnet.py
│ │ ├── mix_transformer.py
│ │ ├── mobilenetv2.py
│ │ ├── mobilenetv3.py
│ │ ├── resnet_vd.py
│ │ ├── shufflenetv2.py
│ │ ├── stdcnet.py
│ │ ├── swin_transformer.py
│ │ ├── transformer_utils.py
│ │ ├── vision_transformer.py
│ │ └── xception_deeplab.py
│ ├── bisenet.py
│ ├── bisenetv1.py
│ ├── ccnet.py
│ ├── danet.py
│ ├── ddrnet.py
│ ├── decoupled_segnet.py
│ ├── deeplab.py
│ ├── dmnet.py
│ ├── dnlnet.py
│ ├── emanet.py
│ ├── encnet.py
│ ├── enet.py
│ ├── espnet.py
│ ├── espnetv1.py
│ ├── fast_scnn.py
│ ├── fastfcn.py
│ ├── fcn.py
│ ├── gcnet.py
│ ├── ginet.py
│ ├── glore.py
│ ├── gscnn.py
│ ├── hardnet.py
│ ├── hrnet_contrast.py
│ ├── isanet.py
│ ├── layers
│ │ ├── __init__.py
│ │ ├── activation.py
│ │ ├── attention.py
│ │ ├── layer_libs.py
│ │ ├── nonlocal2d.py
│ │ ├── pyramid_pool.py
│ │ ├── tensor_fusion.py
│ │ ├── tensor_fusion_helper.py
│ │ └── wrap_functions.py
│ ├── losses
│ │ ├── __init__.py
│ │ ├── binary_cross_entropy_loss.py
│ │ ├── bootstrapped_cross_entropy.py
│ │ ├── cross_entropy_loss.py
│ │ ├── decoupledsegnet_relax_boundary_loss.py
│ │ ├── detail_aggregate_loss.py
│ │ ├── dice_loss.py
│ │ ├── edge_attention_loss.py
│ │ ├── focal_loss.py
│ │ ├── gscnn_dual_task_loss.py
│ │ ├── kl_loss.py
│ │ ├── l1_loss.py
│ │ ├── lovasz_loss.py
│ │ ├── mean_square_error_loss.py
│ │ ├── mixed_loss.py
│ │ ├── ohem_cross_entropy_loss.py
│ │ ├── ohem_edge_attention_loss.py
│ │ ├── pixel_contrast_cross_entropy_loss.py
│ │ ├── point_cross_entropy_loss.py
│ │ ├── rmi_loss.py
│ │ ├── semantic_connectivity_loss.py
│ │ └── semantic_encode_cross_entropy_loss.py
│ ├── lraspp.py
│ ├── mla_transformer.py
│ ├── mobileseg.py
│ ├── ocrnet.py
│ ├── pfpnnet.py
│ ├── pointrend.py
│ ├── portraitnet.py
│ ├── pp_liteseg.py
│ ├── pphumanseg_lite.py
│ ├── pspnet.py
│ ├── segformer.py
│ ├── segmenter.py
│ ├── segnet.py
│ ├── setr.py
│ ├── sfnet.py
│ ├── sinet.py
│ ├── stdcseg.py
│ ├── u2net.py
│ ├── unet.py
│ ├── unet_3plus.py
│ ├── unet_plusplus.py
│ └── upernet.py
├── transforms
│ ├── __init__.py
│ ├── functional.py
│ └── transforms.py
└── utils
│ ├── __init__.py
│ ├── config_check.py
│ ├── download.py
│ ├── ema.py
│ ├── logger.py
│ ├── metrics.py
│ ├── op_flops_funs.py
│ ├── progbar.py
│ ├── timer.py
│ ├── train_profiler.py
│ ├── utils.py
│ └── visualize.py
├── predict.py
├── requirements.txt
├── setup.py
├── slim
├── distill
│ ├── README.md
│ ├── distill_config.py
│ ├── distill_train.py
│ └── distill_utils.py
├── prune
│ ├── README.md
│ └── prune.py
└── quant
│ ├── README.md
│ ├── ptq.py
│ ├── qat_config.py
│ ├── qat_export.py
│ ├── qat_train.py
│ └── qat_val.py
├── test_tipc
├── README.md
├── benchmark_train.sh
├── common_func.sh
├── compare_results.py
├── configs
│ ├── _base_
│ │ ├── ade20k.yml
│ │ ├── autonue.yml
│ │ ├── cityscapes.yml
│ │ ├── cityscapes_1024x1024.yml
│ │ ├── cityscapes_769x769.yml
│ │ ├── cityscapes_769x769_setr.yml
│ │ ├── coco_stuff.yml
│ │ ├── pascal_context.yml
│ │ ├── pascal_voc12.yml
│ │ └── pascal_voc12aug.yml
│ ├── bisenetv1
│ │ ├── bisenetv1_resnet18_os8_cityscapes_1024x512_160k.yml
│ │ └── train_infer_python.txt
│ ├── bisenetv2
│ │ ├── bisenet_cityscapes_1024x1024_160k.yml
│ │ └── train_infer_python.txt
│ ├── ccnet
│ │ ├── ccnet_resnet101_os8_cityscapes_769x769_60k.yml
│ │ └── train_infer_python.txt
│ ├── ddrnet
│ │ ├── ddrnet23_cityscapes_1024x1024_120k.yml
│ │ └── train_infer_python.txt
│ ├── deeplabv3p_resnet50
│ │ ├── deeplabv3p_resnet50_humanseg_512x512_mini_supervisely.yml
│ │ ├── deeplabv3p_resnet50_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
│ │ ├── deeplabv3p_resnet50_model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
│ │ ├── model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
│ │ ├── model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
│ │ ├── train_infer_python.txt
│ │ ├── train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
│ │ └── train_ptq_infer_python.txt
│ ├── deeplabv3p_resnet50_cityscapes
│ │ ├── deeplabv3p_resnet50_1024x512_cityscapes.yml
│ │ └── train_infer_python.txt
│ ├── encnet
│ │ ├── encnet_resnet101_os8_cityscapes_1024x512_80k.yml
│ │ └── train_infer_python.txt
│ ├── enet
│ │ ├── enet_cityscapes_1024x512_adam_0.002_80k.yml
│ │ └── train_infer_python.txt
│ ├── espnetv2
│ │ ├── espnet_cityscapes_1024x512_120k.yml
│ │ └── train_infer_python.txt
│ ├── fastscnn
│ │ ├── fastscnn_cityscapes.yml
│ │ └── train_infer_python.txt
│ ├── fcn_hrnetw18
│ │ ├── fcn_hrnetw18_1024x512_cityscapes.yml
│ │ ├── fcn_hrnetw18_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
│ │ ├── fcn_hrnetw18_model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
│ │ ├── model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
│ │ ├── model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
│ │ ├── train_infer_python.txt
│ │ ├── train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
│ │ └── train_ptq_infer_python.txt
│ ├── fcn_hrnetw18_small
│ │ ├── fcn_hrnetw18_small_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
│ │ ├── fcn_hrnetw18_small_model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
│ │ ├── fcn_hrnetw18_small_v1_humanseg_192x192_mini_supervisely.yml
│ │ ├── model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
│ │ ├── model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
│ │ ├── train_infer_python.txt
│ │ ├── train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
│ │ └── train_ptq_infer_python.txt
│ ├── glore
│ │ ├── glore_resnet50_os8_cityscapes_1024x512_80k.yml
│ │ └── train_infer_python.txt
│ ├── hrnet_w48_contrast
│ │ ├── HRNet_W48_contrast_cityscapes_1024x512_60k.yml
│ │ └── train_infer_python.txt
│ ├── ocrnet_hrnetw18
│ │ ├── model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
│ │ ├── model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
│ │ ├── ocrnet_hrnetw18_cityscapes_1024x512_160k.yml
│ │ ├── ocrnet_hrnetw18_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
│ │ ├── ocrnet_hrnetw18_model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
│ │ ├── train_infer_python.txt
│ │ ├── train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
│ │ └── train_ptq_infer_python.txt
│ ├── ocrnet_hrnetw48
│ │ ├── model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
│ │ ├── model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
│ │ ├── ocrnet_hrnetw48_cityscapes_1024x512.yml
│ │ ├── ocrnet_hrnetw48_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
│ │ ├── ocrnet_hrnetw48_model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
│ │ ├── train_infer_python.txt
│ │ ├── train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
│ │ └── train_ptq_infer_python.txt
│ ├── pfpnnet
│ │ ├── pfpn_resnet101_os8_cityscapes_512x1024_40k.yml
│ │ └── train_infer_python.txt
│ ├── pp_liteseg_stdc1
│ │ ├── model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
│ │ ├── model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
│ │ ├── pp_liteseg_stdc1_cityscapes_1024x512_160k.yml
│ │ ├── pp_liteseg_stdc1_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
│ │ ├── pp_liteseg_stdc1_model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
│ │ ├── train_infer_python.txt
│ │ ├── train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt
│ │ ├── train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
│ │ └── train_ptq_infer_python.txt
│ ├── pp_liteseg_stdc2
│ │ ├── model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
│ │ ├── model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
│ │ ├── pp_liteseg_stdc2_cityscapes_1024x512_160k.yml
│ │ ├── pp_liteseg_stdc2_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
│ │ ├── pp_liteseg_stdc2_model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
│ │ ├── train_infer_python.txt
│ │ ├── train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
│ │ └── train_ptq_infer_python.txt
│ ├── pphumanseg_lite
│ │ ├── model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
│ │ ├── model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
│ │ ├── pphumanseg_lite_mini_supervisely.yml
│ │ ├── pphumanseg_lite_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
│ │ ├── pphumanseg_lite_model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
│ │ ├── train_infer_python.txt
│ │ ├── train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt
│ │ └── train_ptq_infer_python.txt
│ ├── ppmatting
│ │ ├── model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
│ │ ├── model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
│ │ ├── modnet_mobilenetv2.yml
│ │ └── train_infer_python.txt
│ ├── segformer_b0
│ │ ├── segformer_b0_cityscapes_1024x1024_160k.yml
│ │ └── train_infer_python.txt
│ ├── stdc_stdc1
│ │ ├── inference_cpp.txt
│ │ ├── model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
│ │ ├── model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
│ │ ├── stdc1_seg_cityscapes_1024x512_80k.yml
│ │ └── train_infer_python.txt
│ └── upernet
│ │ ├── train_infer_python.txt
│ │ └── upernet_resnet101_os8_cityscapes_512x1024_40k.yml
├── cpp
│ ├── CMakeLists.txt
│ ├── build.sh
│ ├── cityscapes_demo.png
│ ├── humanseg_demo.jpg
│ ├── include
│ │ ├── config.h
│ │ ├── preprocess_op.h
│ │ ├── seg.h
│ │ └── utility.h
│ └── src
│ │ ├── config.cpp
│ │ ├── main.cpp
│ │ ├── preprocess_op.cpp
│ │ ├── seg.cpp
│ │ └── utility.cpp
├── docs
│ ├── benchmark_train.md
│ ├── cityscapes_demo.jpg
│ ├── cityscapes_val_5.list
│ ├── compare_right.png
│ ├── compare_wrong.png
│ ├── guide.png
│ ├── install.md
│ ├── test.png
│ ├── test_infer_js.md
│ ├── test_inference_cpp.md
│ ├── test_paddle2onnx.md
│ ├── test_serving_infer_cpp.md
│ ├── test_serving_infer_python.md
│ ├── test_train_amp_inference_python.md
│ ├── test_train_fleet_inference_python.md
│ └── test_train_inference_python.md
├── prepare.sh
├── prepare_js.sh
├── requirements.txt
├── results
│ ├── python_fcn_hrnetw18_small_results_fp16.txt
│ └── python_fcn_hrnetw18_small_results_fp32.txt
├── scripts
│ └── analysis.py
├── serving_cpp
│ ├── general_seg_op.cpp
│ ├── general_seg_op.h
│ ├── modify_serving_client_conf.py
│ ├── prepare_server.sh
│ └── serving_client.py
├── serving_python
│ ├── config.yml
│ ├── pipeline_http_client.py
│ ├── preprocess_ops.py
│ └── web_service.py
├── test_infer_js.sh
├── test_inference_cpp.sh
├── test_paddle2onnx.sh
├── test_ptq_inference_python.sh
├── test_serving_infer_cpp.sh
├── test_serving_infer_python.sh
├── test_train_inference_python.sh
├── val.py
└── web
│ ├── imgs
│ ├── human.jpg
│ └── seg.png
│ ├── index.html
│ ├── index.test.js
│ ├── jest-puppeteer.config.js
│ └── jest.config.js
├── tests
├── analyze_infer_log.py
├── run_check_install.sh
├── test_infer_benchmark.sh
└── test_infer_dataset.sh
├── tools
├── analyze_model.py
├── convert_cityscapes.py
├── convert_cocostuff.py
├── convert_voc2010.py
├── create_dataset_list.py
├── gray2pseudo_color.py
├── labelme2seg.py
├── plot_model_performance.py
├── split_dataset_list.py
├── visualize_annotation.py
└── voc_augment.py
├── train.py
└── val.py
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/---feature-request--------.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: "\U0001F680 Feature request / 新功能需求"
3 | about: Suggest an idea for this project / 提出一个新的功能需求或改进建议
4 | title: "[Feature Request]"
5 | labels: enhancement
6 | assignees: ''
7 |
8 | ---
9 |
10 | Welcome to propose a new feature! To help us understand your great feature, please provide following information:
11 | 1. A clear and concise description of the proposed feature.
12 | 2. Tell us why the feature is useful.
13 | 3. If possible, please show related codes .
14 |
15 | ---
16 |
17 | 欢迎提出一个新功能需求,为了帮助我们更好理解您的需求,辛苦提供下面信息:
18 | 1. 清晰简洁的语言提出新功能需求。
19 | 2. 请描述这个需求的必要性。
20 | 3. 如果可以,辛苦您提供相关代码实现效果。
21 |
--------------------------------------------------------------------------------
/.style.yapf:
--------------------------------------------------------------------------------
1 | [style]
2 | based_on_style = pep8
3 | column_limit = 80
4 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 |
3 | python:
4 | - '3.6'
5 |
6 | env:
7 | - PYTHONPATH=${PWD}
8 |
9 | install:
10 | - pip install --upgrade paddlepaddle
11 | - pip install -r requirements.txt
12 |
13 | script:
14 | - /bin/bash legacy/test/ci/check_code_style.sh
15 |
16 | notifications:
17 | email:
18 | on_success: change
19 | on_failure: always
20 |
--------------------------------------------------------------------------------
/EISeg/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include eiseg/config/*
2 | include eiseg/resource/*
3 | include eiseg/util/translate/*
--------------------------------------------------------------------------------
/EISeg/eiseg/config/colormap.txt:
--------------------------------------------------------------------------------
1 | 53,119,181
2 | 245,128,6
3 | 67,159,36
4 | 204,43,41
5 | 145,104,190
6 | 135,86,75
7 | 219,120,195
8 | 127,127,127
9 | 187,189,18
10 | 72,190,207
11 | 178,199,233
12 | 248,187,118
13 | 160,222,135
14 | 247,153,150
15 | 195,176,214
16 | 192,156,148
17 | 241,183,211
18 | 199,199,199
19 | 218,219,139
20 | 166,218,229
--------------------------------------------------------------------------------
/EISeg/eiseg/inference/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/inference/__init__.py
--------------------------------------------------------------------------------
/EISeg/eiseg/plugin/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/EISeg/eiseg/plugin/video/__init__.py:
--------------------------------------------------------------------------------
1 | # The video propagation and fusion code was heavily based on https://github.com/hkchengrex/MiVOS
# Users should be careful about adopting these functions in any commercial matters.
# https://github.com/hkchengrex/MiVOS/blob/main/LICENSE
from .inference_core import InferenceCore
from .video_tools import overlay_davis
--------------------------------------------------------------------------------
/EISeg/eiseg/plugin/video/util/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/plugin/video/util/__init__.py
--------------------------------------------------------------------------------
/EISeg/eiseg/plugin/video/util/range_transform.py:
--------------------------------------------------------------------------------
1 | # The video propagation and fusion code was heavily based on https://github.com/hkchengrex/MiVOS
2 | # Users should be careful about adopting these functions in any commercial matters.
3 | # https://github.com/hkchengrex/MiVOS/blob/main/LICENSE
4 |
5 | from paddle.vision import transforms
6 |
# Per-channel mean pixel value (0-255 RGB); presumably used as a fill/pad
# color by callers — TODO confirm at call sites.
im_mean = (124, 116, 104)

# Channel normalization with the standard ImageNet statistics
# (applied to tensors scaled to [0, 1]).
im_normalization = transforms.Normalize(
    mean=[0.485, 0.456, 0.406],
    std=[0.229, 0.224, 0.225], )

# Exact inverse of ``im_normalization``: Normalize(mean=-m/s, std=1/s)
# maps a normalized tensor back to the original [0, 1] range.
inv_im_trans = transforms.Normalize(
    mean=[-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.225],
    std=[1 / 0.229, 1 / 0.224, 1 / 0.225], )
16 |
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/3D.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/3D.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/About.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/About.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/AutoSave.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/AutoSave.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/ChangeOutputDir.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/ChangeOutputDir.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/Clear.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/Clear.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/ClearLabel.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/ClearLabel.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/ClearRecent.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/ClearRecent.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/Close.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/Close.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/Data.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/Data.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/DeleteAllPolygon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/DeleteAllPolygon.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/DeletePolygon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/DeletePolygon.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/Egypt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/Egypt.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/English.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/English.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/ExportLabel.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/ExportLabel.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/File.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/File.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/ImportLabel.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/ImportLabel.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/Label.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/Label.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/Language.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/Language.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/Log.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/Log.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/MedicalImaging.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/MedicalImaging.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/Model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/Model.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/N2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/N2.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/Net.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/Net.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/Next.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/Next.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/Ok.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/Ok.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/OpenFolder.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/OpenFolder.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/OpenImage.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/OpenImage.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/Paddle.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/Paddle.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/Play.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/Play.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/Prev.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/Prev.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/Propagate.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/Propagate.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/Qt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/Qt.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/Quit.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/Quit.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/Redo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/Redo.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/RemoteSensing.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/RemoteSensing.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/ReportBug.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/ReportBug.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/Same.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/Same.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/Save.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/Save.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/SaveAs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/SaveAs.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/SaveCOCO.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/SaveCOCO.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/SaveCutout.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/SaveCutout.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/SaveGrayScale.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/SaveGrayScale.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/SaveJson.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/SaveJson.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/SaveLargestCC.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/SaveLargestCC.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/SavePseudoColor.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/SavePseudoColor.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/Setting.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/Setting.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/Shortcut.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/Shortcut.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/Show.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/Show.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/ShowRSPoly.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/ShowRSPoly.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/Stop.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/Stop.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/Undo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/Undo.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/Use.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/Use.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/Video.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/Video.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/VideoAnno.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/VideoAnno.png
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/loading.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/loading.gif
--------------------------------------------------------------------------------
/EISeg/eiseg/resource/中文.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/resource/中文.png
--------------------------------------------------------------------------------
/EISeg/eiseg/util/__init__.py:
--------------------------------------------------------------------------------
1 | from .qt import newAction, addActions, struct, newIcon
2 | from .config import parse_configs, save_configs
3 | from .colormap import colorMap
4 | from .polygon import get_polygon, Instructions
5 | from .manager import MODELS
6 | from .language import TransUI
7 | from .coco.coco import COCO
8 | from .label import LabelList
9 | from .opath import check_cn, normcase
10 | from .palette import pal_color_map, color_map
--------------------------------------------------------------------------------
/EISeg/eiseg/util/coco/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'tylin'
2 |
--------------------------------------------------------------------------------
/EISeg/eiseg/util/exp_imports/default.py:
--------------------------------------------------------------------------------
1 | import paddle
2 | from functools import partial
3 | from easydict import EasyDict as edict
4 | from albumentations import *
5 |
6 | from data.datasets import *
7 | from model.losses import *
8 | from data.transforms import *
9 | #from isegm.engine.trainer import ISTrainer
10 | from model.metrics import AdaptiveIoU
11 | from data.points_sampler import MultiPointSampler
12 | from model.initializer import XavierGluon
13 |
14 | from model.is_hrnet_model import HRNetModel
15 | from model.is_deeplab_model import DeeplabModel
--------------------------------------------------------------------------------
/EISeg/eiseg/util/opath.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
# Check for Chinese characters.
def check_cn(path):
    """Return a regex match object if ``path`` contains any character in
    the CJK Unified Ideographs range (U+4E00-U+9FA5), otherwise ``None``.
    """
    return re.search(u'[\u4e00-\u9fa5]', path)
8 |
9 |
# Replace backslashes with forward slashes.
def normcase(path):
    """Return ``path`` with every backslash converted to ``/``.

    The previous implementation round-tripped the string through
    ``repr``/``eval``, which is fragile and needlessly invokes ``eval``;
    a plain ``str.replace`` is equivalent for all string inputs.
    """
    return path.replace('\\', '/')
13 |
--------------------------------------------------------------------------------
/EISeg/eiseg/util/palette.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
def get_color_map(N=256):
    """Build an ``(N, 3)`` uint8 RGB color map (PASCAL-VOC bit-pattern
    scheme): the color of label ``i`` is derived from the bits of ``i``,
    three bits per round, spread from the high bit downward.
    """

    def _bit(value, pos):
        # Integer 0/1 for bit ``pos`` of ``value``.
        return (value >> pos) & 1

    cmap = np.zeros((N, 3), dtype=np.uint8)
    for label in range(N):
        color = [0, 0, 0]
        idx = label
        # Walk the shift from the MSB down, consuming 3 bits of the
        # label index per step (one bit per RGB channel).
        for shift in range(7, -1, -1):
            for channel in range(3):
                color[channel] |= _bit(idx, channel) << shift
            idx >>= 3
        cmap[label] = np.array(color)

    return cmap
21 |
22 |
# Module-level default 256-entry palette, computed once at import time.
color_map = get_color_map()


def pal_color_map():
    """Return the shared default (256, 3) uint8 color palette."""
    return color_map
28 |
--------------------------------------------------------------------------------
/EISeg/eiseg/util/translate/Arabic.qm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/util/translate/Arabic.qm
--------------------------------------------------------------------------------
/EISeg/eiseg/util/translate/English.qm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/EISeg/eiseg/util/translate/English.qm
--------------------------------------------------------------------------------
/EISeg/eiseg/widget/__init__.py:
--------------------------------------------------------------------------------
1 | from .shortcut import ShortcutWidget
2 | from .loading import LoadingWidget
3 | from .line import LineItem
4 | from .grip import GripItem
5 | from .bbox import BBoxAnnotation
6 | from .polygon import PolygonAnnotation
7 | from .scene import AnnotationScene
8 | from .view import AnnotationView
9 | from .create import (create_text, create_button, create_slider, DockWidget,
10 | creat_dock)
11 | from .table import TableWidget
12 |
--------------------------------------------------------------------------------
/EISeg/init.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Resolve the directory containing this script, independent of the
# current working directory. Quoting every expansion (and using $(...)
# instead of backticks) keeps this correct for paths containing spaces,
# which the original unquoted version broke on.
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

echo "ROOT : $ROOT"

# Make the eiseg package importable from anywhere.
export PYTHONPATH="$PYTHONPATH:$ROOT/eiseg"
8 |
--------------------------------------------------------------------------------
/EISeg/requirements-med.txt:
--------------------------------------------------------------------------------
1 | SimpleITK
2 |
--------------------------------------------------------------------------------
/EISeg/requirements-rs.txt:
--------------------------------------------------------------------------------
1 | GDAL>=3.3.0
2 | rasterio>=1.2.4
3 |
--------------------------------------------------------------------------------
/EISeg/requirements-video.txt:
--------------------------------------------------------------------------------
1 | vtk
--------------------------------------------------------------------------------
/EISeg/requirements.txt:
--------------------------------------------------------------------------------
1 | pyqt5
2 | qtpy
3 | opencv-python
4 | scipy
5 | paddleseg
6 | albumentations
7 | cython
8 | pyyaml
9 | wget
10 | requests
11 | easydict
12 | scikit-image
13 | protobuf==3.20.0
14 |
--------------------------------------------------------------------------------
/EISeg/tool/pypi.sh:
--------------------------------------------------------------------------------
# Build and upload the eiseg package to the *test* PyPI index, then
# smoke-test the published package in a fresh conda environment.
# NOTE(review): the conda steps below are interactive crib-sheet commands
# ("conda activate" does not work in a non-interactive script) — run them
# by hand rather than executing this file blindly.

# Drop stale build artifacts so old wheels are not re-uploaded.
rm dist/*
python setup.py sdist bdist_wheel
# Upload to test PyPI; swap the URL below for a production release.
twine upload --repository-url https://test.pypi.org/legacy/ dist/* --verbose
# https://upload.pypi.org/legacy/

# Smoke test: install from PyPI into a clean environment and launch.
conda create -n test python=3.9
conda activate test
pip install --upgrade eiseg
pip install paddlepaddle
eiseg
11 |
--------------------------------------------------------------------------------
/EISeg/tool/translate.pro:
--------------------------------------------------------------------------------
1 | CODECFORTR = UTF-8
2 | SOURCES = ../eiseg/app.py ../eiseg/ui.py ../eiseg/widget/shortcut.py
3 | TRANSLATIONS = ./ts/out.ts
4 |
--------------------------------------------------------------------------------
/Matting/configs/benchmarks/Composition-1k/closeform_composition1k.yml:
--------------------------------------------------------------------------------
1 |
2 |
3 | val_dataset:
4 | type: Composition1K
5 | dataset_root: data/Composition-1k
6 | val_file: val.txt
7 | separator: '|'
8 | transforms:
9 | - type: LoadImages
10 | - type: ResizeByShort
11 | short_size: 512
12 | - type: ResizeToIntMult
13 | mult_int: 32
14 | - type: Normalize
15 | mode: val
16 | get_trimap: True
17 |
18 | model:
19 | type: CloseFormMatting
20 |
21 |
--------------------------------------------------------------------------------
/Matting/configs/benchmarks/Distinctions-646/closeform_distinctions646.yml:
--------------------------------------------------------------------------------
1 |
2 |
3 | val_dataset:
4 | type: Distinctions646
5 | dataset_root: data/Distinctions-646
6 | val_file: val.txt
7 | separator: '|'
8 | transforms:
9 | - type: LoadImages
10 | - type: ResizeByShort
11 | short_size: 512
12 | - type: ResizeToIntMult
13 | mult_int: 32
14 | - type: Normalize
15 | mode: val
16 | get_trimap: True
17 |
18 | model:
19 | type: CloseFormMatting
20 |
21 |
--------------------------------------------------------------------------------
/Matting/configs/benchmarks/PPM/README.md:
--------------------------------------------------------------------------------
1 | ### PPM
2 |
3 | | Method | SAD | MSE | Grad | Conn |
4 | |-|-|-|-|-|
5 | |ClosedFormMatting|40.6251|0.0782|55.5716|40.6646|
6 | |KNNMatting|41.5604|0.0681|52.5200|42.1784|
7 | |FastMatting|35.8735|0.0492|48.9267|35.6183|
8 | |LearningBasedMatting|40.5506|0.0776|55.3923|40.5690|
9 | |RandomWalksMatting|54.6315|0.0962|69.8779|54.0870|
10 |
--------------------------------------------------------------------------------
/Matting/configs/benchmarks/PPM/closeform.yml:
--------------------------------------------------------------------------------
1 |
2 |
3 | val_dataset:
4 | type: MattingDataset
5 | dataset_root: data/PPM-100
6 | val_file: val.txt
7 | transforms:
8 | - type: LoadImages
9 | - type: ResizeByShort
10 | short_size: 512
11 | - type: ResizeToIntMult
12 | mult_int: 32
13 | - type: Normalize
14 | mode: val
15 | get_trimap: True
16 |
17 | model:
18 | type: CloseFormMatting
19 |
20 |
--------------------------------------------------------------------------------
/Matting/configs/benchmarks/PPM/fast.yml:
--------------------------------------------------------------------------------
1 |
2 | _base_: closeform.yml
3 |
4 | model:
5 | type: FastMatting
6 |
7 |
--------------------------------------------------------------------------------
/Matting/configs/benchmarks/PPM/knn.yml:
--------------------------------------------------------------------------------
1 |
2 | _base_: closeform.yml
3 |
4 | model:
5 | type: KNNMatting
6 |
7 |
--------------------------------------------------------------------------------
/Matting/configs/benchmarks/PPM/learningbased.yml:
--------------------------------------------------------------------------------
1 |
2 | _base_: closeform.yml
3 |
4 | model:
5 | type: LearningBasedMatting
6 |
7 |
--------------------------------------------------------------------------------
/Matting/configs/benchmarks/PPM/randomwalks.yml:
--------------------------------------------------------------------------------
1 |
2 | _base_: closeform.yml
3 |
4 | model:
5 | type: RandomWalksMatting
6 |
7 |
--------------------------------------------------------------------------------
/Matting/configs/modnet/modnet-hrnet_w18.yml:
--------------------------------------------------------------------------------
1 | _base_: modnet-mobilenetv2.yml
2 | model:
3 | backbone:
4 | type: HRNet_W18
5 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/hrnet_w18_ssld.tar.gz
6 |
--------------------------------------------------------------------------------
/Matting/configs/modnet/modnet-resnet50_vd.yml:
--------------------------------------------------------------------------------
1 | _base_: modnet-mobilenetv2.yml
2 | model:
3 | backbone:
4 | type: ResNet50_vd
5 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
6 |
--------------------------------------------------------------------------------
/Matting/configs/ppmatting/ppmatting-hrnet_w48-composition.yml:
--------------------------------------------------------------------------------
1 | _base_: 'ppmatting-hrnet_w48-distinctions.yml'
2 |
3 | train_dataset:
4 | dataset_root: data/matting/Composition-1k
5 |
6 | val_dataset:
7 | dataset_root: data/matting/Composition-1k
--------------------------------------------------------------------------------
/Matting/deploy/human_matting_android_demo/.gitignore:
--------------------------------------------------------------------------------
1 | *.iml
2 | .gradle
3 | /local.properties
4 | /.idea/caches
5 | /.idea/libraries
6 | /.idea/modules.xml
7 | /.idea/workspace.xml
8 | /.idea/navEditor.xml
9 | /.idea/assetWizardSettings.xml
10 | .DS_Store
11 | /build
12 | /captures
13 | .externalNativeBuild
14 |
--------------------------------------------------------------------------------
/Matting/deploy/human_matting_android_demo/app/.gitignore:
--------------------------------------------------------------------------------
1 | /build
2 |
--------------------------------------------------------------------------------
/Matting/deploy/human_matting_android_demo/app/local.properties:
--------------------------------------------------------------------------------
1 | ## This file must *NOT* be checked into Version Control Systems,
2 | # as it contains information specific to your local configuration.
3 | #
4 | # Location of the SDK. This is only used by Gradle.
5 | # For customization when using a Version Control System, please read the
6 | # header note.
7 | #Mon Nov 25 17:01:52 CST 2019
8 | sdk.dir=/Users/chenlingchi/Library/Android/sdk
9 |
--------------------------------------------------------------------------------
/Matting/deploy/human_matting_android_demo/app/src/main/assets/image_matting/images/bg.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/Matting/deploy/human_matting_android_demo/app/src/main/assets/image_matting/images/bg.jpg
--------------------------------------------------------------------------------
/Matting/deploy/human_matting_android_demo/app/src/main/assets/image_matting/images/human.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/Matting/deploy/human_matting_android_demo/app/src/main/assets/image_matting/images/human.jpg
--------------------------------------------------------------------------------
/Matting/deploy/human_matting_android_demo/app/src/main/assets/image_matting/labels/label_list:
--------------------------------------------------------------------------------
1 | background
2 | human
3 |
--------------------------------------------------------------------------------
/Matting/deploy/human_matting_android_demo/app/src/main/res/drawable/paddle_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/Matting/deploy/human_matting_android_demo/app/src/main/res/drawable/paddle_logo.png
--------------------------------------------------------------------------------
/Matting/deploy/human_matting_android_demo/app/src/main/res/mipmap-anydpi-v26/ic_launcher.xml:
--------------------------------------------------------------------------------
  1 | <?xml version="1.0" encoding="utf-8"?>
  2 | <adaptive-icon xmlns:android="http://schemas.android.com/apk/res/android">
  3 |     <background android:drawable="@drawable/ic_launcher_background"/>
  4 |     <foreground android:drawable="@mipmap/ic_launcher_foreground"/>
  5 | </adaptive-icon>
--------------------------------------------------------------------------------
/Matting/deploy/human_matting_android_demo/app/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
--------------------------------------------------------------------------------
/Matting/deploy/human_matting_android_demo/app/src/main/res/mipmap-hdpi/ic_launcher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/Matting/deploy/human_matting_android_demo/app/src/main/res/mipmap-hdpi/ic_launcher.png
--------------------------------------------------------------------------------
/Matting/deploy/human_matting_android_demo/app/src/main/res/mipmap-hdpi/ic_launcher_round.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/Matting/deploy/human_matting_android_demo/app/src/main/res/mipmap-hdpi/ic_launcher_round.png
--------------------------------------------------------------------------------
/Matting/deploy/human_matting_android_demo/app/src/main/res/mipmap-mdpi/ic_launcher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/Matting/deploy/human_matting_android_demo/app/src/main/res/mipmap-mdpi/ic_launcher.png
--------------------------------------------------------------------------------
/Matting/deploy/human_matting_android_demo/app/src/main/res/mipmap-mdpi/ic_launcher_round.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/Matting/deploy/human_matting_android_demo/app/src/main/res/mipmap-mdpi/ic_launcher_round.png
--------------------------------------------------------------------------------
/Matting/deploy/human_matting_android_demo/app/src/main/res/mipmap-xhdpi/ic_launcher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/Matting/deploy/human_matting_android_demo/app/src/main/res/mipmap-xhdpi/ic_launcher.png
--------------------------------------------------------------------------------
/Matting/deploy/human_matting_android_demo/app/src/main/res/mipmap-xhdpi/ic_launcher_round.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/Matting/deploy/human_matting_android_demo/app/src/main/res/mipmap-xhdpi/ic_launcher_round.png
--------------------------------------------------------------------------------
/Matting/deploy/human_matting_android_demo/app/src/main/res/mipmap-xxhdpi/ic_launcher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/Matting/deploy/human_matting_android_demo/app/src/main/res/mipmap-xxhdpi/ic_launcher.png
--------------------------------------------------------------------------------
/Matting/deploy/human_matting_android_demo/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/Matting/deploy/human_matting_android_demo/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.png
--------------------------------------------------------------------------------
/Matting/deploy/human_matting_android_demo/app/src/main/res/mipmap-xxxhdpi/ic_launcher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/Matting/deploy/human_matting_android_demo/app/src/main/res/mipmap-xxxhdpi/ic_launcher.png
--------------------------------------------------------------------------------
/Matting/deploy/human_matting_android_demo/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/Matting/deploy/human_matting_android_demo/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.png
--------------------------------------------------------------------------------
/Matting/deploy/human_matting_android_demo/app/src/main/res/values/colors.xml:
--------------------------------------------------------------------------------
  1 | <?xml version="1.0" encoding="utf-8"?>
  2 | <resources>
  3 |     <color name="colorPrimary">#008577</color>
  4 |     <color name="colorPrimaryDark">#00574B</color>
  5 |     <color name="colorAccent">#D81B60</color>
  6 | </resources>
  7 |
--------------------------------------------------------------------------------
/Matting/deploy/human_matting_android_demo/app/src/test/java/com/baidu/paddle/lite/demo/ExampleUnitTest.java:
--------------------------------------------------------------------------------
1 | package com.baidu.paddle.lite.demo;
2 |
3 | import org.junit.Test;
4 |
5 | import static org.junit.Assert.*;
6 |
7 | /**
8 | * Example local unit test, which will execute on the development machine (host).
9 | *
 10 |  * @see <a href="http://d.android.com/tools/testing">Testing documentation</a>
11 | */
12 | public class ExampleUnitTest {
13 | @Test
14 | public void addition_isCorrect() {
15 | assertEquals(4, 2 + 2);
16 | }
17 | }
--------------------------------------------------------------------------------
/Matting/deploy/human_matting_android_demo/gradle/wrapper/gradle-wrapper.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/Matting/deploy/human_matting_android_demo/gradle/wrapper/gradle-wrapper.jar
--------------------------------------------------------------------------------
/Matting/deploy/human_matting_android_demo/gradle/wrapper/gradle-wrapper.properties:
--------------------------------------------------------------------------------
1 | #Thu Aug 22 15:05:37 CST 2019
2 | distributionBase=GRADLE_USER_HOME
3 | distributionPath=wrapper/dists
4 | zipStoreBase=GRADLE_USER_HOME
5 | zipStorePath=wrapper/dists
6 | distributionUrl=https\://services.gradle.org/distributions/gradle-5.1.1-all.zip
7 |
--------------------------------------------------------------------------------
/Matting/deploy/human_matting_android_demo/settings.gradle:
--------------------------------------------------------------------------------
1 | include ':app'
2 |
--------------------------------------------------------------------------------
/Matting/ppmatting/__init__.py:
--------------------------------------------------------------------------------
1 | from . import ml, metrics, transforms, datasets, models
2 |
--------------------------------------------------------------------------------
/Matting/ppmatting/core/__init__.py:
--------------------------------------------------------------------------------
1 | from .val import evaluate
2 | from .val_ml import evaluate_ml
3 | from .train import train
4 | from .predict import predict
--------------------------------------------------------------------------------
/Matting/ppmatting/metrics/__init__.py:
--------------------------------------------------------------------------------
1 | from .metric import MSE, SAD, Grad, Conn
2 |
3 | metrics_class_dict = {'sad': SAD, 'mse': MSE, 'grad': Grad, 'conn': Conn}
4 |
--------------------------------------------------------------------------------
/Matting/ppmatting/ml/__init__.py:
--------------------------------------------------------------------------------
1 | from .methods import CloseFormMatting, KNNMatting, LearningBasedMatting, FastMatting, RandomWalksMatting
2 |
--------------------------------------------------------------------------------
/Matting/ppmatting/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .backbone import *
2 | from .losses import *
3 | from .modnet import MODNet
4 | from .human_matting import HumanMatting
5 | from .dim import DIM
6 | from .ppmatting import PPMatting
7 | from .gca import GCABaseline, GCA
8 |
--------------------------------------------------------------------------------
/Matting/ppmatting/models/backbone/__init__.py:
--------------------------------------------------------------------------------
1 | from .mobilenet_v2 import *
2 | from .hrnet import *
3 | from .resnet_vd import *
4 | from .vgg import *
5 | from .gca_enc import *
--------------------------------------------------------------------------------
/Matting/ppmatting/models/losses/__init__.py:
--------------------------------------------------------------------------------
1 | from .loss import *
2 |
--------------------------------------------------------------------------------
/Matting/ppmatting/transforms/__init__.py:
--------------------------------------------------------------------------------
1 | from .transforms import *
2 |
--------------------------------------------------------------------------------
/Matting/ppmatting/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .estimate_foreground_ml import estimate_foreground_ml
2 | from .utils import get_files, get_image_list, mkdir
3 |
--------------------------------------------------------------------------------
/Matting/requirements.txt:
--------------------------------------------------------------------------------
1 | paddleseg >= 2.5
2 | pymatting
3 | scikit-image
4 | numba
5 | opencv-python==4.5.4.60
6 |
--------------------------------------------------------------------------------
/benchmark/configs/ocrnet_hrnetw48.yml:
--------------------------------------------------------------------------------
1 | # The ocrnet_hrnetw48 config for train benchmark
2 | _base_: './cityscapes_30imgs.yml'
3 |
4 | batch_size: 2
5 | iters: 500
6 |
7 | model:
8 | type: OCRNet
9 | backbone:
10 | type: HRNet_W48
11 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/hrnet_w48_ssld.tar.gz
12 | num_classes: 19
13 | backbone_indices: [0]
14 |
15 | optimizer:
16 | type: sgd
17 |
18 | lr_scheduler:
19 | type: PolynomialDecay
20 | learning_rate: 0.01
21 | power: 0.9
22 |
23 | loss:
24 | types:
25 | - type: CrossEntropyLoss
26 | - type: CrossEntropyLoss
27 | coef: [1, 0.4]
28 |
--------------------------------------------------------------------------------
/benchmark/run_fp16.sh:
--------------------------------------------------------------------------------
1 | export FLAGS_conv_workspace_size_limit=2000 #MB
2 | export FLAGS_cudnn_exhaustive_search=1
3 | export FLAGS_cudnn_batchnorm_spatial_persistent=1
4 |
5 | python train.py --config benchmark/deeplabv3p.yml \
6 | --iters=500 \
7 | --batch_size 4 \
8 | --learning_rate 0.01 \
9 | --num_workers 8 \
10 | --log_iters 20 \
11 | --data_format NHWC \
12 | --precision fp16
13 |
--------------------------------------------------------------------------------
/benchmark/run_fp32.sh:
--------------------------------------------------------------------------------
1 | export FLAGS_conv_workspace_size_limit=2000 #MB
2 | export FLAGS_cudnn_exhaustive_search=1
3 | export FLAGS_cudnn_batchnorm_spatial_persistent=1
4 |
5 | python train.py --config benchmark/deeplabv3p.yml \
6 | --iters=500 \
7 | --batch_size 2 \
8 | --learning_rate 0.01 \
9 | --num_workers 8 \
10 | --log_iters 20 \
11 | --data_format NCHW \
12 |
--------------------------------------------------------------------------------
/configs/_base_/cityscapes_1024x1024.yml:
--------------------------------------------------------------------------------
1 | _base_: './cityscapes.yml'
2 |
3 | train_dataset:
4 | transforms:
5 | - type: ResizeStepScaling
6 | min_scale_factor: 0.5
7 | max_scale_factor: 2.0
8 | scale_step_size: 0.25
9 | - type: RandomPaddingCrop
10 | crop_size: [1024, 1024]
11 | - type: RandomHorizontalFlip
12 | - type: RandomDistort
13 | brightness_range: 0.4
14 | contrast_range: 0.4
15 | saturation_range: 0.4
16 | - type: Normalize
17 |
18 | val_dataset:
19 | transforms:
20 | - type: Normalize
21 |
--------------------------------------------------------------------------------
/configs/_base_/cityscapes_769x769.yml:
--------------------------------------------------------------------------------
1 | _base_: './cityscapes.yml'
2 |
3 | train_dataset:
4 | transforms:
5 | - type: ResizeStepScaling
6 | min_scale_factor: 0.5
7 | max_scale_factor: 2.0
8 | scale_step_size: 0.25
9 | - type: RandomPaddingCrop
10 | crop_size: [769, 769]
11 | - type: RandomHorizontalFlip
12 | - type: RandomDistort
13 | brightness_range: 0.4
14 | contrast_range: 0.4
15 | saturation_range: 0.4
16 | - type: Normalize
17 |
18 | val_dataset:
19 | transforms:
20 | - type: Padding
21 | target_size: [2049, 1025]
22 | - type: Normalize
23 |
--------------------------------------------------------------------------------
/configs/_base_/cityscapes_769x769_setr.yml:
--------------------------------------------------------------------------------
1 | _base_: './cityscapes.yml'
2 |
3 | train_dataset:
4 | transforms:
5 | - type: ResizeStepScaling
6 | min_scale_factor: 0.25
7 | max_scale_factor: 2.0
8 | scale_step_size: 0.25
9 | - type: RandomPaddingCrop
10 | crop_size: [769, 769]
11 | - type: RandomHorizontalFlip
12 | - type: RandomDistort
13 | brightness_range: 0.5
14 | contrast_range: 0.5
15 | saturation_range: 0.5
16 | - type: Normalize
17 |
18 | val_dataset:
19 | transforms:
20 | - type: Padding
21 | target_size: [2048, 1024]
22 | - type: Normalize
23 |
--------------------------------------------------------------------------------
/configs/_base_/pascal_voc12aug.yml:
--------------------------------------------------------------------------------
1 | _base_: './pascal_voc12.yml'
2 |
3 | train_dataset:
4 | mode: trainaug
5 |
--------------------------------------------------------------------------------
/configs/ann/ann_resnet101_os8_cityscapes_1024x512_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: 'ann_resnet50_os8_cityscapes_1024x512_80k.yml'
2 |
3 | model:
4 | backbone:
5 | type: ResNet101_vd
6 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz
7 |
--------------------------------------------------------------------------------
/configs/ann/ann_resnet101_os8_voc12aug_512x512_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: 'ann_resnet50_os8_voc12aug_512x512_40k.yml'
2 |
3 | model:
4 | backbone:
5 | type: ResNet101_vd
6 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz
7 |
--------------------------------------------------------------------------------
/configs/ann/ann_resnet50_os8_voc12aug_512x512_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/pascal_voc12aug.yml'
2 |
3 | loss:
4 | types:
5 | - type: CrossEntropyLoss
6 | coef: [1, 0.4]
7 |
8 | model:
9 | type: ANN
10 | backbone:
11 | type: ResNet50_vd
12 | output_stride: 8
13 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
14 | backbone_indices: [2, 3]
15 | key_value_channels: 256
16 | inter_channels: 512
17 | psp_size: [1, 3, 6, 8]
18 | enable_auxiliary_loss: True
19 | align_corners: False
20 | pretrained: null
21 |
--------------------------------------------------------------------------------
/configs/attention_unet/README.md:
--------------------------------------------------------------------------------
1 | # Attention U-Net: Learning Where to Look for the Pancreas
2 |
3 | ## Reference
4 |
5 | > Oktay, Ozan, Jo Schlemper, Loic Le Folgoc, Matthew Lee, Mattias Heinrich, Kazunari Misawa, Kensaku Mori et al. "Attention u-net: Learning where to look for the pancreas." arXiv preprint arXiv:1804.03999 (2018).
6 |
--------------------------------------------------------------------------------
/configs/attention_unet/attention_unet_cityscapes_1024x512_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 2
4 | iters: 80000
5 |
6 | lr_scheduler:
7 | type: PolynomialDecay
8 | learning_rate: 0.05
9 | end_lr: 0.0
10 | power: 0.9
11 |
12 | model:
13 | type: AttentionUNet
14 | pretrained: Null
15 |
--------------------------------------------------------------------------------
/configs/bisenet/bisenet_cityscapes_1024x1024_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes_1024x1024.yml'
2 |
3 | model:
4 | type: BiSeNetV2
5 | num_classes: 19
6 |
7 | optimizer:
8 | type: sgd
9 | weight_decay: 0.0005
10 |
11 | loss:
12 | types:
13 | - type: CrossEntropyLoss
14 | - type: CrossEntropyLoss
15 | - type: CrossEntropyLoss
16 | - type: CrossEntropyLoss
17 | - type: CrossEntropyLoss
18 | coef: [1, 1, 1, 1, 1]
19 |
20 | batch_size: 4
21 | iters: 160000
22 |
23 | lr_scheduler:
24 | type: PolynomialDecay
25 | learning_rate: 0.05
26 | end_lr: 0.0
27 | power: 0.9
28 |
--------------------------------------------------------------------------------
/configs/bisenetv1/bisenetv1_resnet18_os8_cityscapes_1024x512_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 4
4 | iters: 160000
5 |
6 | model:
7 | type: BiseNetV1
8 | backbone:
9 | type: ResNet18_vd
10 | output_stride: 8
11 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet18_vd_ssld_v2.tar.gz
12 |
13 | optimizer:
14 | type: sgd
15 | weight_decay: 0.0005
16 |
17 | loss:
18 | types:
19 | - type: OhemCrossEntropyLoss
20 | - type: OhemCrossEntropyLoss
21 | - type: OhemCrossEntropyLoss
22 | coef: [1, 1, 1]
23 |
24 | lr_scheduler:
25 | type: PolynomialDecay
26 | learning_rate: 0.01
27 | end_lr: 0.0
28 | power: 0.9
29 |
--------------------------------------------------------------------------------
/configs/ccnet/ccnet_resnet101_os8_cityscapes_769x769_60k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes_769x769.yml'
2 |
3 | batch_size: 2
4 | iters: 60000
5 |
6 | model:
7 | type: CCNet
8 | backbone:
9 | type: ResNet101_vd
10 | output_stride: 8
11 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz
12 | backbone_indices: [2, 3]
13 | enable_auxiliary_loss: True
14 | dropout_prob: 0.1
15 | recurrence: 2
16 |
17 | loss:
18 | types:
19 | - type: OhemCrossEntropyLoss
20 | - type: CrossEntropyLoss
21 | coef: [1, 0.4]
22 |
23 | lr_scheduler:
24 | type: PolynomialDecay
25 | learning_rate: 0.01
26 | power: 0.9
27 | end_lr: 1.0e-4
28 |
--------------------------------------------------------------------------------
/configs/danet/danet_resnet101_os8_cityscapes_1024x512_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 1
4 | iters: 80000
5 |
6 | model:
7 | type: DANet
8 | backbone:
9 | type: ResNet101_vd
10 | output_stride: 8
11 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz
12 | num_classes: 19
13 | backbone_indices: [2, 3]
14 |
15 | optimizer:
16 | type: sgd
17 |
18 | lr_scheduler:
19 | type: PolynomialDecay
20 | learning_rate: 0.01
21 | power: 0.9
22 |
23 | loss:
24 | types:
25 | - type: CrossEntropyLoss
26 | - type: CrossEntropyLoss
27 | - type: CrossEntropyLoss
28 | - type: CrossEntropyLoss
29 | coef: [1, 1, 1, 0.4]
30 |
--------------------------------------------------------------------------------
/configs/danet/danet_resnet50_os8_cityscapes_1024x512_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 2
4 | iters: 80000
5 |
6 | model:
7 | type: DANet
8 | backbone:
9 | type: ResNet50_vd
10 | output_stride: 8
11 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
12 | num_classes: 19
13 | backbone_indices: [2, 3]
14 |
15 | optimizer:
16 | type: sgd
17 |
18 | lr_scheduler:
19 | type: PolynomialDecay
20 | learning_rate: 0.01
21 | power: 0.9
22 |
23 | loss:
24 | types:
25 | - type: CrossEntropyLoss
26 | - type: CrossEntropyLoss
27 | - type: CrossEntropyLoss
28 | - type: CrossEntropyLoss
29 | coef: [1, 1, 1, 0.4]
30 |
--------------------------------------------------------------------------------
/configs/danet/danet_resnet50_os8_voc12aug_512x512_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/pascal_voc12aug.yml'
2 |
3 | model:
4 | type: DANet
5 | backbone:
6 | type: ResNet50_vd
7 | output_stride: 8
8 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
9 | backbone_indices: [2, 3]
10 |
11 | loss:
12 | types:
13 | - type: CrossEntropyLoss
14 | - type: CrossEntropyLoss
15 | - type: CrossEntropyLoss
16 | - type: CrossEntropyLoss
17 | coef: [1, 1, 1, 0.4]
18 |
--------------------------------------------------------------------------------
/configs/ddrnet/ddrnet23_cityscapes_1024x1024_120k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes_1024x1024.yml'
2 |
3 | batch_size: 3
4 | iters: 120000
5 |
6 | model:
7 | type: DDRNet_23
8 | enable_auxiliary_loss: False
9 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/cityscapes/ddrnet23_cityscapes_1024x1024_120k/pretrain/model.pdparams
10 |
11 | optimizer:
12 | type: sgd
13 | weight_decay: 0.0005
14 |
15 | loss:
16 | types:
17 | - type: OhemCrossEntropyLoss
18 | coef: [1]
19 |
20 | lr_scheduler:
21 | type: PolynomialDecay
22 | learning_rate: 0.01
23 | end_lr: 0.0
24 | power: 0.9
25 |
--------------------------------------------------------------------------------
/configs/deeplabv3/deeplabv3_resnet101_os8_cityscapes_1024x512_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: 'deeplabv3_resnet50_os8_cityscapes_1024x512_80k.yml'
2 |
3 | model:
4 | backbone:
5 | type: ResNet101_vd
6 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz
7 |
--------------------------------------------------------------------------------
/configs/deeplabv3/deeplabv3_resnet101_os8_voc12aug_512x512_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: 'deeplabv3_resnet50_os8_voc12aug_512x512_40k.yml'
2 |
3 | model:
4 | backbone:
5 | type: ResNet101_vd
6 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz
7 |
--------------------------------------------------------------------------------
/configs/deeplabv3/deeplabv3_resnet50_os8_cityscapes_1024x512_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 2
4 | iters: 80000
5 |
6 | model:
7 | type: DeepLabV3
8 | backbone:
9 | type: ResNet50_vd
10 | output_stride: 8
11 | multi_grid: [1, 2, 4]
12 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
13 | backbone_indices: [3]
14 | aspp_ratios: [1, 12, 24, 36]
15 | aspp_out_channels: 256
16 | align_corners: False
17 | pretrained: null
18 |
--------------------------------------------------------------------------------
/configs/deeplabv3/deeplabv3_resnet50_os8_voc12aug_512x512_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/pascal_voc12aug.yml'
2 |
3 | model:
4 | type: DeepLabV3
5 | backbone:
6 | type: ResNet50_vd
7 | output_stride: 8
8 | multi_grid: [1, 2, 4]
9 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
10 | backbone_indices: [3]
11 | aspp_ratios: [1, 12, 24, 36]
12 | aspp_out_channels: 256
13 | align_corners: False
14 | pretrained: null
15 |
--------------------------------------------------------------------------------
/configs/deeplabv3p/deeplabv3p_resnet101_os8_cityscapes_1024x512_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: 'deeplabv3p_resnet50_os8_cityscapes_1024x512_80k.yml'
2 |
3 |
4 | model:
5 | backbone:
6 | type: ResNet101_vd
7 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz
8 |
--------------------------------------------------------------------------------
/configs/deeplabv3p/deeplabv3p_resnet101_os8_cityscapes_769x769_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes_769x769.yml'
2 |
3 | batch_size: 2
4 | iters: 80000
5 |
6 | model:
7 | type: DeepLabV3P
8 | backbone:
9 | type: ResNet101_vd
10 | output_stride: 8
11 | multi_grid: [1, 2, 4]
12 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz
13 | num_classes: 19
14 | backbone_indices: [0, 3]
15 | aspp_ratios: [1, 12, 24, 36]
16 | aspp_out_channels: 256
17 | align_corners: True
18 | pretrained: null
19 |
--------------------------------------------------------------------------------
/configs/deeplabv3p/deeplabv3p_resnet101_os8_voc12aug_512x512_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: 'deeplabv3p_resnet50_os8_voc12aug_512x512_40k.yml'
2 |
3 | model:
4 | backbone:
5 | type: ResNet101_vd
6 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz
7 |
--------------------------------------------------------------------------------
/configs/deeplabv3p/deeplabv3p_resnet50_os8_cityscapes_1024x512_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 2
4 | iters: 80000
5 |
6 | model:
7 | type: DeepLabV3P
8 | backbone:
9 | type: ResNet50_vd
10 | output_stride: 8
11 | multi_grid: [1, 2, 4]
12 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
13 | num_classes: 19
14 | backbone_indices: [0, 3]
15 | aspp_ratios: [1, 12, 24, 36]
16 | aspp_out_channels: 256
17 | align_corners: False
18 | pretrained: null
19 |
--------------------------------------------------------------------------------
/configs/deeplabv3p/deeplabv3p_resnet50_os8_cityscapes_1024x512_80k_rmiloss.yml:
--------------------------------------------------------------------------------
1 | _base_: 'deeplabv3p_resnet50_os8_cityscapes_1024x512_80k.yml'
2 |
3 | loss:
4 | types:
5 | - type: MixedLoss
6 | losses:
7 | - type: CrossEntropyLoss
8 | - type: RMILoss
9 | coef: [0.5, 0.5]
10 |
--------------------------------------------------------------------------------
/configs/deeplabv3p/deeplabv3p_resnet50_os8_voc12aug_512x512_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/pascal_voc12aug.yml'
2 |
3 | model:
4 | type: DeepLabV3P
5 | backbone:
6 | type: ResNet50_vd
7 | output_stride: 8
8 | multi_grid: [1, 2, 4]
9 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
10 | backbone_indices: [0, 3]
11 | aspp_ratios: [1, 12, 24, 36]
12 | aspp_out_channels: 256
13 | align_corners: False
14 | pretrained: null
15 |
--------------------------------------------------------------------------------
/configs/dmnet/dmnet_resnet101_os8_cityscapes_1024x512_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 2
4 | iters: 80000
5 |
6 | model:
7 | type: DMNet
8 | backbone:
9 | type: ResNet101_vd
10 | output_stride: 8
11 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz
12 |
13 | optimizer:
14 | type: sgd
15 | weight_decay: 0.0005
16 |
17 | loss:
18 | types:
19 | - type: CrossEntropyLoss
20 | - type: CrossEntropyLoss
21 | coef: [1, 0.4]
22 |
23 | lr_scheduler:
24 | type: PolynomialDecay
25 | learning_rate: 0.01
26 | end_lr: 0.0
27 | power: 0.9
28 |
--------------------------------------------------------------------------------
/configs/dnlnet/dnlnet_resnet101_os8_cityscapes_1024x512_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 2
4 | iters: 80000
5 |
6 | model:
7 | type: DNLNet
8 | backbone:
9 | type: ResNet101_vd
10 | output_stride: 8
11 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz
12 | num_classes: 19
13 |
14 | optimizer:
15 | type: sgd
16 | momentum: 0.9
17 | weight_decay: 0.00004
18 |
19 | lr_scheduler:
20 | type: PolynomialDecay
21 | learning_rate: 0.01
22 | power: 0.9
23 |
24 |
25 | loss:
26 | types:
27 | - type: CrossEntropyLoss
28 | - type: CrossEntropyLoss
29 | coef: [1, 0.4]
30 |
--------------------------------------------------------------------------------
/configs/dnlnet/dnlnet_resnet101_os8_voc12aug_512x512_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/pascal_voc12aug.yml'
2 |
3 | model:
4 | type: DNLNet
5 | backbone:
6 | type: ResNet101_vd
7 | output_stride: 8
8 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz
9 |
10 | optimizer:
11 | type: sgd
12 | momentum: 0.9
13 | weight_decay: 4.0e-05
14 |
15 | lr_scheduler:
16 | type: PolynomialDecay
17 | learning_rate: 0.01
18 | power: 0.9
19 |
20 | loss:
21 | types:
22 | - type: CrossEntropyLoss
23 | - type: CrossEntropyLoss
24 | coef: [1, 0.4]
25 |
--------------------------------------------------------------------------------
/configs/dnlnet/dnlnet_resnet50_os8_cityscapes_1024x512_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 2
4 | iters: 80000
5 |
6 | model:
7 | type: DNLNet
8 | backbone:
9 | type: ResNet50_vd
10 | output_stride: 8
11 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
12 | num_classes: 19
13 |
14 | optimizer:
15 | type: sgd
16 | momentum: 0.9
17 | weight_decay: 0.00004
18 |
19 | lr_scheduler:
20 | type: PolynomialDecay
21 | learning_rate: 0.01
22 | power: 0.9
23 |
24 |
25 | loss:
26 | types:
27 | - type: CrossEntropyLoss
28 | - type: CrossEntropyLoss
29 | coef: [1, 0.4]
30 |
--------------------------------------------------------------------------------
/configs/dnlnet/dnlnet_resnet50_os8_voc12aug_512x512_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/pascal_voc12aug.yml'
2 |
3 | model:
4 | type: DNLNet
5 | backbone:
6 | type: ResNet50_vd
7 | output_stride: 8
8 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
9 |
10 | optimizer:
11 | type: sgd
12 | momentum: 0.9
13 | weight_decay: 4.0e-05
14 |
15 | lr_scheduler:
16 | type: PolynomialDecay
17 | learning_rate: 0.01
18 | power: 0.9
19 |
20 | loss:
21 | types:
22 | - type: CrossEntropyLoss
23 | - type: CrossEntropyLoss
24 | coef: [1, 0.4]
25 |
--------------------------------------------------------------------------------
/configs/emanet/emanet_resnet101_os8_voc12aug_512x512_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/pascal_voc12aug.yml'
2 |
3 | model:
4 | type: EMANet
5 | backbone:
6 | type: ResNet101_vd
7 | output_stride: 8
8 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz
9 | ema_channels: 512
10 | gc_channels: 256
11 | num_bases: 64
12 | stage_num: 3
13 | momentum: 0.1
14 | concat_input: True
15 | enable_auxiliary_loss: True
16 | align_corners: True
17 |
18 | optimizer:
19 | type: sgd
20 | momentum: 0.9
21 | weight_decay: 0.0005
22 |
23 |
24 | loss:
25 | types:
26 | - type: CrossEntropyLoss
27 | - type: CrossEntropyLoss
28 | coef: [1, 0.4]
29 |
--------------------------------------------------------------------------------
/configs/emanet/emanet_resnet50_os8_voc12aug_512x512_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/pascal_voc12aug.yml'
2 |
3 |
4 | model:
5 | type: EMANet
6 | backbone:
7 | type: ResNet50_vd
8 | output_stride: 8
9 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
10 | ema_channels: 512
11 | gc_channels: 256
12 | num_bases: 64
13 | stage_num: 3
14 | momentum: 0.1
15 | concat_input: True
16 | enable_auxiliary_loss: True
17 | align_corners: True
18 |
19 | optimizer:
20 | type: sgd
21 | momentum: 0.9
22 | weight_decay: 0.0005
23 |
24 |
25 | loss:
26 | types:
27 | - type: CrossEntropyLoss
28 | - type: CrossEntropyLoss
29 | coef: [1, 0.4]
30 |
--------------------------------------------------------------------------------
/configs/fastscnn/fastscnn_cityscapes_1024x1024_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes_1024x1024.yml'
2 |
3 | batch_size: 4
4 | iters: 160000
5 |
6 | loss:
7 | types:
8 | - type: CrossEntropyLoss
9 | coef: [1.0, 0.4]
10 |
11 | lr_scheduler:
12 | type: PolynomialDecay
13 | learning_rate: 0.05
14 | end_lr: 1.0e-4
15 | power: 0.9
16 |
17 | model:
18 | type: FastSCNN
19 | num_classes: 19
20 | enable_auxiliary_loss: True
21 | pretrained: null
22 |
--------------------------------------------------------------------------------
/configs/fastscnn/fastscnn_cityscapes_1024x1024_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes_1024x1024.yml'
2 |
3 | batch_size: 4
4 | iters: 40000
5 |
6 | loss:
7 | types:
8 | - type: CrossEntropyLoss
9 | coef: [1.0, 0.4]
10 |
11 | lr_scheduler:
12 | type: PolynomialDecay
13 | learning_rate: 0.025
14 | end_lr: 1.0e-4
15 | power: 0.9
16 |
17 | model:
18 | type: FastSCNN
19 | num_classes: 19
20 | enable_auxiliary_loss: True
21 | pretrained: null
22 |
--------------------------------------------------------------------------------
/configs/fastscnn/fastscnn_cityscapes_1024x1024_40k_SCL.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes_1024x1024.yml'
2 |
3 | batch_size: 4
4 | iters: 40000
5 |
6 | loss:
7 | types:
8 | - type: MixedLoss
9 | losses:
10 | - type: CrossEntropyLoss
11 | - type: SemanticConnectivityLoss
12 | coef: [1, 0.01]
13 | - type: CrossEntropyLoss
14 | coef: [1.0, 0.4]
15 |
16 | lr_scheduler:
17 | type: PolynomialDecay
18 | learning_rate: 0.025
19 | end_lr: 1.0e-4
20 | power: 0.9
21 |
22 | model:
23 | type: FastSCNN
24 | num_classes: 19
25 | enable_auxiliary_loss: True
26 | pretrained: null
27 |
--------------------------------------------------------------------------------
/configs/fcn/fcn_hrnetw18_cityscapes_1024x512_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | model:
4 | type: FCN
5 | backbone:
6 | type: HRNet_W18
7 | align_corners: False
8 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/hrnet_w18_ssld.tar.gz
9 | num_classes: 19
10 | pretrained: Null
11 | backbone_indices: [-1]
12 |
13 | optimizer:
14 | weight_decay: 0.0005
15 |
16 | iters: 80000
17 |
--------------------------------------------------------------------------------
/configs/fcn/fcn_hrnetw18_cityscapes_1024x512_80k_bs4.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | model:
4 | type: FCN
5 | backbone:
6 | type: HRNet_W18
7 | align_corners: False
8 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/hrnet_w18_ssld.tar.gz
9 | num_classes: 19
10 | pretrained: Null
11 | backbone_indices: [-1]
12 |
13 | optimizer:
14 | weight_decay: 0.0005
15 |
16 | iters: 80000
17 | batch_size: 4
18 |
--------------------------------------------------------------------------------
/configs/fcn/fcn_hrnetw18_cityscapes_1024x512_80k_bs4_SCL.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | model:
4 | type: FCN
5 | backbone:
6 | type: HRNet_W18
7 | align_corners: False
8 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/hrnet_w18_ssld.tar.gz
9 | num_classes: 19
10 | pretrained: Null
11 | backbone_indices: [-1]
12 |
13 | optimizer:
14 | weight_decay: 0.0005
15 |
16 | iters: 80000
17 | batch_size: 4
18 |
19 | loss:
20 | types:
21 | - type: MixedLoss
22 | losses:
23 | - type: CrossEntropyLoss
24 | - type: SemanticConnectivityLoss
25 | coef: [1, 0.05]
26 | coef: [1]
27 |
--------------------------------------------------------------------------------
/configs/fcn/fcn_hrnetw18_voc12aug_512x512_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/pascal_voc12aug.yml'
2 |
3 | model:
4 | type: FCN
5 | backbone:
6 | type: HRNet_W18
7 | align_corners: False
8 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/hrnet_w18_ssld.tar.gz
9 | num_classes: 21
10 | pretrained: Null
11 | backbone_indices: [-1]
12 |
13 | optimizer:
14 | weight_decay: 0.0005
15 |
--------------------------------------------------------------------------------
/configs/fcn/fcn_hrnetw48_cityscapes_1024x512_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: './fcn_hrnetw18_cityscapes_1024x512_80k.yml'
2 |
3 | model:
4 | backbone:
5 | type: HRNet_W48
6 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/hrnet_w48_ssld.tar.gz
7 |
--------------------------------------------------------------------------------
/configs/fcn/fcn_hrnetw48_voc12aug_512x512_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: './fcn_hrnetw18_voc12aug_512x512_40k.yml'
2 |
3 | model:
4 | backbone:
5 | type: HRNet_W48
6 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/hrnet_w48_ssld.tar.gz
7 |
--------------------------------------------------------------------------------
/configs/gcnet/gcnet_resnet101_os8_cityscapes_1024x512_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: 'gcnet_resnet50_os8_cityscapes_1024x512_80k.yml'
2 |
3 | model:
4 | backbone:
5 | type: ResNet101_vd
6 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz
7 |
--------------------------------------------------------------------------------
/configs/gcnet/gcnet_resnet101_os8_voc12aug_512x512_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: './gcnet_resnet50_os8_voc12aug_512x512_40k.yml'
2 |
3 | model:
4 | backbone:
5 | type: ResNet101_vd
6 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz
7 |
--------------------------------------------------------------------------------
/configs/gcnet/gcnet_resnet50_os8_cityscapes_1024x512_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 2
4 | iters: 80000
5 |
6 | lr_scheduler:
7 | type: PolynomialDecay
8 | learning_rate: 0.01
9 | power: 0.9
10 | end_lr: 1.0e-5
11 |
12 | loss:
13 | types:
14 | - type: CrossEntropyLoss
15 | coef: [1, 0.4]
16 |
17 | model:
18 | type: GCNet
19 | backbone:
20 | type: ResNet50_vd
21 | output_stride: 8
22 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
23 | gc_channels: 512
24 | ratio: 0.25
25 | enable_auxiliary_loss: True
26 | align_corners: False
27 | pretrained: null
28 |
--------------------------------------------------------------------------------
/configs/gcnet/gcnet_resnet50_os8_voc12aug_512x512_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/pascal_voc12aug.yml'
2 |
3 | lr_scheduler:
4 | type: PolynomialDecay
5 | learning_rate: 0.01
6 | power: 0.9
7 | end_lr: 1.0e-5
8 |
9 | loss:
10 | types:
11 | - type: CrossEntropyLoss
12 | coef: [1, 0.4]
13 |
14 | model:
15 | type: GCNet
16 | backbone:
17 | type: ResNet50_vd
18 | output_stride: 8
19 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
20 | gc_channels: 512
21 | ratio: 0.25
22 | enable_auxiliary_loss: True
23 | align_corners: False
24 | pretrained: null
25 |
--------------------------------------------------------------------------------
/configs/ginet/ginet_resnet101_os8_cityscapes_1024x512_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: 'ginet_resnet50_os8_cityscapes_1024x512_80k.yml'
2 |
3 | model:
4 | backbone:
5 | type: ResNet101_vd
6 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz
7 |
--------------------------------------------------------------------------------
/configs/ginet/ginet_resnet101_os8_voc12aug_512x512_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: 'ginet_resnet50_os8_voc12aug_512x512_40k.yml'
2 |
3 | model:
4 | backbone:
5 | type: ResNet101_vd
6 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz
7 |
--------------------------------------------------------------------------------
/configs/ginet/ginet_resnet50_os8_cityscapes_1024x512_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 2
4 | iters: 80000
5 |
6 |
7 | model:
8 | type: GINet
9 | backbone:
10 | type: ResNet50_vd
11 | output_stride: 8
12 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
13 | backbone_indices: [0, 1, 2, 3]
14 | enable_auxiliary_loss: True
15 | jpu: True
16 | align_corners: True
17 | pretrained: null
18 |
19 |
20 | loss:
21 | types:
22 | - type: CrossEntropyLoss
23 | coef: [1, 0.4]
24 |
--------------------------------------------------------------------------------
/configs/ginet/ginet_resnet50_os8_voc12aug_512x512_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/pascal_voc12aug.yml'
2 |
3 |
4 | model:
5 | type: GINet
6 | backbone:
7 | type: ResNet50_vd
8 | output_stride: 8
9 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
10 | backbone_indices: [0, 1, 2, 3]
11 | enable_auxiliary_loss: True
12 | jpu: True
13 | align_corners: True
14 | pretrained: null
15 |
16 | loss:
17 | types:
18 | - type: CrossEntropyLoss
19 | coef: [1, 0.4]
20 |
--------------------------------------------------------------------------------
/configs/glore/glore_resnet50_os8_cityscapes_1024x512_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 2
4 | iters: 80000
5 |
6 | learning_rate:
7 | decay:
8 | end_lr: 1.0e-5
9 |
10 | loss:
11 | types:
12 | - type: CrossEntropyLoss
13 | coef: [1, 0.4]
14 |
15 | model:
16 | type: GloRe
17 | backbone:
18 | type: ResNet50_vd
19 | output_stride: 8
20 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
21 | enable_auxiliary_loss: True
22 | align_corners: False
23 | pretrained: null
24 |
--------------------------------------------------------------------------------
/configs/glore/glore_resnet50_os8_voc12aug_512x512_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/pascal_voc12aug.yml'
2 |
3 |
4 | model:
5 | type: GloRe
6 | backbone:
7 | type: ResNet50_vd
8 | output_stride: 8
9 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
10 | enable_auxiliary_loss: True
11 | align_corners: False
12 | pretrained: null
13 |
14 | loss:
15 | types:
16 | - type: CrossEntropyLoss
17 | coef: [1, 0.4]
18 |
--------------------------------------------------------------------------------
/configs/hardnet/hardnet_cityscapes_1024x1024_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes_1024x1024.yml'
2 |
3 | batch_size: 4
4 | iters: 160000
5 |
6 | lr_scheduler:
7 | type: PolynomialDecay
8 | learning_rate: 0.02
9 |
10 | optimizer:
11 | type: sgd
12 | momentum: 0.9
13 | weight_decay: 5.0e-4
14 |
15 | model:
16 | type: HarDNet
17 | pretrained: null
18 |
19 | loss:
20 | types:
21 | - type: BootstrappedCrossEntropyLoss
22 | min_K: 4096
23 | loss_th: 0.3
24 | coef: [1]
25 |
--------------------------------------------------------------------------------
/configs/isanet/isanet_resnet101_os8_cityscapes_769x769_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes_769x769.yml'
2 |
3 | batch_size: 2
4 | iters: 80000
5 |
6 | model:
7 | type: ISANet
8 | isa_channels: 256
9 | backbone:
10 | type: ResNet101_vd
11 | output_stride: 8
12 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz
13 | num_classes: 19
14 |
15 | optimizer:
16 | type: sgd
17 | momentum: 0.9
18 | weight_decay: 0.00001
19 |
20 | lr_scheduler:
21 | type: PolynomialDecay
22 | learning_rate: 0.01
23 | power: 0.9
24 |
25 | loss:
26 | types:
27 | - type: CrossEntropyLoss
28 | - type: CrossEntropyLoss
29 | coef: [1, 0.4]
30 |
--------------------------------------------------------------------------------
/configs/isanet/isanet_resnet101_os8_voc12aug_512x512_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/pascal_voc12aug.yml'
2 |
3 | model:
4 | type: ISANet
5 | isa_channels: 256
6 | backbone:
7 | type: ResNet101_vd
8 | output_stride: 8
9 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz
10 | align_corners: True
11 |
12 | optimizer:
13 | type: sgd
14 | momentum: 0.9
15 | weight_decay: 4.0e-05
16 |
17 | lr_scheduler:
18 | type: PolynomialDecay
19 | learning_rate: 0.01
20 | power: 0.9
21 |
22 | loss:
23 | types:
24 | - type: CrossEntropyLoss
25 | - type: CrossEntropyLoss
26 | coef: [1, 0.4]
27 |
--------------------------------------------------------------------------------
/configs/isanet/isanet_resnet50_os8_cityscapes_769x769_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes_769x769.yml'
2 |
3 | batch_size: 2
4 | iters: 80000
5 |
6 | model:
7 | type: ISANet
8 | isa_channels: 256
9 | backbone:
10 | type: ResNet50_vd
11 | output_stride: 8
12 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
13 | num_classes: 19
14 |
15 | optimizer:
16 | type: sgd
17 | momentum: 0.9
18 | weight_decay: 0.00001
19 |
20 | lr_scheduler:
21 | type: PolynomialDecay
22 | learning_rate: 0.01
23 | power: 0.9
24 |
25 |
26 | loss:
27 | types:
28 | - type: CrossEntropyLoss
29 | - type: CrossEntropyLoss
30 | coef: [1, 0.4]
31 |
--------------------------------------------------------------------------------
/configs/isanet/isanet_resnet50_os8_voc12aug_512x512_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/pascal_voc12aug.yml'
2 |
3 | model:
4 | type: ISANet
5 | isa_channels: 256
6 | backbone:
7 | type: ResNet50_vd
8 | output_stride: 8
9 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
10 | align_corners: True
11 |
12 | optimizer:
13 | type: sgd
14 | momentum: 0.9
15 | weight_decay: 0.00001
16 |
17 | lr_scheduler:
18 | type: PolynomialDecay
19 | learning_rate: 0.01
20 | power: 0.9
21 |
22 | loss:
23 | types:
24 | - type: CrossEntropyLoss
25 | - type: CrossEntropyLoss
26 | coef: [1, 0.4]
27 |
--------------------------------------------------------------------------------
/configs/ocrnet/ocrnet_hrnetw18_cityscapes_1024x512_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 2
4 | iters: 160000
5 |
6 | model:
7 | type: OCRNet
8 | backbone:
9 | type: HRNet_W18
10 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/hrnet_w18_ssld.tar.gz
11 | num_classes: 19
12 | backbone_indices: [0]
13 |
14 | optimizer:
15 | type: sgd
16 |
17 | lr_scheduler:
18 | type: PolynomialDecay
19 | learning_rate: 0.01
20 | power: 0.9
21 |
22 |
23 | loss:
24 | types:
25 | - type: CrossEntropyLoss
26 | - type: CrossEntropyLoss
27 | coef: [1, 0.4]
28 |
--------------------------------------------------------------------------------
/configs/ocrnet/ocrnet_hrnetw18_voc12aug_512x512_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/pascal_voc12aug.yml'
2 |
3 | model:
4 | type: OCRNet
5 | backbone:
6 | type: HRNet_W18
7 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/hrnet_w18_ssld.tar.gz
8 | backbone_indices: [0]
9 |
10 | optimizer:
11 | type: sgd
12 |
13 | lr_scheduler:
14 | type: PolynomialDecay
15 | learning_rate: 0.01
16 | power: 0.9
17 |
18 |
19 | loss:
20 | types:
21 | - type: CrossEntropyLoss
22 | - type: CrossEntropyLoss
23 | coef: [1, 1]
24 |
--------------------------------------------------------------------------------
/configs/ocrnet/ocrnet_hrnetw48_cityscapes_1024x512_160k.yml:
--------------------------------------------------------------------------------
1 |
2 | _base_: '../_base_/cityscapes.yml'
3 |
4 | batch_size: 2
5 | iters: 160000
6 |
7 | model:
8 | type: OCRNet
9 | backbone:
10 | type: HRNet_W48
11 |
12 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/hrnet_w48_ssld.tar.gz
13 | num_classes: 19
14 | backbone_indices: [0]
15 |
16 | optimizer:
17 | type: sgd
18 |
19 | lr_scheduler:
20 | type: PolynomialDecay
21 | learning_rate: 0.01
22 | power: 0.9
23 |
24 |
25 |
26 | loss:
27 | types:
28 | - type: CrossEntropyLoss
29 | - type: CrossEntropyLoss
30 | coef: [1, 0.4]
31 |
--------------------------------------------------------------------------------
/configs/ocrnet/ocrnet_hrnetw48_cityscapes_1024x512_40k.yml:
--------------------------------------------------------------------------------
1 |
2 | _base_: '../_base_/cityscapes.yml'
3 |
4 | batch_size: 2
5 | iters: 40000
6 |
7 | model:
8 | type: OCRNet
9 | backbone:
10 | type: HRNet_W48
11 |
12 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/hrnet_w48_ssld.tar.gz
13 | num_classes: 19
14 | backbone_indices: [0]
15 |
16 | optimizer:
17 | type: sgd
18 |
19 | lr_scheduler:
20 | type: PolynomialDecay
21 | learning_rate: 0.01
22 | power: 0.9
23 |
24 | loss:
25 | types:
26 | - type: CrossEntropyLoss
27 | - type: CrossEntropyLoss
28 | coef: [1, 0.4]
29 |
--------------------------------------------------------------------------------
/configs/ocrnet/ocrnet_hrnetw48_voc12aug_512x512_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: './ocrnet_hrnetw18_voc12aug_512x512_40k.yml'
2 |
3 | model:
4 | backbone:
5 | type: HRNet_W48
6 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/hrnet_w48_ssld.tar.gz
7 |
--------------------------------------------------------------------------------
/configs/pointrend/pointrend_resnet101_os8_cityscapes_1024x512_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: 'pointrend_resnet50_os8_cityscapes_1024x512_80k.yml'
2 |
3 |
4 | model:
5 | backbone:
6 | type: ResNet101_vd
7 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz
8 |
--------------------------------------------------------------------------------
/configs/pointrend/pointrend_resnet101_os8_voc12aug_512x512_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: 'pointrend_resnet50_os8_voc12aug_512x512_40k.yml'
2 |
3 |
4 | model:
5 | backbone:
6 | type: ResNet101_vd
7 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz
8 |
--------------------------------------------------------------------------------
/configs/pointrend/pointrend_resnet50_os8_cityscapes_1024x512_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 |
4 | model:
5 | type: PointRend
6 | backbone:
7 | type: ResNet50_vd
8 | output_stride: 8
9 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
10 | backbone_indices: [0, 1, 2, 3]
11 |
12 |
13 | loss:
14 | types:
15 | - type: CrossEntropyLoss
16 | - type: PointCrossEntropyLoss
17 | coef: [1, 1]
18 |
19 |
20 | optimizer:
21 | type: sgd
22 | momentum: 0.9
23 | weight_decay: 0.0005
24 |
--------------------------------------------------------------------------------
/configs/pointrend/pointrend_resnet50_os8_voc12aug_512x512_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/pascal_voc12aug.yml'
2 |
3 |
4 | model:
5 | type: PointRend
6 | backbone:
7 | type: ResNet50_vd
8 | output_stride: 8
9 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
10 | backbone_indices: [0, 1, 2, 3]
11 |
12 |
13 | loss:
14 | types:
15 | - type: CrossEntropyLoss
16 | - type: PointCrossEntropyLoss
17 | coef: [1, 1]
18 |
19 |
20 | optimizer:
21 | type: sgd
22 | momentum: 0.9
23 | weight_decay: 0.0005
24 |
--------------------------------------------------------------------------------
/configs/pp_humanseg_lite/README.md:
--------------------------------------------------------------------------------
1 | # PP-HumanSeg-Lite
2 |
3 | ConnectNet is a self-developed, ultra-lightweight model suitable for real-time segmentation scenarios on the web or mobile. See the [paper](https://arxiv.org/abs/2112.07146) for more information.
4 |
5 | ## Network Structure
6 | 
7 |
8 | ## Performance
9 | Refer to [PP-HumanSeg](../../contrib/PP-HumanSeg).
10 |
--------------------------------------------------------------------------------
/configs/pp_humanseg_lite/pp_humanseg_lite_export_398x224.yml:
--------------------------------------------------------------------------------
1 |
2 | model:
3 | type: PPHumanSegLite
4 | align_corners: False
5 | num_classes: 2
6 |
7 | export:
8 | transforms:
9 | - type: Resize
10 | target_size: [398, 224]
11 | - type: Normalize
12 |
13 | val_dataset:
14 | type: Dataset
15 | dataset_root: data/mini_supervisely
16 | val_path: data/mini_supervisely/val.txt
17 | num_classes: 2
18 | transforms:
19 | - type: Resize
20 | target_size: [398, 224]
21 | - type: Normalize
22 | mode: val
23 |
--------------------------------------------------------------------------------
/configs/pp_humanseg_lite/pphumanseg_lite.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/configs/pp_humanseg_lite/pphumanseg_lite.png
--------------------------------------------------------------------------------
/configs/pp_liteseg/pp_liteseg_stdc1_camvid_960x720_10k_for_test.yml:
--------------------------------------------------------------------------------
1 | _base_: './pp_liteseg_stdc1_camvid_960x720_10k.yml'
2 |
3 | val_dataset:
4 | val_path: data/camvid/test.txt
5 |
--------------------------------------------------------------------------------
/configs/pp_liteseg/pp_liteseg_stdc1_cityscapes_1024x512_scale0.75_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: './pp_liteseg_stdc1_cityscapes_1024x512_scale0.5_160k.yml'
2 |
3 | test_config:
4 | aug_eval: True
5 | scales: 0.75
6 |
--------------------------------------------------------------------------------
/configs/pp_liteseg/pp_liteseg_stdc1_cityscapes_1024x512_scale1.0_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: './pp_liteseg_stdc1_cityscapes_1024x512_scale0.5_160k.yml'
2 |
3 | train_dataset:
4 | transforms:
5 | - type: ResizeStepScaling
6 | min_scale_factor: 0.5
7 | max_scale_factor: 2.0
8 | scale_step_size: 0.25
9 | - type: RandomPaddingCrop
10 | crop_size: [1024, 512]
11 | - type: RandomHorizontalFlip
12 | - type: RandomDistort
13 | brightness_range: 0.5
14 | contrast_range: 0.5
15 | saturation_range: 0.5
16 | - type: Normalize
17 | mode: train
18 |
19 | test_config:
20 | aug_eval: True
21 | scales: 1.0
22 |
--------------------------------------------------------------------------------
/configs/pp_liteseg/pp_liteseg_stdc2_camvid_960x720_10k.yml:
--------------------------------------------------------------------------------
1 | _base_: './pp_liteseg_stdc1_camvid_960x720_10k.yml'
2 |
3 | model:
4 | _inherited_: False # not inherit the model params from the base yaml
5 | type: PPLiteSeg
6 | backbone:
7 | type: STDC2
8 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/PP_STDCNet2.tar.gz
9 |
--------------------------------------------------------------------------------
/configs/pp_liteseg/pp_liteseg_stdc2_camvid_960x720_10k_for_test.yml:
--------------------------------------------------------------------------------
1 | _base_: './pp_liteseg_stdc1_camvid_960x720_10k.yml'
2 |
3 | val_dataset:
4 | val_path: data/camvid/test.txt
5 |
6 | model:
7 | _inherited_: False
8 | type: PPLiteSeg
9 | backbone:
10 | type: STDC2
11 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/PP_STDCNet2.tar.gz
12 |
--------------------------------------------------------------------------------
/configs/pp_liteseg/pp_liteseg_stdc2_cityscapes_1024x512_scale0.5_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: './pp_liteseg_stdc1_cityscapes_1024x512_scale0.5_160k.yml'
2 |
3 | test_config:
4 | aug_eval: True
5 | scales: 0.5
6 |
7 | model:
8 | _inherited_: False # not inherit the model params from the base yaml
9 | type: PPLiteSeg
10 | backbone:
11 | type: STDC2
12 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/PP_STDCNet2.tar.gz
13 |
--------------------------------------------------------------------------------
/configs/pp_liteseg/pp_liteseg_stdc2_cityscapes_1024x512_scale0.75_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: './pp_liteseg_stdc1_cityscapes_1024x512_scale0.5_160k.yml'
2 |
3 | test_config:
4 | aug_eval: True
5 | scales: 0.75
6 |
7 | model:
8 | _inherited_: False
9 | type: PPLiteSeg
10 | backbone:
11 | type: STDC2
12 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/PP_STDCNet2.tar.gz
13 |
--------------------------------------------------------------------------------
/configs/pspnet/pspnet_resnet101_os8_cityscapes_1024x512_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: 'pspnet_resnet50_os8_cityscapes_1024x512_80k.yml'
2 |
3 | model:
4 | backbone:
5 | type: ResNet101_vd
6 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz
7 |
--------------------------------------------------------------------------------
/configs/pspnet/pspnet_resnet101_os8_voc12aug_512x512_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: 'pspnet_resnet50_os8_voc12aug_512x512_40k.yml'
2 |
3 | train_dataset:
4 | transforms:
5 | - type: RandomPaddingCrop
6 | crop_size: [512, 512]
7 | - type: RandomHorizontalFlip
8 | - type: RandomDistort
9 | brightness_range: 0.4
10 | contrast_range: 0.4
11 | saturation_range: 0.4
12 | - type: Normalize
13 |
14 | model:
15 | backbone:
16 | type: ResNet101_vd
17 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz
18 |
--------------------------------------------------------------------------------
/configs/pspnet/pspnet_resnet50_os8_cityscapes_1024x512_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 2
4 | iters: 80000
5 |
6 | lr_scheduler:
7 | type: PolynomialDecay
8 | learning_rate: 0.01
9 | power: 0.9
10 | end_lr: 1.0e-5
11 |
12 | loss:
13 | types:
14 | - type: CrossEntropyLoss
15 | coef: [1, 0.4]
16 |
17 | model:
18 | type: PSPNet
19 | backbone:
20 | type: ResNet50_vd
21 | output_stride: 8
22 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
23 | enable_auxiliary_loss: True
24 | align_corners: False
25 | pretrained: null
26 |
--------------------------------------------------------------------------------
/configs/pspnet/pspnet_resnet50_os8_voc12aug_512x512_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/pascal_voc12aug.yml'
2 |
3 | loss:
4 | types:
5 | - type: CrossEntropyLoss
6 | coef: [1, 0.4]
7 |
8 | model:
9 | type: PSPNet
10 | backbone:
11 | type: ResNet50_vd
12 | output_stride: 8
13 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
14 | enable_auxiliary_loss: True
15 | align_corners: False
16 | pretrained: null
17 |
--------------------------------------------------------------------------------
/configs/pssl/pp_liteseg_stdc2_cityscapes_1024x512_scale1.0_160k_pssl.yml:
--------------------------------------------------------------------------------
1 | _base_: '../pp_liteseg/pp_liteseg_stdc2_cityscapes_1024x512_scale1.0_160k.yml'
2 |
3 | model:
4 | _inherited_: False
5 | type: PPLiteSeg
6 | backbone:
7 | type: STDC2
8 | pretrained: null
9 | pretrained: /root/codespace/PaddleSeg-release-2.5/work_dirs_pp_liteseg_stdc2_pssl/snapshot/iter_66725/model.pdparams
10 |
--------------------------------------------------------------------------------
/configs/pssl/pp_liteseg_stdc2_pssl.yml:
--------------------------------------------------------------------------------
1 | _base_: 'pp_liteseg_stdc1_pssl.yml'
2 |
3 | model:
4 |   _inherited_: False # do not inherit the model params from the base yaml
5 | type: PPLiteSeg
6 | backbone:
7 | type: STDC2
8 | relative_lr: 0.1
9 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/PP_STDCNet2.tar.gz
10 | pretrained: null
11 | num_classes: 1001
12 |
--------------------------------------------------------------------------------
/configs/pssl/stdc2_seg_pssl.yml:
--------------------------------------------------------------------------------
1 | _base_: 'stdc1_seg_pssl.yml'
2 |
3 | model:
4 | backbone:
5 | type: STDC2
6 | relative_lr: 0.1
7 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/STDCNet2.tar.gz
8 |
--------------------------------------------------------------------------------
/configs/segformer/segformer_b0_cityscapes_1024x1024_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes_1024x1024.yml'
2 |
3 | batch_size: 2
4 | iters: 160000
5 |
6 | model:
7 | type: SegFormer_B0
8 | num_classes: 19
9 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/mix_vision_transformer_b0.tar.gz
10 |
11 | optimizer:
12 | _inherited_: False
13 | type: AdamW
14 | beta1: 0.9
15 | beta2: 0.999
16 | weight_decay: 0.01
17 |
18 | lr_scheduler:
19 | type: PolynomialDecay
20 | learning_rate: 0.00006
21 | power: 1
22 |
23 | loss:
24 | types:
25 | - type: CrossEntropyLoss
26 | coef: [1]
27 |
28 | test_config:
29 | is_slide: True
30 | crop_size: [1024, 1024]
31 | stride: [768, 768]
32 |
--------------------------------------------------------------------------------
/configs/segformer/segformer_b0_cityscapes_1024x512_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 1
4 | iters: 160000
5 |
6 | model:
7 | type: SegFormer_B0
8 | num_classes: 19
9 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/mix_vision_transformer_b0.tar.gz
10 |
11 | optimizer:
12 | _inherited_: False
13 | type: AdamW
14 | beta1: 0.9
15 | beta2: 0.999
16 | weight_decay: 0.01
17 |
18 | lr_scheduler:
19 | type: PolynomialDecay
20 | learning_rate: 0.00006
21 | power: 1
22 |
23 | loss:
24 | types:
25 | - type: CrossEntropyLoss
26 | coef: [1]
27 |
--------------------------------------------------------------------------------
/configs/segformer/segformer_b1_cityscapes_1024x1024_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes_1024x1024.yml'
2 |
3 | batch_size: 2
4 | iters: 160000
5 |
6 | model:
7 | type: SegFormer_B1
8 | num_classes: 19
9 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/mix_vision_transformer_b1.tar.gz
10 |
11 | optimizer:
12 | _inherited_: False
13 | type: AdamW
14 | beta1: 0.9
15 | beta2: 0.999
16 | weight_decay: 0.01
17 |
18 | lr_scheduler:
19 | type: PolynomialDecay
20 | learning_rate: 0.00006
21 | power: 1
22 |
23 | loss:
24 | types:
25 | - type: CrossEntropyLoss
26 | coef: [1]
27 |
28 | test_config:
29 | is_slide: True
30 | crop_size: [1024, 1024]
31 | stride: [768, 768]
32 |
--------------------------------------------------------------------------------
/configs/segformer/segformer_b1_cityscapes_1024x512_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 1
4 | iters: 160000
5 |
6 | model:
7 | type: SegFormer_B1
8 | num_classes: 19
9 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/mix_vision_transformer_b1.tar.gz
10 |
11 | optimizer:
12 | _inherited_: False
13 | type: AdamW
14 | beta1: 0.9
15 | beta2: 0.999
16 | weight_decay: 0.01
17 |
18 | lr_scheduler:
19 | type: PolynomialDecay
20 | learning_rate: 0.00006
21 | power: 1
22 |
23 | loss:
24 | types:
25 | - type: CrossEntropyLoss
26 | coef: [1]
27 |
--------------------------------------------------------------------------------
/configs/segformer/segformer_b2_cityscapes_1024x1024_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes_1024x1024.yml'
2 |
3 | batch_size: 2
4 | iters: 160000
5 |
6 | model:
7 | type: SegFormer_B2
8 | num_classes: 19
9 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/mix_vision_transformer_b2.tar.gz
10 |
11 | optimizer:
12 | _inherited_: False
13 | type: AdamW
14 | beta1: 0.9
15 | beta2: 0.999
16 | weight_decay: 0.01
17 |
18 | lr_scheduler:
19 | type: PolynomialDecay
20 | learning_rate: 0.00006
21 | power: 1
22 |
23 | loss:
24 | types:
25 | - type: CrossEntropyLoss
26 | coef: [1]
27 |
28 | test_config:
29 | is_slide: True
30 | crop_size: [1024, 1024]
31 | stride: [768, 768]
32 |
--------------------------------------------------------------------------------
/configs/segformer/segformer_b2_cityscapes_1024x512_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 1
4 | iters: 160000
5 |
6 | model:
7 | type: SegFormer_B2
8 | num_classes: 19
9 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/mix_vision_transformer_b2.tar.gz
10 |
11 | optimizer:
12 | _inherited_: False
13 | type: AdamW
14 | beta1: 0.9
15 | beta2: 0.999
16 | weight_decay: 0.01
17 |
18 | lr_scheduler:
19 | type: PolynomialDecay
20 | learning_rate: 0.00006
21 | power: 1
22 |
23 | loss:
24 | types:
25 | - type: CrossEntropyLoss
26 | coef: [1]
27 |
--------------------------------------------------------------------------------
/configs/segformer/segformer_b3_cityscapes_1024x1024_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes_1024x1024.yml'
2 |
3 | batch_size: 2
4 | iters: 160000
5 |
6 | model:
7 | type: SegFormer_B3
8 | num_classes: 19
9 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/mix_vision_transformer_b3.tar.gz
10 |
11 | optimizer:
12 | _inherited_: False
13 | type: AdamW
14 | beta1: 0.9
15 | beta2: 0.999
16 | weight_decay: 0.01
17 |
18 | lr_scheduler:
19 | type: PolynomialDecay
20 | learning_rate: 0.00006
21 | power: 1
22 |
23 | loss:
24 | types:
25 | - type: CrossEntropyLoss
26 | coef: [1]
27 |
28 | test_config:
29 | is_slide: True
30 | crop_size: [1024, 1024]
31 | stride: [768, 768]
32 |
--------------------------------------------------------------------------------
/configs/segformer/segformer_b3_cityscapes_1024x512_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 1
4 | iters: 160000
5 |
6 | model:
7 | type: SegFormer_B3
8 | num_classes: 19
9 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/mix_vision_transformer_b3.tar.gz
10 |
11 | optimizer:
12 | _inherited_: False
13 | type: AdamW
14 | beta1: 0.9
15 | beta2: 0.999
16 | weight_decay: 0.01
17 |
18 | lr_scheduler:
19 | type: PolynomialDecay
20 | learning_rate: 0.00006
21 | power: 1
22 |
23 | loss:
24 | types:
25 | - type: CrossEntropyLoss
26 | coef: [1]
27 |
--------------------------------------------------------------------------------
/configs/segformer/segformer_b4_cityscapes_1024x1024_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes_1024x1024.yml'
2 |
3 | batch_size: 2
4 | iters: 160000
5 |
6 | model:
7 | type: SegFormer_B4
8 | num_classes: 19
9 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/mix_vision_transformer_b4.tar.gz
10 |
11 | optimizer:
12 | _inherited_: False
13 | type: AdamW
14 | beta1: 0.9
15 | beta2: 0.999
16 | weight_decay: 0.01
17 |
18 | lr_scheduler:
19 | type: PolynomialDecay
20 | learning_rate: 0.00006
21 | power: 1
22 |
23 | loss:
24 | types:
25 | - type: CrossEntropyLoss
26 | coef: [1]
27 |
28 | test_config:
29 | is_slide: True
30 | crop_size: [1024, 1024]
31 | stride: [768, 768]
32 |
--------------------------------------------------------------------------------
/configs/segformer/segformer_b4_cityscapes_1024x512_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 1
4 | iters: 160000
5 |
6 | model:
7 | type: SegFormer_B4
8 | num_classes: 19
9 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/mix_vision_transformer_b4.tar.gz
10 |
11 | optimizer:
12 | _inherited_: False
13 | type: AdamW
14 | beta1: 0.9
15 | beta2: 0.999
16 | weight_decay: 0.01
17 |
18 | lr_scheduler:
19 | type: PolynomialDecay
20 | learning_rate: 0.00006
21 | power: 1
22 |
23 | loss:
24 | types:
25 | - type: CrossEntropyLoss
26 | coef: [1]
27 |
--------------------------------------------------------------------------------
/configs/segformer/segformer_b5_cityscapes_1024x1024_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes_1024x1024.yml'
2 |
3 | batch_size: 1
4 | iters: 160000
5 |
6 | model:
7 | type: SegFormer_B5
8 | num_classes: 19
9 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/mix_vision_transformer_b5.tar.gz
10 |
11 | optimizer:
12 | _inherited_: False
13 | type: AdamW
14 | beta1: 0.9
15 | beta2: 0.999
16 | weight_decay: 0.01
17 |
18 | lr_scheduler:
19 | type: PolynomialDecay
20 | learning_rate: 0.00006
21 | power: 1
22 |
23 | loss:
24 | types:
25 | - type: CrossEntropyLoss
26 | coef: [1]
27 |
28 | test_config:
29 | is_slide: True
30 | crop_size: [1024, 1024]
31 | stride: [768, 768]
32 |
--------------------------------------------------------------------------------
/configs/segformer/segformer_b5_cityscapes_1024x512_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 1
4 | iters: 160000
5 |
6 | model:
7 | type: SegFormer_B5
8 | num_classes: 19
9 |
10 | optimizer:
11 | _inherited_: False
12 | type: AdamW
13 | beta1: 0.9
14 | beta2: 0.999
15 | weight_decay: 0.01
16 |
17 | lr_scheduler:
18 | type: PolynomialDecay
19 | learning_rate: 0.00006
20 | power: 1
21 |
22 |
23 | loss:
24 | types:
25 | - type: CrossEntropyLoss
26 | coef: [1]
27 |
--------------------------------------------------------------------------------
/configs/segmenter/segmenter_vit_base_mask_ade20k_512x512_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: './segmenter_vit_base_linear_ade20k_512x512_160k.yml'
2 |
3 | model:
4 | type: MaskSegmenter
5 | h_embed_dim: 768
6 | h_depth: 2
7 | h_num_heads: 12
8 | h_mlp_ratio: 4
9 | h_drop_rate: 0.0
10 | h_drop_path_rate: 0.1
11 |
--------------------------------------------------------------------------------
/configs/segmenter/segmenter_vit_small_linear_ade20k_512x512_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: './segmenter_vit_base_linear_ade20k_512x512_160k.yml'
2 |
3 | model:
4 | type: LinearSegmenter
5 | backbone:
6 | type: VisionTransformer
7 | img_size: 512
8 | patch_size: 16
9 | embed_dim: 384
10 | depth: 12
11 | num_heads: 6
12 | mlp_ratio: 4
13 | qkv_bias: True
14 | drop_rate: 0.0
15 | drop_path_rate: 0.1
16 | final_norm: True
17 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/pretrained_models/vit_small_patch16_384_augreg.tar.gz
18 |
--------------------------------------------------------------------------------
/configs/segmenter/segmenter_vit_small_mask_ade20k_512x512_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: './segmenter_vit_small_linear_ade20k_512x512_160k.yml'
2 |
3 | model:
4 | type: MaskSegmenter
5 | h_embed_dim: 384
6 | h_depth: 2
7 | h_num_heads: 6
8 | h_mlp_ratio: 4
9 | h_drop_rate: 0.0
10 | h_drop_path_rate: 0.1
11 |
--------------------------------------------------------------------------------
/configs/segnet/segnet_cityscapes_1024x512_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 2
4 | iters: 80000
5 |
6 | model:
7 | type: SegNet
8 | pretrained: Null
9 |
--------------------------------------------------------------------------------
/configs/sfnet/sfnet_resnet50_os8_cityscapes_1024x1024_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: 'sfnet_resnet18_os8_cityscapes_1024x1024_80k.yml'
2 |
3 | batch_size: 2
4 | iters: 80000
5 |
6 | model:
7 | type: SFNet
8 | backbone:
9 | type: ResNet50_vd
10 | output_stride: 8
11 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
12 | backbone_indices: [0, 1, 2, 3]
13 |
--------------------------------------------------------------------------------
/configs/smrt/bisenetv2.yml:
--------------------------------------------------------------------------------
1 | _base_: './base_cfg.yml'
2 |
3 | model:
4 | type: BiSeNetV2
5 |
6 | loss:
7 | types:
8 | - type: MixedLoss
9 | losses:
10 | - type: OhemCrossEntropyLoss
11 | min_kept: 65000
12 | - type: LovaszSoftmaxLoss
13 | coef: [0.8, 0.2]
14 | coef: [1, 1, 1, 1, 1]
--------------------------------------------------------------------------------
/configs/smrt/deeplabv3p_resnet50_os8.yml:
--------------------------------------------------------------------------------
1 | _base_: './base_cfg.yml'
2 |
3 | model:
4 | type: DeepLabV3P
5 | backbone:
6 | type: ResNet50_vd
7 | output_stride: 8
8 | multi_grid: [1, 2, 4]
9 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
10 | backbone_indices: [0, 3]
11 | aspp_ratios: [1, 12, 24, 36]
12 | aspp_out_channels: 256
13 | align_corners: False
14 | pretrained: null
15 |
16 |
17 | loss:
18 | types:
19 | - type: MixedLoss
20 | losses:
21 | - type: OhemCrossEntropyLoss
22 | min_kept: 65000
23 | - type: LovaszSoftmaxLoss
24 | coef: [0.8, 0.2]
25 | coef: [1]
--------------------------------------------------------------------------------
/configs/smrt/ocrnet_hrnetw18.yml:
--------------------------------------------------------------------------------
1 | _base_: './base_cfg.yml'
2 |
3 | model:
4 | type: OCRNet
5 | backbone:
6 | type: HRNet_W18
7 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/hrnet_w18_ssld.tar.gz
8 | backbone_indices: [0]
9 |
10 | loss:
11 | types:
12 | - type: MixedLoss
13 | losses:
14 | - type: OhemCrossEntropyLoss
15 | min_kept: 65000
16 | - type: LovaszSoftmaxLoss
17 | coef: [0.8, 0.2]
18 | coef: [1, 0.4]
--------------------------------------------------------------------------------
/configs/smrt/pp_liteseg_stdc1.yml:
--------------------------------------------------------------------------------
1 | _base_: './base_cfg.yml'
2 |
3 | model:
4 | type: PPLiteSeg
5 | backbone:
6 | type: STDC1
7 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/PP_STDCNet1.tar.gz
8 | arm_out_chs: [32, 64, 128]
9 | seg_head_inter_chs: [32, 64, 64]
10 |
11 | loss:
12 | types:
13 | - type: MixedLoss
14 | losses:
15 | - type: OhemCrossEntropyLoss
16 | min_kept: 65000
17 | - type: LovaszSoftmaxLoss
18 | coef: [0.8, 0.2]
19 | coef: [1, 1, 1]
--------------------------------------------------------------------------------
/configs/smrt/pp_liteseg_stdc2.yml:
--------------------------------------------------------------------------------
1 | _base_: './base_cfg.yml'
2 |
3 | model:
4 | type: PPLiteSeg
5 | backbone:
6 | type: STDC2
7 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/PP_STDCNet2.tar.gz
8 |
9 | loss:
10 | types:
11 | - type: MixedLoss
12 | losses:
13 | - type: OhemCrossEntropyLoss
14 | min_kept: 65000
15 | - type: LovaszSoftmaxLoss
16 | coef: [0.8, 0.2]
17 | coef: [1, 1, 1]
--------------------------------------------------------------------------------
/configs/smrt/sfnet_resnet18_os8.yml:
--------------------------------------------------------------------------------
1 | _base_: './base_cfg.yml'
2 |
3 | model:
4 | type: SFNet
5 | backbone:
6 | type: ResNet18_vd
7 | output_stride: 8
8 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet18_vd_ssld_v2.tar.gz
9 | backbone_indices: [0, 1, 2, 3]
10 |
11 | loss:
12 | types:
13 | - type: MixedLoss
14 | losses:
15 | - type: OhemCrossEntropyLoss
16 | min_kept: 65000
17 | - type: LovaszSoftmaxLoss
18 | coef: [0.8, 0.2]
19 | coef: [1]
--------------------------------------------------------------------------------
/configs/stdcseg/stdc1_seg_cityscapes_1024x512_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 12
4 | iters: 80000
5 |
6 | model:
7 | type: STDCSeg
8 | backbone:
9 | type: STDC1
10 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/STDCNet1.tar.gz
11 | pretrained: null
12 |
13 | loss:
14 | types:
15 | - type: OhemCrossEntropyLoss
16 | - type: OhemCrossEntropyLoss
17 | - type: OhemCrossEntropyLoss
18 | - type: DetailAggregateLoss
19 | coef: [1, 1, 1, 1]
20 |
--------------------------------------------------------------------------------
/configs/stdcseg/stdc1_seg_voc12aug_512x512_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/pascal_voc12aug.yml'
2 |
3 |
4 | model:
5 | type: STDCSeg
6 | backbone:
7 | type: STDC1
8 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/STDCNet1.tar.gz
9 | pretrained: null
10 |
11 | loss:
12 | types:
13 | - type: OhemCrossEntropyLoss
14 | - type: OhemCrossEntropyLoss
15 | - type: OhemCrossEntropyLoss
16 | - type: DetailAggregateLoss
17 | coef: [1, 1, 1, 1]
18 |
--------------------------------------------------------------------------------
/configs/stdcseg/stdc2_seg_cityscapes_1024x512_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: 'stdc1_seg_cityscapes_1024x512_80k.yml'
2 |
3 |
4 | model:
5 | backbone:
6 | type: STDC2
7 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/STDCNet2.tar.gz
8 |
--------------------------------------------------------------------------------
/configs/stdcseg/stdc2_seg_voc12aug_512x512_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: 'stdc1_seg_voc12aug_512x512_40k.yml'
2 |
3 |
4 | model:
5 | backbone:
6 | type: STDC2
7 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/STDCNet2.tar.gz
8 |
--------------------------------------------------------------------------------
/configs/u2net/README.md:
--------------------------------------------------------------------------------
1 | # U2-Net: Going deeper with nested U-structure for salient object detection
2 |
3 | ## Reference
4 | > Qin, Xuebin, Zichen Zhang, Chenyang Huang, Masood Dehghan, Osmar R. Zaiane, and Martin Jagersand. "U2-Net: Going deeper with nested U-structure for salient object detection." Pattern Recognition 106 (2020): 107404.
5 |
--------------------------------------------------------------------------------
/configs/u2net/u2net_cityscapes_1024x512_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 4
4 | iters: 160000
5 |
6 | model:
7 | type: U2Net
8 | num_classes: 19
9 | pretrained: Null
10 |
11 | loss:
12 | coef: [1, 1, 1, 1, 1, 1, 1]
13 |
--------------------------------------------------------------------------------
/configs/u2net/u2netp_cityscapes_1024x512_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 4
4 | iters: 160000
5 |
6 | model:
7 | type: U2Netp
8 | num_classes: 19
9 | pretrained: Null
10 |
11 | loss:
12 | coef: [1, 1, 1, 1, 1, 1, 1]
13 |
--------------------------------------------------------------------------------
/configs/unet/unet_chasedb1_128x128_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/chase_db1.yml'
2 |
3 | batch_size: 4
4 | iters: 40000
5 |
6 | model:
7 | type: UNet
8 | num_classes: 2
9 | use_deconv: False
10 | pretrained: Null
11 |
--------------------------------------------------------------------------------
/configs/unet/unet_cityscapes_1024x512_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 4
4 | iters: 160000
5 |
6 | model:
7 | type: UNet
8 | num_classes: 19
9 | use_deconv: False
10 | pretrained: Null
11 |
--------------------------------------------------------------------------------
/configs/unet/unet_drive_128x128_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/drive.yml'
2 |
3 | batch_size: 4
4 | iters: 40000
5 |
6 | model:
7 | type: UNet
8 | num_classes: 2
9 | use_deconv: False
10 | pretrained: Null
11 |
--------------------------------------------------------------------------------
/configs/unet/unet_hrf_256x256_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/hrf.yml'
2 |
3 | batch_size: 4
4 | iters: 40000
5 |
6 | model:
7 | type: UNet
8 | num_classes: 2
9 | use_deconv: False
10 | pretrained: Null
11 |
--------------------------------------------------------------------------------
/configs/unet/unet_stare_128x128_40k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/stare.yml'
2 |
3 | batch_size: 4
4 | iters: 40000
5 |
6 | model:
7 | type: UNet
8 | num_classes: 2
9 | use_deconv: False
10 | pretrained: Null
11 |
--------------------------------------------------------------------------------
/configs/unet_3plus/README.md:
--------------------------------------------------------------------------------
1 | # UNet 3+: A Full-Scale Connected UNet for Medical Image Segmentation
2 |
3 | ## Reference
4 |
5 | > Huang H, Lin L, Tong R, et al. UNet 3+: A Full-Scale Connected UNet for Medical Image Segmentation[J]. ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 2020.
6 |
--------------------------------------------------------------------------------
/configs/unet_3plus/unet_3plus_cityscapes_1024x512_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 4
4 | iters: 160000
5 |
6 | model:
7 | type: UNet3Plus
8 | in_channels: 3
9 | num_classes: 19
10 | is_batchnorm: True
11 | is_deepsup: False
12 | is_CGM: False
13 |
--------------------------------------------------------------------------------
/configs/unet_plusplus/README.md:
--------------------------------------------------------------------------------
1 | # A Nested U-Net Architecture for Medical Image Segmentation
2 |
3 | ## Reference
4 |
5 | > Zhou, Zongwei, Md Mahfuzur Rahman Siddiquee, Nima Tajbakhsh, and Jianming Liang. "Unet++: A nested u-net architecture for medical image segmentation." In Deep Learning in Medical Image Analysis and Multimodal Learning for Clinical Decision Support, pp. 3-11. Springer, Cham, 2018.
6 |
--------------------------------------------------------------------------------
/configs/unet_plusplus/unet_plusplus_cityscapes_1024x512_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 4
4 | iters: 160000
5 |
6 | model:
7 | type: UNetPlusPlus
8 | in_channels: 3
9 | num_classes: 19
10 | use_deconv: False
11 | align_corners: False
12 | pretrained: Null
13 | is_ds: True
14 |
--------------------------------------------------------------------------------
/contrib/CityscapesSOTA/configs/README.md:
--------------------------------------------------------------------------------
1 | # Hierarchical multi-scale attention for semantic segmentation
2 |
3 | ## Reference
4 | > Tao, Andrew, Karan Sapra, and Bryan Catanzaro. "Hierarchical multi-scale attention for semantic segmentation." arXiv preprint arXiv:2005.10821 (2020).
5 |
--------------------------------------------------------------------------------
/contrib/DomainAdaptation/requirements.txt:
--------------------------------------------------------------------------------
1 | albumentations
2 | paddleseg==2.3.0
3 |
--------------------------------------------------------------------------------
/contrib/DomainAdaptation/run-DA_src.sh:
--------------------------------------------------------------------------------
1 | export CUDA_VISIBLE_DEVICES=2
2 |
3 | yml=deeplabv2_resnet101_os8_gta5cityscapes_1280x640_160k_newds_gta5src
4 | save_dir=saved_model_develop/${yml}_test
5 | mkdir -p ${save_dir}
6 |
7 | python train.py \
8 | --config configs/deeplabv2/${yml}.yml --use_vdl --save_dir $save_dir \
9 | --save_interval 1000 --log_iters 30 \
10 | --num_workers 4 --do_eval \
11 | --keep_checkpoint_max 10 --seed 1234 \
12 | 2>&1 | tee ${save_dir}/log \
13 |
--------------------------------------------------------------------------------
/contrib/LaneSeg/data/images/added_prediction/3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/contrib/LaneSeg/data/images/added_prediction/3.jpg
--------------------------------------------------------------------------------
/contrib/LaneSeg/data/images/points/3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/contrib/LaneSeg/data/images/points/3.jpg
--------------------------------------------------------------------------------
/contrib/LaneSeg/data/images/pseudo_color_prediction/3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/contrib/LaneSeg/data/images/pseudo_color_prediction/3.png
--------------------------------------------------------------------------------
/contrib/LaneSeg/data/test_images/3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/contrib/LaneSeg/data/test_images/3.jpg
--------------------------------------------------------------------------------
/contrib/LaneSeg/third_party/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/contrib/MedicalSeg/configs/_base_/global_configs.yml:
--------------------------------------------------------------------------------
1 | data_root: data/
2 |
--------------------------------------------------------------------------------
/contrib/MedicalSeg/configs/lung_coronavirus/vnet_lung_coronavirus_128_128_128_15k.yml:
--------------------------------------------------------------------------------
1 | _base_: 'lung_coronavirus.yml'
2 |
3 | model:
4 | type: VNet
5 | elu: False
6 | in_channels: 1
7 | num_classes: 3
8 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/lung_coronavirus/vnet_lung_coronavirus_128_128_128_15k/pretrain/model.pdparams
9 |
--------------------------------------------------------------------------------
/contrib/MedicalSeg/configs/mri_spine_seg/vnet_mri_spine_seg_512_512_12_15k.yml:
--------------------------------------------------------------------------------
1 | _base_: 'mri_spine_seg_1e-2_big_rmresizecrop_class20.yml'
2 |
3 | model:
4 | type: VNet
5 | elu: False
6 | in_channels: 1
7 | num_classes: 20
8 | pretrained: null
9 | kernel_size: [[2,2,4], [2,2,2], [2,2,2], [2,2,2]]
10 | stride_size: [[2,2,1], [2,2,1], [2,2,2], [2,2,2]]
11 |
--------------------------------------------------------------------------------
/contrib/MedicalSeg/configs/mri_spine_seg/vnetdeepsup_mri_spine_seg_512_512_12_15k.yml:
--------------------------------------------------------------------------------
1 | _base_: 'mri_spine_seg_1e-2_big_rmresizecrop_class20.yml'
2 |
3 | model:
4 | type: VNetDeepSup
5 | elu: False
6 | in_channels: 1
7 | num_classes: 20
8 | pretrained: null
9 | kernel_size: [[2,2,4], [2,2,2], [2,2,2], [2,2,2]]
10 | stride_size: [[2,2,1], [2,2,1], [2,2,2], [2,2,2]]
11 |
12 | loss:
13 | types:
14 | - type: MixedLoss
15 | losses:
16 | - type: CrossEntropyLoss
17 | weight: Null
18 | - type: DiceLoss
19 | coef: [1, 1]
20 | coef: [0.25, 0.25, 0.25, 0.25]
21 |
--------------------------------------------------------------------------------
/contrib/MedicalSeg/configs/msd_brain_seg/unetr_msd_brain_seg_1e-4.yml:
--------------------------------------------------------------------------------
1 | _base_: 'msd_brain_seg_1e-4.yml'
2 |
3 | model:
4 | type: UNETR
5 | img_shape: (128, 128, 128)
6 | in_channels: 4
7 | num_classes: 4
8 | embed_dim: 768
9 | patch_size: 16
10 | num_heads: 12
11 | dropout: 0.1
--------------------------------------------------------------------------------
/contrib/MedicalSeg/configs/schedulers/two_stage_coarseseg_fineseg.yml:
--------------------------------------------------------------------------------
1 | configs:
2 | config1: a.yml
3 | config2: b.yml
4 |
--------------------------------------------------------------------------------
/contrib/MedicalSeg/requirements.txt:
--------------------------------------------------------------------------------
1 | scikit-image
2 | numpy
3 | paddlepaddle-gpu>=2.2.0
4 | SimpleITK>=2.1.1
5 | PyYAML
6 | pynrrd
7 | tqdm
8 | visualdl
9 | scikit-learn
10 | filelock
11 | nibabel
12 | pydicom
13 |
--------------------------------------------------------------------------------
/contrib/MedicalSeg/tools/__init__.py:
--------------------------------------------------------------------------------
1 | from .prepare import Prep
2 | from .preprocess_utils import *
3 |
--------------------------------------------------------------------------------
/contrib/MedicalSeg/tools/preprocess_globals.yml:
--------------------------------------------------------------------------------
1 | use_gpu: False
2 |
--------------------------------------------------------------------------------
/contrib/MedicalSeg/tools/preprocess_utils/__init__.py:
--------------------------------------------------------------------------------
1 | import yaml
2 | import codecs
3 | from . import global_var
4 | # Import global_var first so that everywhere else can change/use the global dict
5 | with codecs.open('tools/preprocess_globals.yml', 'r', 'utf-8') as file:
6 | dic = yaml.load(file, Loader=yaml.FullLoader)
7 | global_var.init()
8 | if dic['use_gpu']:
9 | global_var.set_value('USE_GPU', True)
10 | else:
11 | global_var.set_value('USE_GPU', False)
12 |
13 | from .values import *
14 | from .uncompress import uncompressor
15 | from .geometry import *
16 | from .load_image import *
17 | from .dataset_json import parse_msd_basic_info
18 |
--------------------------------------------------------------------------------
/contrib/MedicalSeg/tools/preprocess_utils/dataset_json.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 |
4 | def parse_msd_basic_info(json_path):
5 | """
6 | get dataset basic info from msd dataset.json
7 | """
8 | dict = json.loads(open(json_path, "r").read())
9 | info = {}
10 | info["modalities"] = tuple(dict["modality"].values())
11 | info["labels"] = dict["labels"]
12 | info["dataset_name"] = dict["name"]
13 | info["dataset_description"] = dict["description"]
14 | info["license_desc"] = dict["licence"]
15 | info["dataset_reference"] = dict["reference"]
16 | return info
17 |
--------------------------------------------------------------------------------
/contrib/PP-HumanSeg/src/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/contrib/PP-HumanSeg/src/__init__.py
--------------------------------------------------------------------------------
/contrib/PanopticDeepLab/configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005.yml:
--------------------------------------------------------------------------------
1 | _base_: ./panoptic_deeplab_resnet50_os32_cityscapes_2049x1025_bs1_90k_lr00005.yml
2 |
3 | batch_size: 8
4 |
5 | train_dataset:
6 | transforms:
7 | - type: ResizeStepScaling
8 | min_scale_factor: 0.5
9 | max_scale_factor: 2.0
10 | scale_step_size: 0.25
11 | - type: RandomPaddingCrop
12 | crop_size: [1025, 513]
13 | label_padding_value: [0, 0, 0]
14 | - type: RandomHorizontalFlip
15 | - type: RandomDistort
16 | brightness_range: 0.4
17 | contrast_range: 0.4
18 | saturation_range: 0.4
19 | - type: Normalize
20 |
--------------------------------------------------------------------------------
/contrib/PanopticDeepLab/docs/panoptic_deeplab.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/contrib/PanopticDeepLab/docs/panoptic_deeplab.jpg
--------------------------------------------------------------------------------
/contrib/PanopticDeepLab/docs/visualization_instance.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/contrib/PanopticDeepLab/docs/visualization_instance.png
--------------------------------------------------------------------------------
/contrib/PanopticDeepLab/docs/visualization_panoptic.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/contrib/PanopticDeepLab/docs/visualization_panoptic.png
--------------------------------------------------------------------------------
/contrib/PanopticDeepLab/docs/visualization_semantic.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/contrib/PanopticDeepLab/docs/visualization_semantic.png
--------------------------------------------------------------------------------
/deploy/cpp/README.md:
--------------------------------------------------------------------------------
1 | # PaddleSeg C++ Predictive Deployment Scenario
2 |
3 | ## Deploy the PaddleSeg model using Paddle Inference C++
4 |
5 | To deploy PaddleSeg model using Paddle Inference C++, please refer to [Tutorial](../../docs/deployment/inference/cpp_inference.md).
6 |
7 | ## Deploy the PaddleSeg model using PaddleX
8 |
9 | Currently, PaddleSeg model C++ deployment can be performed based on PaddleX ([Deployment Tutorial](https://github.com/PaddlePaddle/PaddleX/tree/develop/deploy/cpp)).
10 |
11 | Hardware support:
12 | * CPU(linux/windows)
13 | * GPU(linux/windows)
14 | * Jetson(TX2/Nano/Xavier)
15 |
--------------------------------------------------------------------------------
/deploy/cpp/README_cn.md:
--------------------------------------------------------------------------------
1 | # PaddleSeg C++ 预测部署方案
2 |
3 | ## 使用Paddle Inference C++部署PaddleSeg模型
4 |
5 | 使用Paddle Inference C++部署PaddleSeg模型,请参考[教程](../../docs/deployment/inference/cpp_inference.md)。
6 |
7 | ## 使用PaddleX部署PaddleSeg模型
8 |
9 | 目前可基于PaddleX进行PaddleSeg模型C++部署([部署教程](https://github.com/PaddlePaddle/PaddleX/tree/develop/deploy/cpp))。
10 |
11 | 硬件支持
12 | * CPU(linux/windows)
13 | * GPU(linux/windows)
14 | * Jetson(TX2/Nano/Xavier)
15 |
--------------------------------------------------------------------------------
/deploy/cpp/run_seg_gpu.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set +x
3 | set -e
4 |
5 | WITH_MKL=ON
6 | WITH_GPU=ON
7 | USE_TENSORRT=OFF
8 | DEMO_NAME=test_seg
9 |
10 | work_path=$(dirname $(readlink -f $0))
11 | LIB_DIR="${work_path}/paddle_inference"
12 |
13 | # compile
14 | mkdir -p build
15 | cd build
16 | rm -rf *
17 |
18 | cmake .. \
19 | -DDEMO_NAME=${DEMO_NAME} \
20 | -DWITH_MKL=${WITH_MKL} \
21 | -DWITH_GPU=${WITH_GPU} \
22 | -DUSE_TENSORRT=${USE_TENSORRT} \
23 | -DWITH_STATIC_LIB=OFF \
24 | -DPADDLE_LIB=${LIB_DIR}
25 |
26 | make -j
27 |
28 | # run
29 | cd ..
30 |
31 | ./build/test_seg \
32 | --model_dir=./stdc1seg_infer_model \
33 | --img_path=./cityscapes_demo.png \
34 | --devices=GPU
35 |
--------------------------------------------------------------------------------
/deploy/lite/README.md:
--------------------------------------------------------------------------------
1 | Use Paddle Lite to deploy inference model on Android mobile phone, please refer to [document](../../docs/deployment/lite/lite.md).
2 |
3 | 使用Paddle Lite在安卓手机上部署预测模型,请参考[文档](../../docs/deployment/lite/lite_cn.md).
4 |
--------------------------------------------------------------------------------
/deploy/lite/example/human_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/deploy/lite/example/human_1.png
--------------------------------------------------------------------------------
/deploy/lite/example/human_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/deploy/lite/example/human_2.png
--------------------------------------------------------------------------------
/deploy/lite/example/human_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/deploy/lite/example/human_3.png
--------------------------------------------------------------------------------
/deploy/lite/human_segmentation_demo/.gitignore:
--------------------------------------------------------------------------------
1 | *.iml
2 | .gradle
3 | /local.properties
4 | /.idea/caches
5 | /.idea/libraries
6 | /.idea/modules.xml
7 | /.idea/workspace.xml
8 | /.idea/navEditor.xml
9 | /.idea/assetWizardSettings.xml
10 | .DS_Store
11 | /build
12 | /captures
13 | .externalNativeBuild
14 |
--------------------------------------------------------------------------------
/deploy/lite/human_segmentation_demo/app/.gitignore:
--------------------------------------------------------------------------------
1 | /build
2 |
--------------------------------------------------------------------------------
/deploy/lite/human_segmentation_demo/app/local.properties:
--------------------------------------------------------------------------------
1 | ## This file must *NOT* be checked into Version Control Systems,
2 | # as it contains information specific to your local configuration.
3 | #
4 | # Location of the SDK. This is only used by Gradle.
5 | # For customization when using a Version Control System, please read the
6 | # header note.
7 | #Mon Nov 25 17:01:52 CST 2019
8 | sdk.dir=/Users/chenlingchi/Library/Android/sdk
9 |
--------------------------------------------------------------------------------
/deploy/lite/human_segmentation_demo/app/src/main/assets/image_segmentation/images/human.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/deploy/lite/human_segmentation_demo/app/src/main/assets/image_segmentation/images/human.jpg
--------------------------------------------------------------------------------
/deploy/lite/human_segmentation_demo/app/src/main/assets/image_segmentation/labels/label_list:
--------------------------------------------------------------------------------
1 | background
2 | human
3 |
--------------------------------------------------------------------------------
/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-anydpi-v26/ic_launcher.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
--------------------------------------------------------------------------------
/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
--------------------------------------------------------------------------------
/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-hdpi/ic_launcher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-hdpi/ic_launcher.png
--------------------------------------------------------------------------------
/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-hdpi/ic_launcher_round.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-hdpi/ic_launcher_round.png
--------------------------------------------------------------------------------
/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-mdpi/ic_launcher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-mdpi/ic_launcher.png
--------------------------------------------------------------------------------
/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-mdpi/ic_launcher_round.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-mdpi/ic_launcher_round.png
--------------------------------------------------------------------------------
/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xhdpi/ic_launcher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xhdpi/ic_launcher.png
--------------------------------------------------------------------------------
/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xhdpi/ic_launcher_round.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xhdpi/ic_launcher_round.png
--------------------------------------------------------------------------------
/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xxhdpi/ic_launcher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xxhdpi/ic_launcher.png
--------------------------------------------------------------------------------
/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.png
--------------------------------------------------------------------------------
/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xxxhdpi/ic_launcher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xxxhdpi/ic_launcher.png
--------------------------------------------------------------------------------
/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.png
--------------------------------------------------------------------------------
/deploy/lite/human_segmentation_demo/app/src/main/res/values/colors.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | #008577
4 | #00574B
5 | #D81B60
6 |
7 |
--------------------------------------------------------------------------------
/deploy/lite/human_segmentation_demo/app/src/test/java/com/baidu/paddle/lite/demo/ExampleUnitTest.java:
--------------------------------------------------------------------------------
1 | package com.baidu.paddle.lite.demo;
2 |
3 | import org.junit.Test;
4 |
5 | import static org.junit.Assert.*;
6 |
7 | /**
8 | * Example local unit test, which will execute on the development machine (host).
9 | *
10 | * @see Testing documentation
11 | */
12 | public class ExampleUnitTest {
13 | @Test
14 | public void addition_isCorrect() {
15 | assertEquals(4, 2 + 2);
16 | }
17 | }
--------------------------------------------------------------------------------
/deploy/lite/human_segmentation_demo/gradle/wrapper/gradle-wrapper.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/deploy/lite/human_segmentation_demo/gradle/wrapper/gradle-wrapper.jar
--------------------------------------------------------------------------------
/deploy/lite/human_segmentation_demo/gradle/wrapper/gradle-wrapper.properties:
--------------------------------------------------------------------------------
1 | #Thu Aug 22 15:05:37 CST 2019
2 | distributionBase=GRADLE_USER_HOME
3 | distributionPath=wrapper/dists
4 | zipStoreBase=GRADLE_USER_HOME
5 | zipStorePath=wrapper/dists
6 | distributionUrl=https\://services.gradle.org/distributions/gradle-5.1.1-all.zip
7 |
--------------------------------------------------------------------------------
/deploy/lite/human_segmentation_demo/settings.gradle:
--------------------------------------------------------------------------------
1 | include ':app'
2 |
--------------------------------------------------------------------------------
/deploy/python/README.md:
--------------------------------------------------------------------------------
1 | Please refer to the [tutorial](../../docs/deployment/inference/python_inference.md) for Python deployment using Paddle Inference.
2 |
--------------------------------------------------------------------------------
/deploy/serving/README.md:
--------------------------------------------------------------------------------
1 | Please refer to the [tutorial](../../docs/deployment/serving/serving.md) for deployment using Paddle Serving.
2 |
--------------------------------------------------------------------------------
/deploy/web/README.md:
--------------------------------------------------------------------------------
1 | Use Paddle.js to deploy inference model on web, please refer to [document](../../docs/deployment/web/web.md).
2 |
3 | 使用Paddle.js在网页上部署预测模型,请参考[文档](../../docs/deployment/web/web_cn.md).
4 |
--------------------------------------------------------------------------------
/deploy/web/example/bg/bg.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/deploy/web/example/bg/bg.jpg
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | # PaddleSeg Document
2 | All PaddleSeg tutorials are organized as the format of Read the Docs.
3 |
--------------------------------------------------------------------------------
/docs/apis/README.md:
--------------------------------------------------------------------------------
1 | English | [简体中文](README_CN.md)
2 | ## Data Transformation (Data Augmentation)
3 | [paddleseg.transforms](./transforms/transforms.md)
4 |
5 | ## Dataset Processing
6 | [paddleseg.datasets](./datasets/datasets.md)
7 |
8 | ## Semantic Segmentation Model Set
9 | [paddleseg.models](./models/models.md)
10 |
11 | ## Backbone Networks
12 | [paddleseg.models.backbone](./backbones/backbones.md)
13 |
14 | ## Training,Evaluating and Predicting
15 | [paddleseg.core](./core/core.md)
16 |
17 | ## Computer Vision Library
18 | [paddleseg.cvlibs](./cvlibs/cvlibs.md)
19 |
--------------------------------------------------------------------------------
/docs/apis/README_CN.md:
--------------------------------------------------------------------------------
1 | 简体中文 | [English](README.md)
2 | ## 数据变换(数据增强)
3 | [paddleseg.transforms](./transforms/transforms_cn.md)
4 |
5 | ## 数据集处理
6 | [paddleseg.datasets](./datasets/datasets_cn.md)
7 |
8 | ## 语义分割模型集
9 | [paddleseg.models](./models/models_cn.md)
10 |
11 | ## 骨干网络
12 | [paddleseg.models.backbone](./backbones/backbones_cn.md)
13 |
14 | ## 训练、评估和预测
15 | [paddleseg.core](./core/core_cn.md)
16 |
17 | ## 计算机视觉库
18 | [paddleseg.cvlibs](./cvlibs/cvlibs_cn.md)
19 |
--------------------------------------------------------------------------------
/docs/apis/backbones/index.rst:
--------------------------------------------------------------------------------
1 | 骨干网络
2 | ============================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | backbones.md
8 |
--------------------------------------------------------------------------------
/docs/apis/core/index.rst:
--------------------------------------------------------------------------------
1 | 训练、评估和预测
2 | ============================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | core.md
8 |
--------------------------------------------------------------------------------
/docs/apis/cvlibs/index.rst:
--------------------------------------------------------------------------------
1 | 视觉通用工具类
2 | ============================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | cvlibs.md
8 |
--------------------------------------------------------------------------------
/docs/apis/index.rst:
--------------------------------------------------------------------------------
1 | API接口说明
2 | ============================
3 |
4 | .. toctree::
5 | :maxdepth: 2
6 |
7 | transforms/index.rst
8 | models/index.rst
9 | backbones/index.rst
10 | core/index.rst
11 | cvlibs/index.rst
12 |
--------------------------------------------------------------------------------
/docs/apis/losses/index.rst:
--------------------------------------------------------------------------------
1 | 损失函数
2 | ============================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | losses.md
8 |
--------------------------------------------------------------------------------
/docs/apis/models/index.rst:
--------------------------------------------------------------------------------
1 | 视觉模型集
2 | ============================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | models.md
8 |
--------------------------------------------------------------------------------
/docs/apis/transforms/index.rst:
--------------------------------------------------------------------------------
1 | 数据处理与增强
2 | ============================
3 |
4 | transforms为PaddleSeg的模型训练提供了数据的预处理和数据增强接口。
5 |
6 | .. toctree::
7 | :maxdepth: 1
8 |
9 | transforms.md
10 | datasets.md
11 |
--------------------------------------------------------------------------------
/docs/data/README.md:
--------------------------------------------------------------------------------
1 | Coming
2 |
--------------------------------------------------------------------------------
/docs/data/custom/index.rst:
--------------------------------------------------------------------------------
1 | 自定义数据集
2 | ============================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | data_prepare.md
8 |
--------------------------------------------------------------------------------
/docs/data/image/ITK-SNAP.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/data/image/ITK-SNAP.png
--------------------------------------------------------------------------------
/docs/data/image/LabelMeing.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/data/image/LabelMeing.png
--------------------------------------------------------------------------------
/docs/data/image/file_list.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/data/image/file_list.png
--------------------------------------------------------------------------------
/docs/data/image/file_list2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/data/image/file_list2.png
--------------------------------------------------------------------------------
/docs/data/image/image-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/data/image/image-1.png
--------------------------------------------------------------------------------
/docs/data/image/image-10.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/data/image/image-10.jpg
--------------------------------------------------------------------------------
/docs/data/image/image-11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/data/image/image-11.png
--------------------------------------------------------------------------------
/docs/data/image/image-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/data/image/image-2.png
--------------------------------------------------------------------------------
/docs/data/image/image-3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/data/image/image-3.png
--------------------------------------------------------------------------------
/docs/data/image/image-4-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/data/image/image-4-1.png
--------------------------------------------------------------------------------
/docs/data/image/image-4-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/data/image/image-4-2.png
--------------------------------------------------------------------------------
/docs/data/image/image-5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/data/image/image-5.png
--------------------------------------------------------------------------------
/docs/data/image/image-6-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/data/image/image-6-2.png
--------------------------------------------------------------------------------
/docs/data/image/image-6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/data/image/image-6.png
--------------------------------------------------------------------------------
/docs/data/image/image-7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/data/image/image-7.png
--------------------------------------------------------------------------------
/docs/data/image/jingling-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/data/image/jingling-1.png
--------------------------------------------------------------------------------
/docs/data/image/jingling-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/data/image/jingling-2.png
--------------------------------------------------------------------------------
/docs/data/image/jingling-3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/data/image/jingling-3.png
--------------------------------------------------------------------------------
/docs/data/image/jingling-4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/data/image/jingling-4.png
--------------------------------------------------------------------------------
/docs/data/image/jingling-5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/data/image/jingling-5.png
--------------------------------------------------------------------------------
/docs/data/image/labelme_polygons.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/data/image/labelme_polygons.jpg
--------------------------------------------------------------------------------
/docs/data/marker/index.rst:
--------------------------------------------------------------------------------
1 | 数据格式说明
2 | ============================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | marker.md
8 |
--------------------------------------------------------------------------------
/docs/data/transform/index.rst:
--------------------------------------------------------------------------------
1 | 数据标注和转换
2 | ============================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | transform.md
8 |
--------------------------------------------------------------------------------
/docs/deployment/inference/index.rst:
--------------------------------------------------------------------------------
1 | 本地inference部署
2 | ============================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | python_inference.md
8 | cpp_inference.md
9 |
--------------------------------------------------------------------------------
/docs/deployment/lite/example/human.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/deployment/lite/example/human.png
--------------------------------------------------------------------------------
/docs/deployment/lite/example/human_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/deployment/lite/example/human_1.png
--------------------------------------------------------------------------------
/docs/deployment/lite/example/human_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/deployment/lite/example/human_2.png
--------------------------------------------------------------------------------
/docs/deployment/lite/example/human_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/deployment/lite/example/human_3.png
--------------------------------------------------------------------------------
/docs/deployment/lite/index.rst:
--------------------------------------------------------------------------------
1 | 移动端lite部署
2 | ============================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | lite.md
8 |
--------------------------------------------------------------------------------
/docs/deployment/serving/index.rst:
--------------------------------------------------------------------------------
1 | 服务化serving部署
2 | ============================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | serving.md
8 |
--------------------------------------------------------------------------------
/docs/deployment/web/image/figure1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/deployment/web/image/figure1.png
--------------------------------------------------------------------------------
/docs/deployment/web/index.rst:
--------------------------------------------------------------------------------
1 | 网络化web部署
2 | ============================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | web.md
8 |
--------------------------------------------------------------------------------
/docs/design/create/index.rst:
--------------------------------------------------------------------------------
1 | 如何创造自己的模型
2 | ============================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | add_new_model.md
8 |
--------------------------------------------------------------------------------
/docs/design/use/index.rst:
--------------------------------------------------------------------------------
1 | 配置文件详解
2 | ============================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | use.md
8 |
--------------------------------------------------------------------------------
/docs/evaluation/index.rst:
--------------------------------------------------------------------------------
1 | 模型评估
2 | ============================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | evaluate.md
--------------------------------------------------------------------------------
/docs/faq/faq/faq_imgs/ann_config.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/faq/faq/faq_imgs/ann_config.png
--------------------------------------------------------------------------------
/docs/faq/faq/index.rst:
--------------------------------------------------------------------------------
1 | 文档问答
2 | ============================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | faq.md
8 |
--------------------------------------------------------------------------------
/docs/images/Lovasz_Hinge_Evaluate_mIoU.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/Lovasz_Hinge_Evaluate_mIoU.png
--------------------------------------------------------------------------------
/docs/images/Lovasz_Softmax_Evaluate_mIoU.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/Lovasz_Softmax_Evaluate_mIoU.png
--------------------------------------------------------------------------------
/docs/images/QQ_chat.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/QQ_chat.png
--------------------------------------------------------------------------------
/docs/images/activate.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/activate.png
--------------------------------------------------------------------------------
/docs/images/anli.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/anli.png
--------------------------------------------------------------------------------
/docs/images/api_fig1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/api_fig1.png
--------------------------------------------------------------------------------
/docs/images/api_fig2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/api_fig2.png
--------------------------------------------------------------------------------
/docs/images/chat.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/chat.png
--------------------------------------------------------------------------------
/docs/images/cityscapes_predict_demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/cityscapes_predict_demo.png
--------------------------------------------------------------------------------
/docs/images/deepglobe.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/deepglobe.png
--------------------------------------------------------------------------------
/docs/images/eiseg_demo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/eiseg_demo.gif
--------------------------------------------------------------------------------
/docs/images/f1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/f1.png
--------------------------------------------------------------------------------
/docs/images/f2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/f2.png
--------------------------------------------------------------------------------
/docs/images/f3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/f3.png
--------------------------------------------------------------------------------
/docs/images/feature.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/feature.png
--------------------------------------------------------------------------------
/docs/images/fig1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/fig1.png
--------------------------------------------------------------------------------
/docs/images/fig2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/fig2.png
--------------------------------------------------------------------------------
/docs/images/fig3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/fig3.png
--------------------------------------------------------------------------------
/docs/images/fig4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/fig4.png
--------------------------------------------------------------------------------
/docs/images/fig5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/fig5.png
--------------------------------------------------------------------------------
/docs/images/human.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/human.png
--------------------------------------------------------------------------------
/docs/images/interactive.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/interactive.gif
--------------------------------------------------------------------------------
/docs/images/love.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/love.png
--------------------------------------------------------------------------------
/docs/images/model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/model.png
--------------------------------------------------------------------------------
/docs/images/optic_test_image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/optic_test_image.jpg
--------------------------------------------------------------------------------
/docs/images/paddleseg_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/paddleseg_logo.png
--------------------------------------------------------------------------------
/docs/images/quick_start_predict.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/quick_start_predict.jpg
--------------------------------------------------------------------------------
/docs/images/quick_start_vdl.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/quick_start_vdl.jpg
--------------------------------------------------------------------------------
/docs/images/readme/二次元.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/readme/二次元.gif
--------------------------------------------------------------------------------
/docs/images/readme/人体解析.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/readme/人体解析.gif
--------------------------------------------------------------------------------
/docs/images/readme/人像分割-0.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/readme/人像分割-0.gif
--------------------------------------------------------------------------------
/docs/images/readme/人像分割.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/readme/人像分割.gif
--------------------------------------------------------------------------------
/docs/images/seg_news_icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/seg_news_icon.png
--------------------------------------------------------------------------------
/docs/images/teach.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/teach.png
--------------------------------------------------------------------------------
/docs/images/yinyong.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/images/yinyong.png
--------------------------------------------------------------------------------
/docs/loss_usage.md:
--------------------------------------------------------------------------------
1 | # Loss usage
2 |
3 | - [Lovasz loss](lovasz_loss.md)
4 | - To be continued
5 |
--------------------------------------------------------------------------------
/docs/models/fascnn_cn.md:
--------------------------------------------------------------------------------
1 | ### Fast-SCNN
2 |
3 | Fast-SCNN 是一个面向实时的语义分割网络。在双分支的结构基础上,大量使用了深度可分离卷积和逆残差(inverted-residual)模块,并且使用特征融合构造金字塔池化模块 (Pyramid Pooling Module)来融合上下文信息。这使得Fast-SCNN在保持高效的情况下能学习到丰富的细节信息。Fast-SCNN最大的特点是“小快灵”,即该模型在推理计算时仅需要较小的FLOPs,就可以快速推理出一个不错的结果。整个网络结构如下:
4 |
5 | 
6 |
    7 | Fast-SCNN结构图
    8 |
9 | 具体原理细节请参考[Fast-SCNN: Fast Semantic Segmentation Network](https://arxiv.org/abs/1902.04502)
10 |
--------------------------------------------------------------------------------
/docs/models/images/Fast-SCNN.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/models/images/Fast-SCNN.png
--------------------------------------------------------------------------------
/docs/models/images/OCRNet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/models/images/OCRNet.png
--------------------------------------------------------------------------------
/docs/models/images/UNet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/models/images/UNet.png
--------------------------------------------------------------------------------
/docs/models/images/convolution.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/models/images/convolution.png
--------------------------------------------------------------------------------
/docs/models/images/deeplabv3+.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/models/images/deeplabv3+.png
--------------------------------------------------------------------------------
/docs/models/index.rst:
--------------------------------------------------------------------------------
1 | 经典模型
2 | =======================================
3 |
4 | 常见的四种经典模型
5 |
6 |
7 | .. toctree::
8 | :maxdepth: 1
9 | :caption: 文档目录:
10 |
11 | deeplabv3.md
12 | fascnn.md
13 | ocrnet.md
14 | unet.md
15 |
--------------------------------------------------------------------------------
/docs/models/unet_cn.md:
--------------------------------------------------------------------------------
1 | # U-Net
2 |
3 | U-Net [1] 起源于医疗图像分割,具有参数少、计算快、应用性强的特点,对于一般场景适应度很高。U-Net最早于2015年提出,并在ISBI 2015 Cell Tracking Challenge取得了第一。经过发展,目前有多个变形和应用。
4 | 原始U-Net的结构是标准的编码器-解码器结构。如下图所示,左侧可视为一个编码器,右侧可视为一个解码器。编码器由四个子模块组成,每个子模块包含两个卷积层,每个子模块之后又通过max pool进行下采样。编码器整体呈现逐渐缩小的结构,不断减少池化层的空间维度,缩小特征图的分辨率,以捕获上下文信息。
5 | 解码器呈现与编码器对称的扩张结构,逐步修复分割对象的细节和空间维度,实现精准的定位。解码器同样也包含四个子模块,分辨率通过上采样操作依次增大,直到与输入图像的分辨率基本一致。
6 | 该网络还使用了跳跃连接,即解码器每上采样一次,就以拼接的方式将解码器和编码器中对应相同分辨率的特征图进行特征融合,帮助解码器更好地恢复目标的细节。由于网络整体结构类似于大写的英文字母U,故得名U-Net。
7 |
8 | 
9 |
10 | U-Net结构图
11 |
12 | 具体原理细节请参考[U-Net:Convolutional Networks for Biomedical Image Segmentation](https://arxiv.org/abs/1505.04597)。
13 |
--------------------------------------------------------------------------------
/docs/module/data/index.rst:
--------------------------------------------------------------------------------
1 | 数据增强
2 | ============================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | data.md
8 |
--------------------------------------------------------------------------------
/docs/module/images/Lovasz_Hinge_Evaluate_mIoU.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/Lovasz_Hinge_Evaluate_mIoU.png
--------------------------------------------------------------------------------
/docs/module/images/Lovasz_Softmax_Evaluate_mIoU.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/Lovasz_Softmax_Evaluate_mIoU.png
--------------------------------------------------------------------------------
/docs/module/images/VOC2012.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/VOC2012.png
--------------------------------------------------------------------------------
/docs/module/images/annotation/image-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/annotation/image-1.png
--------------------------------------------------------------------------------
/docs/module/images/annotation/image-10.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/annotation/image-10.jpg
--------------------------------------------------------------------------------
/docs/module/images/annotation/image-11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/annotation/image-11.png
--------------------------------------------------------------------------------
/docs/module/images/annotation/image-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/annotation/image-2.png
--------------------------------------------------------------------------------
/docs/module/images/annotation/image-3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/annotation/image-3.png
--------------------------------------------------------------------------------
/docs/module/images/annotation/image-4-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/annotation/image-4-1.png
--------------------------------------------------------------------------------
/docs/module/images/annotation/image-4-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/annotation/image-4-2.png
--------------------------------------------------------------------------------
/docs/module/images/annotation/image-5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/annotation/image-5.png
--------------------------------------------------------------------------------
/docs/module/images/annotation/image-6-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/annotation/image-6-2.png
--------------------------------------------------------------------------------
/docs/module/images/annotation/image-6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/annotation/image-6.png
--------------------------------------------------------------------------------
/docs/module/images/annotation/image-7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/annotation/image-7.png
--------------------------------------------------------------------------------
/docs/module/images/annotation/jingling-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/annotation/jingling-1.png
--------------------------------------------------------------------------------
/docs/module/images/annotation/jingling-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/annotation/jingling-2.png
--------------------------------------------------------------------------------
/docs/module/images/annotation/jingling-3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/annotation/jingling-3.png
--------------------------------------------------------------------------------
/docs/module/images/annotation/jingling-4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/annotation/jingling-4.png
--------------------------------------------------------------------------------
/docs/module/images/annotation/jingling-5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/annotation/jingling-5.png
--------------------------------------------------------------------------------
/docs/module/images/aug_method.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/aug_method.png
--------------------------------------------------------------------------------
/docs/module/images/cityscapes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/cityscapes.png
--------------------------------------------------------------------------------
/docs/module/images/cityscapes_predict_demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/cityscapes_predict_demo.png
--------------------------------------------------------------------------------
/docs/module/images/cosine_decay_example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/cosine_decay_example.png
--------------------------------------------------------------------------------
/docs/module/images/data_aug_example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/data_aug_example.png
--------------------------------------------------------------------------------
/docs/module/images/data_aug_flip_mirror.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/data_aug_flip_mirror.png
--------------------------------------------------------------------------------
/docs/module/images/data_aug_flow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/data_aug_flow.png
--------------------------------------------------------------------------------
/docs/module/images/deepglobe.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/deepglobe.png
--------------------------------------------------------------------------------
/docs/module/images/deeplabv3p.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/deeplabv3p.png
--------------------------------------------------------------------------------
/docs/module/images/dice.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/dice.png
--------------------------------------------------------------------------------
/docs/module/images/dice2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/dice2.png
--------------------------------------------------------------------------------
/docs/module/images/dice3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/dice3.png
--------------------------------------------------------------------------------
/docs/module/images/fast-scnn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/fast-scnn.png
--------------------------------------------------------------------------------
/docs/module/images/file_list.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/file_list.png
--------------------------------------------------------------------------------
/docs/module/images/file_list2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/file_list2.png
--------------------------------------------------------------------------------
/docs/module/images/gn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/gn.png
--------------------------------------------------------------------------------
/docs/module/images/hrnet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/hrnet.png
--------------------------------------------------------------------------------
/docs/module/images/icnet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/icnet.png
--------------------------------------------------------------------------------
/docs/module/images/image-10.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/image-10.jpg
--------------------------------------------------------------------------------
/docs/module/images/loss_comparison.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/loss_comparison.png
--------------------------------------------------------------------------------
/docs/module/images/lovasz-hinge-vis.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/lovasz-hinge-vis.png
--------------------------------------------------------------------------------
/docs/module/images/lovasz-hinge.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/lovasz-hinge.png
--------------------------------------------------------------------------------
/docs/module/images/lovasz-softmax.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/lovasz-softmax.png
--------------------------------------------------------------------------------
/docs/module/images/optic_test_image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/optic_test_image.jpg
--------------------------------------------------------------------------------
/docs/module/images/piecewise_decay_example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/piecewise_decay_example.png
--------------------------------------------------------------------------------
/docs/module/images/poly_decay_example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/poly_decay_example.png
--------------------------------------------------------------------------------
/docs/module/images/pspnet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/pspnet.png
--------------------------------------------------------------------------------
/docs/module/images/pspnet2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/pspnet2.png
--------------------------------------------------------------------------------
/docs/module/images/qq_group2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/qq_group2.png
--------------------------------------------------------------------------------
/docs/module/images/quick_start_predict.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/quick_start_predict.jpg
--------------------------------------------------------------------------------
/docs/module/images/quick_start_vdl.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/quick_start_vdl.jpg
--------------------------------------------------------------------------------
/docs/module/images/rangescale.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/rangescale.png
--------------------------------------------------------------------------------
/docs/module/images/seg_news_icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/seg_news_icon.png
--------------------------------------------------------------------------------
/docs/module/images/softmax_loss.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/softmax_loss.png
--------------------------------------------------------------------------------
/docs/module/images/unet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/unet.png
--------------------------------------------------------------------------------
/docs/module/images/usage_vis_demo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/usage_vis_demo.jpg
--------------------------------------------------------------------------------
/docs/module/images/visualdl_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/visualdl_image.png
--------------------------------------------------------------------------------
/docs/module/images/visualdl_scalar.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/visualdl_scalar.png
--------------------------------------------------------------------------------
/docs/module/images/warmup_with_poly_decay_example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/module/images/warmup_with_poly_decay_example.png
--------------------------------------------------------------------------------
/docs/module/index.rst:
--------------------------------------------------------------------------------
1 | 重要模块说明
2 | ============================
3 |
4 | .. toctree::
5 | :maxdepth: 2
6 |
7 | data/index.rst
8 | loss/index.rst
9 | tricks/index.rst
10 |
11 |
--------------------------------------------------------------------------------
/docs/module/loss/DualTaskLoss_cn.md:
--------------------------------------------------------------------------------
1 | 简体中文 | [English](DualTaskLoss_en.md)
2 | ## [DualTaskLoss](../../../paddleseg/models/losses/gscnn_dual_task_loss.py)
3 | 用于半监督学习中的 Dual-task 一致性,以对模型进行约束。DualTaskLoss 旨在强化多个任务之间的一致性。
4 |
5 | ```python
6 | class paddleseg.models.losses.DualTaskLoss(
7 | ignore_index = 255,
8 | tau = 0.5
9 | )
10 | ```
11 |
12 | ## Dual task loss 使用指南
13 |
14 | ### 参数
15 | * **ignore_index** (int64): 指定一个在标注图中要忽略的像素值,其对输入梯度不产生贡献。当标注图中存在无法标注(或很难标注)的像素时,可以将其标注为某特定灰度值。在计算损失值时,其与原图像对应位置的像素将不作为损失函数的自变量。 *默认:``255``*
16 | * **tau** (float): Gumbel softmax 样本的tau。
17 |
--------------------------------------------------------------------------------
/docs/module/loss/EdgeAttentionLoss_cn.md:
--------------------------------------------------------------------------------
1 | 简体中文 | [English](EdgeAttentionLoss_en.md)
2 | ## [EdgeAttentionLoss](../../../paddleseg/models/losses/edge_attention_loss.py)
3 | 适合以 encoder 提取edge,以 decoder 进行加权聚合的多任务训练场景。是一种融合边缘检测与注意力机制进行多 loss 的组合输出的方法。
4 |
5 | ```python
6 | class paddleseg.models.losses.EdgeAttentionLoss(
7 | edge_threshold = 0.8,
8 | ignore_index = 255
9 | )
10 | ```
11 |
12 | ## Edge attention loss 使用指南
13 |
14 | ### 参数
15 | * **edge_threshold** (float): 值大于 edge_threshold 的像素被视为边缘。
16 | * **ignore_index** (int64): 指定一个在标注图中要忽略的像素值,其对输入梯度不产生贡献。当标注图中存在无法标注(或很难标注)的像素时,可以将其标注为某特定灰度值。在计算损失值时,其与原图像对应位置的像素将不作为损失函数的自变量。 *默认:``255``*
17 |
--------------------------------------------------------------------------------
/docs/module/loss/LovaszHingeLoss_cn.md:
--------------------------------------------------------------------------------
1 | 简体中文 | [English](LovaszHingeLoss_en.md)
2 | ## [LovaszHingeLoss](../../../paddleseg/models/losses/lovasz_loss.py)
3 | Hinge Loss是在不连续、不平滑的简单阶梯损失函数上改进的一种损失函数。对于正样本,Hinge Loss的输出应大于等于1;对于负样本,Hinge Loss的输出应小于等于-1。
4 |
5 | ```python
6 | class paddleseg.models.losses.LovaszHingeLoss(ignore_index = 255)
7 | ```
8 |
9 | ## Binary Lovasz hinge loss使用指南
10 |
11 | ### 参数
12 | * **ignore_index** (int64): 指定一个在标注图中要忽略的像素值,其对输入梯度不产生贡献。当标注图中存在无法标注(或很难标注)的像素时,可以将其标注为某特定灰度值。在计算损失值时,其与原图像对应位置的像素将不作为损失函数的自变量。 *默认:``255``*
13 |
--------------------------------------------------------------------------------
/docs/module/loss/MixedLoss_cn.md:
--------------------------------------------------------------------------------
1 | 简体中文 | [English](MixedLoss_en.md)
2 | ## [MixedLoss](../../../paddleseg/models/losses/mixed_loss.py)
3 |
4 | 实现混合loss训练。PaddleSeg每一种损失函数对应网络的一个logit 输出,如果要某个网络输出应用多种损失函数需要修改网络代码。MixedLoss 将允许网络对多个损失函数结果进行加权计算,只需以模块化的形式装入,就可以实现混合loss训练。
5 |
6 | ```python
7 | class paddleseg.models.losses.MixedLoss(losses, coef)
8 | ```
9 |
10 |
11 | ## Mixed loss使用指南
12 |
13 | ### 参数
14 | * **losses** (list of nn.Layer): 由多个损失函数类所组成的列表。
15 | * **coef** (list of float|int): 每个损失函数的权重比,与 losses 列表中的损失函数一一对应。
16 |
17 | ### 返回值
18 | * MixedLoss 类的可调用对象。
19 |
--------------------------------------------------------------------------------
/docs/module/loss/index.rst:
--------------------------------------------------------------------------------
1 | loss说明
2 | ============================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | lovasz_loss.md
8 |
--------------------------------------------------------------------------------
/docs/module/tricks/index.rst:
--------------------------------------------------------------------------------
1 | tricks
2 | ============================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | tricks.md
8 |
--------------------------------------------------------------------------------
/docs/module/tricks/tricks.md:
--------------------------------------------------------------------------------
1 | # tricks
2 |
3 | coming soon!
4 |
--------------------------------------------------------------------------------
/docs/paddleseg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/paddleseg.png
--------------------------------------------------------------------------------
/docs/pr/images/001_fork.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/pr/images/001_fork.png
--------------------------------------------------------------------------------
/docs/pr/images/002_clone.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/pr/images/002_clone.png
--------------------------------------------------------------------------------
/docs/pr/images/003_precommit_pass.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/pr/images/003_precommit_pass.png
--------------------------------------------------------------------------------
/docs/pr/images/004_create_pr.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/pr/images/004_create_pr.png
--------------------------------------------------------------------------------
/docs/pr/pr/index.rst:
--------------------------------------------------------------------------------
1 | 提交PR说明
2 | ============================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | pr.md
8 |
--------------------------------------------------------------------------------
/docs/predict/color_map/after_mapped.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/predict/color_map/after_mapped.jpeg
--------------------------------------------------------------------------------
/docs/predict/color_map/before_mapped.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/docs/predict/color_map/before_mapped.jpeg
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | sphinx==3.1.2
2 | sphinx-markdown-tables==0.0.15
3 | recommonmark==0.6.0
4 |
--------------------------------------------------------------------------------
/docs/slim/distill/index.rst:
--------------------------------------------------------------------------------
1 | 模型蒸馏
2 | ============================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | distill.md
8 |
--------------------------------------------------------------------------------
/docs/slim/prune/index.rst:
--------------------------------------------------------------------------------
1 | 模型裁剪
2 | ============================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | prune.md
8 |
--------------------------------------------------------------------------------
/docs/slim/quant/index.rst:
--------------------------------------------------------------------------------
1 | 模型量化
2 | ============================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | quant.md
8 |
--------------------------------------------------------------------------------
/docs/static/static.md:
--------------------------------------------------------------------------------
1 | English | [简体中文](static_cn.md)
2 |
3 | # PaddleSeg Static Graph
4 |
5 | After release/2.3, PaddleSeg will not keep the ```legacy``` directory that is for static graph. Please find it in [release/2.2](https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.2/legacy) or before, if you need.
6 |
--------------------------------------------------------------------------------
/docs/static/static_cn.md:
--------------------------------------------------------------------------------
1 | 简体中文 | [English](static.md)
2 |
3 | # PaddleSeg静态图
4 |
5 | 为了提供更好的动态图开发功能,从release/2.3起,PaddleSeg将不再维护静态图版本。静态图版本在[release/2.2](https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.2/legacy)以及之前的分支继续保留。
6 |
--------------------------------------------------------------------------------
/docs/train/index.rst:
--------------------------------------------------------------------------------
1 | 模型训练
2 | ============================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | train.md
--------------------------------------------------------------------------------
/myconfig/segformer-b2-rs.yml:
--------------------------------------------------------------------------------
1 | _base_: 'rs.yml'
2 |
3 | batch_size: 16
4 | iters: 480000
5 |
6 | model:
7 | type: SegFormer_B2
8 | num_classes: 4
9 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/mix_vision_transformer_b2.tar.gz
10 |
11 | optimizer:
12 | type: AdamW
13 | beta1: 0.9
14 | beta2: 0.999
15 | weight_decay: 0.01
16 |
17 | lr_scheduler:
18 | type: PolynomialDecay
19 | learning_rate: 0.00006
20 | power: 1
21 |
22 | loss:
23 | types:
24 | - type: MixedLoss
25 | losses:
26 | - type: CrossEntropyLoss
27 | - type: LovaszSoftmaxLoss
28 | coef: [0.8, 0.2]
29 | coef: [1]
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | pyyaml >= 5.1
2 | visualdl >= 2.0.0
3 | opencv-python
4 | tqdm
5 | filelock
6 | scipy
7 | prettytable
8 | scikit-learn
9 |
--------------------------------------------------------------------------------
/slim/distill/README.md:
--------------------------------------------------------------------------------
1 | Please refer to the [tutorial](../../docs/slim/distill/distill.md) for model distillation.
2 |
--------------------------------------------------------------------------------
/slim/prune/README.md:
--------------------------------------------------------------------------------
1 | Please refer to the [tutorial](../../docs/slim/prune/prune.md) for model pruning.
2 |
--------------------------------------------------------------------------------
/slim/quant/README.md:
--------------------------------------------------------------------------------
1 | Please refer to the [tutorial](../../docs/slim/quant/quant.md) for model quantization.
2 |
--------------------------------------------------------------------------------
/test_tipc/configs/_base_/cityscapes_1024x1024.yml:
--------------------------------------------------------------------------------
1 | _base_: './cityscapes.yml'
2 |
3 | train_dataset:
4 | transforms:
5 | - type: ResizeStepScaling
6 | min_scale_factor: 0.5
7 | max_scale_factor: 2.0
8 | scale_step_size: 0.25
9 | - type: RandomPaddingCrop
10 | crop_size: [1024, 1024]
11 | - type: RandomHorizontalFlip
12 | - type: RandomDistort
13 | brightness_range: 0.4
14 | contrast_range: 0.4
15 | saturation_range: 0.4
16 | - type: Normalize
17 |
18 | val_dataset:
19 | transforms:
20 | - type: Normalize
21 |
22 | export:
23 | transforms:
24 | - type: Resize
25 | target_size: [512, 512]
26 | - type: Normalize
27 |
--------------------------------------------------------------------------------
/test_tipc/configs/_base_/cityscapes_769x769_setr.yml:
--------------------------------------------------------------------------------
1 | _base_: './cityscapes.yml'
2 |
3 | train_dataset:
4 | transforms:
5 | - type: ResizeStepScaling
6 | min_scale_factor: 0.25
7 | max_scale_factor: 2.0
8 | scale_step_size: 0.25
9 | - type: RandomPaddingCrop
10 | crop_size: [769, 769]
11 | - type: RandomHorizontalFlip
12 | - type: RandomDistort
13 | brightness_range: 0.5
14 | contrast_range: 0.5
15 | saturation_range: 0.5
16 | - type: Normalize
17 |
18 | val_dataset:
19 | transforms:
20 | - type: Padding
21 | target_size: [2048, 1024]
22 | - type: Normalize
23 |
--------------------------------------------------------------------------------
/test_tipc/configs/_base_/pascal_voc12aug.yml:
--------------------------------------------------------------------------------
1 | _base_: './pascal_voc12.yml'
2 |
3 | train_dataset:
4 | mode: trainaug
5 |
--------------------------------------------------------------------------------
/test_tipc/configs/bisenetv1/bisenetv1_resnet18_os8_cityscapes_1024x512_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 4
4 | iters: 160000
5 |
6 | model:
7 | type: BiseNetV1
8 | backbone:
9 | type: ResNet18_vd
10 | output_stride: 8
11 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet18_vd_ssld_v2.tar.gz
12 |
13 | optimizer:
14 | type: sgd
15 | weight_decay: 0.0005
16 |
17 | loss:
18 | types:
19 | - type: OhemCrossEntropyLoss
20 | - type: OhemCrossEntropyLoss
21 | - type: OhemCrossEntropyLoss
22 | coef: [1, 1, 1]
23 |
24 | lr_scheduler:
25 | type: PolynomialDecay
26 | learning_rate: 0.01
27 | end_lr: 0.0
28 | power: 0.9
29 |
--------------------------------------------------------------------------------
/test_tipc/configs/bisenetv2/bisenet_cityscapes_1024x1024_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes_1024x1024.yml'
2 |
3 | model:
4 | type: BiSeNetV2
5 | num_classes: 19
6 |
7 | optimizer:
8 | type: sgd
9 | weight_decay: 0.0005
10 |
11 | loss:
12 | types:
13 | - type: CrossEntropyLoss
14 | - type: CrossEntropyLoss
15 | - type: CrossEntropyLoss
16 | - type: CrossEntropyLoss
17 | - type: CrossEntropyLoss
18 | coef: [1, 1, 1, 1, 1]
19 |
20 | batch_size: 4
21 | iters: 160000
22 |
23 | lr_scheduler:
24 | type: PolynomialDecay
25 | learning_rate: 0.05
26 | end_lr: 0.0
27 | power: 0.9
28 |
--------------------------------------------------------------------------------
/test_tipc/configs/ccnet/ccnet_resnet101_os8_cityscapes_769x769_60k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes_769x769.yml'
2 |
3 | batch_size: 2
4 | iters: 60000
5 |
6 | model:
7 | type: CCNet
8 | backbone:
9 | type: ResNet101_vd
10 | output_stride: 8
11 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz
12 | backbone_indices: [2, 3]
13 | enable_auxiliary_loss: True
14 | dropout_prob: 0.1
15 | recurrence: 2
16 |
17 | loss:
18 | types:
19 | - type: OhemCrossEntropyLoss
20 | - type: CrossEntropyLoss
21 | coef: [1, 0.4]
22 |
23 | lr_scheduler:
24 | type: PolynomialDecay
25 | learning_rate: 0.01
26 | power: 0.9
27 | end_lr: 1.0e-4
28 |
--------------------------------------------------------------------------------
/test_tipc/configs/ddrnet/ddrnet23_cityscapes_1024x1024_120k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes_1024x1024.yml'
2 |
3 | model:
4 | type: DDRNet_23
5 | enable_auxiliary_loss: False
6 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/cityscapes/ddrnet23_cityscapes_1024x1024_120k/pretrain/model.pdparams
7 |
8 | optimizer:
9 | type: sgd
10 | weight_decay: 0.0005
11 |
12 | loss:
13 | types:
14 | - type: OhemCrossEntropyLoss
15 | coef: [1]
16 |
17 | batch_size: 3
18 | iters: 120000
19 |
20 | lr_scheduler:
21 | type: PolynomialDecay
22 | learning_rate: 0.01
23 | end_lr: 0.0
24 | power: 0.9
25 |
--------------------------------------------------------------------------------
/test_tipc/configs/deeplabv3p_resnet50/deeplabv3p_resnet50_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt:
--------------------------------------------------------------------------------
1 | # model load config
2 | model_name deeplabv3p_resnet50
3 | use_gpu 0
4 | gpu_id 0
5 | gpu_mem 4000
6 | cpu_math_library_num_threads 10
7 | use_mkldnn 1
8 | use_tensorrt 0
9 | use_fp16 0
10 |
11 | # config
12 | model_path ./test_tipc/cpp/inference_models/pp_humanseg_server_export_512x512/model.pdmodel
13 | params_path ./test_tipc/cpp/inference_models/pp_humanseg_server_export_512x512/model.pdiparams
14 | is_normalize 1
15 | is_resize 1
16 | resize_width 512
17 | resize_height 512
18 |
19 |
--------------------------------------------------------------------------------
/test_tipc/configs/fcn_hrnetw18/fcn_hrnetw18_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt:
--------------------------------------------------------------------------------
1 | # model load config
2 | model_name fcn_hrnetw18
3 | use_gpu 0
4 | gpu_id 0
5 | gpu_mem 4000
6 | cpu_math_library_num_threads 10
7 | use_mkldnn 1
8 | use_tensorrt 0
9 | use_fp16 0
10 |
11 | # config
12 | model_path ./test_tipc/cpp/inference_models/fcn_hrnetw18_cityscapes_1024x512_80k/model.pdmodel
13 | params_path ./test_tipc/cpp/inference_models/fcn_hrnetw18_cityscapes_1024x512_80k/model.pdiparams
14 | is_normalize 1
15 | is_resize 0
16 |
17 |
--------------------------------------------------------------------------------
/test_tipc/configs/fcn_hrnetw18/fcn_hrnetw18_model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt:
--------------------------------------------------------------------------------
1 | ===========================paddle2onnx_params===========================
2 | model_name:fcn_hrnetw18
3 | python:python3.7
4 | 2onnx: paddle2onnx
5 | --model_dir:./test_tipc/infer_models/fcn_hrnetw18_cityscapes_1024x512_80k/
6 | --model_filename:model.pdmodel
7 | --params_filename:model.pdiparams
8 | --save_file:./test_tipc/infer_models/fcn_hrnetw18_cityscapes_1024x512_80k/model.onnx
9 | --opset_version:11
10 | --enable_onnx_checker:True
11 | inference:deploy/python/infer_onnx.py
12 | --onnx_file:./test_tipc/infer_models/fcn_hrnetw18_cityscapes_1024x512_80k/model.onnx
13 | --img_path:test_tipc/cpp/cityscapes_demo.png
--------------------------------------------------------------------------------
/test_tipc/configs/fcn_hrnetw18/train_ptq_infer_python.txt:
--------------------------------------------------------------------------------
1 | ===========================ptq_params===========================
2 | model_name:fcn_hrnetw18_KL
3 | python:python3.7
4 | ##
5 | --model_dir:test_tipc/output/fcn_hrnetw18_KL/fcn_hrnetw18_cityscapes_1024x512_80k
6 | ##
7 | --config:test_tipc/configs/fcn_hrnetw18/fcn_hrnetw18_1024x512_cityscapes.yml
8 | --batch_num:1
9 | --batch_size:1
10 | ##
11 | trainer:PTQ
12 | PTQ:slim/quant/ptq.py
13 | ##
14 | ===========================infer_params===========================
15 | inference:deploy/python/infer.py
16 | --device:cpu|gpu
17 | --batch_size:1
18 | --config:quant_model/deploy.yaml
19 | --image_path:test_tipc/cpp/cityscapes_demo.png
20 | --benchmark:True
--------------------------------------------------------------------------------
/test_tipc/configs/fcn_hrnetw18_small/fcn_hrnetw18_small_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt:
--------------------------------------------------------------------------------
1 | # model load config
2 | model_name fcn_hrnetw18_small
3 | use_gpu 0
4 | gpu_id 0
5 | gpu_mem 4000
6 | cpu_math_library_num_threads 10
7 | use_mkldnn 1
8 | use_tensorrt 0
9 | use_fp16 0
10 |
11 | # config
12 | model_path ./test_tipc/cpp/inference_models/pp_humanseg_mobile_export_192x192/model.pdmodel
13 | params_path ./test_tipc/cpp/inference_models/pp_humanseg_mobile_export_192x192/model.pdiparams
14 | is_normalize 1
15 | is_resize 1
16 | resize_width 192
17 | resize_height 192
18 |
19 |
--------------------------------------------------------------------------------
/test_tipc/configs/glore/glore_resnet50_os8_cityscapes_1024x512_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 2
4 | iters: 80000
5 |
6 | learning_rate:
7 | decay:
8 | end_lr: 1.0e-5
9 |
10 | loss:
11 | types:
12 | - type: CrossEntropyLoss
13 | coef: [1, 0.4]
14 |
15 | model:
16 | type: GloRe
17 | backbone:
18 | type: ResNet50_vd
19 | output_stride: 8
20 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
21 | enable_auxiliary_loss: True
22 | align_corners: False
23 | pretrained: null
24 |
--------------------------------------------------------------------------------
/test_tipc/configs/ocrnet_hrnetw18/ocrnet_hrnetw18_cityscapes_1024x512_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 2
4 | iters: 160000
5 |
6 | model:
7 | type: OCRNet
8 | backbone:
9 | type: HRNet_W18
10 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/hrnet_w18_ssld.tar.gz
11 | num_classes: 19
12 | backbone_indices: [0]
13 |
14 | optimizer:
15 | type: sgd
16 |
17 | lr_scheduler:
18 | type: PolynomialDecay
19 | learning_rate: 0.01
20 | power: 0.9
21 |
22 |
23 | loss:
24 | types:
25 | - type: CrossEntropyLoss
26 | - type: CrossEntropyLoss
27 | coef: [1, 0.4]
28 |
--------------------------------------------------------------------------------
/test_tipc/configs/ocrnet_hrnetw18/ocrnet_hrnetw18_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt:
--------------------------------------------------------------------------------
1 | # model load config
2 | model_name ocrnet_hrnetw18
3 | use_gpu 0
4 | gpu_id 0
5 | gpu_mem 4000
6 | cpu_math_library_num_threads 10
7 | use_mkldnn 1
8 | use_tensorrt 0
9 | use_fp16 0
10 |
11 | # config
12 | model_path ./test_tipc/cpp/inference_models/ocrnet_hrnetw18_cityscapes_1024x512_160k/model.pdmodel
13 | params_path ./test_tipc/cpp/inference_models/ocrnet_hrnetw18_cityscapes_1024x512_160k/model.pdiparams
14 | is_normalize 1
15 | is_resize 0
16 |
17 |
--------------------------------------------------------------------------------
/test_tipc/configs/ocrnet_hrnetw48/ocrnet_hrnetw48_cityscapes_1024x512.yml:
--------------------------------------------------------------------------------
1 | # The ocrnet_hrnetw48 config for train benchmark
2 | _base_: '../_base_/cityscapes.yml'
3 |
4 | batch_size: 2
5 | iters: 500
6 |
7 | model:
8 | type: OCRNet
9 | backbone:
10 | type: HRNet_W48
11 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/hrnet_w48_ssld.tar.gz
12 | num_classes: 19
13 | backbone_indices: [0]
14 |
15 | optimizer:
16 | type: sgd
17 |
18 | lr_scheduler:
19 | type: PolynomialDecay
20 | learning_rate: 0.01
21 | power: 0.9
22 |
23 | loss:
24 | types:
25 | - type: CrossEntropyLoss
26 | - type: CrossEntropyLoss
27 | coef: [1, 0.4]
28 |
--------------------------------------------------------------------------------
/test_tipc/configs/ocrnet_hrnetw48/ocrnet_hrnetw48_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt:
--------------------------------------------------------------------------------
1 | # model load config
2 | model_name ocrnet_hrnetw48
3 | use_gpu 0
4 | gpu_id 0
5 | gpu_mem 4000
6 | cpu_math_library_num_threads 10
7 | use_mkldnn 1
8 | use_tensorrt 0
9 | use_fp16 0
10 |
11 | # config
12 | model_path ./test_tipc/cpp/inference_models/ocrnet_hrnetw48_cityscapes_1024x512_160k/model.pdmodel
13 | params_path ./test_tipc/cpp/inference_models/ocrnet_hrnetw48_cityscapes_1024x512_160k/model.pdiparams
14 | is_normalize 1
15 | is_resize 0
16 |
17 |
--------------------------------------------------------------------------------
/test_tipc/configs/pp_liteseg_stdc1/pp_liteseg_stdc1_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt:
--------------------------------------------------------------------------------
1 | # model load config
2 | model_name pp_liteseg_stdc1
3 | use_gpu 0
4 | gpu_id 0
5 | gpu_mem 4000
6 | cpu_math_library_num_threads 10
7 | use_mkldnn 1
8 | use_tensorrt 0
9 | use_fp16 0
10 |
11 | # config
12 | model_path ./test_tipc/cpp/inference_models/pp_liteseg_infer_model/model.pdmodel
13 | params_path ./test_tipc/cpp/inference_models/pp_liteseg_infer_model/model.pdiparams
14 | is_normalize 1
15 | is_resize 0
16 |
17 |
--------------------------------------------------------------------------------
/test_tipc/configs/pp_liteseg_stdc1/pp_liteseg_stdc1_model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt:
--------------------------------------------------------------------------------
1 | ===========================paddle2onnx_params===========================
2 | model_name:pp_liteseg_stdc1
3 | python:python3.7
4 | 2onnx: paddle2onnx
5 | --model_dir:./test_tipc/infer_models/pp_liteseg_stdc1_fix_shape/
6 | --model_filename:model.pdmodel
7 | --params_filename:model.pdiparams
8 | --save_file:./test_tipc/infer_models/pp_liteseg_stdc1_fix_shape/model.onnx
9 | --opset_version:11
10 | --enable_onnx_checker:True
11 | inference:deploy/python/infer_onnx.py
12 | --onnx_file:./test_tipc/infer_models/pp_liteseg_stdc1_fix_shape/model.onnx
13 | --img_path:test_tipc/cpp/cityscapes_demo.png
--------------------------------------------------------------------------------
/test_tipc/configs/pp_liteseg_stdc2/pp_liteseg_stdc2_cityscapes_1024x512_160k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | model:
4 | type: PPLiteSeg
5 | backbone:
6 | type: STDC2
7 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/PP_STDCNet2.tar.gz
8 |
9 | optimizer:
10 | type: sgd
11 | weight_decay: 0.0005
12 |
13 | loss:
14 | types:
15 | - type: OhemCrossEntropyLoss
16 | min_kept: 130000 # batch_size * 1024 * 512 // 16
17 | - type: OhemCrossEntropyLoss
18 | min_kept: 130000
19 | - type: OhemCrossEntropyLoss
20 | min_kept: 130000
21 | coef: [1, 1, 1]
22 |
23 | batch_size: 4
24 | iters: 160000
25 |
26 | lr_scheduler:
27 | learning_rate: 0.005
28 |
--------------------------------------------------------------------------------
/test_tipc/configs/pp_liteseg_stdc2/pp_liteseg_stdc2_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt:
--------------------------------------------------------------------------------
1 | # model load config
2 | model_name pp_liteseg_stdc2
3 | use_gpu 0
4 | gpu_id 0
5 | gpu_mem 4000
6 | cpu_math_library_num_threads 10
7 | use_mkldnn 1
8 | use_tensorrt 0
9 | use_fp16 0
10 |
11 | # config
12 | model_path ./test_tipc/cpp/inference_models/pp_liteseg_stdc2_cityscapes_1024x512_scale1.0_160k/model.pdmodel
13 | params_path ./test_tipc/cpp/inference_models/pp_liteseg_stdc2_cityscapes_1024x512_scale1.0_160k/model.pdiparams
14 | is_normalize 1
15 | is_resize 0
16 |
17 |
--------------------------------------------------------------------------------
/test_tipc/configs/pp_liteseg_stdc2/pp_liteseg_stdc2_model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt:
--------------------------------------------------------------------------------
1 | ===========================paddle2onnx_params===========================
2 | model_name:pp_liteseg_stdc2
3 | python:python3.7
4 | 2onnx: paddle2onnx
5 | --model_dir:./test_tipc/infer_models/pp_liteseg_stdc2_fix_shape/
6 | --model_filename:model.pdmodel
7 | --params_filename:model.pdiparams
8 | --save_file:./test_tipc/infer_models/pp_liteseg_stdc2_fix_shape/model.onnx
9 | --opset_version:11
10 | --enable_onnx_checker:True
11 | inference:deploy/python/infer_onnx.py
12 | --onnx_file:./test_tipc/infer_models/pp_liteseg_stdc2_fix_shape/model.onnx
13 | --img_path:test_tipc/cpp/cityscapes_demo.png
--------------------------------------------------------------------------------
/test_tipc/configs/pphumanseg_lite/pphumanseg_lite_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt:
--------------------------------------------------------------------------------
1 | # model load config
2 | model_name pphumanseg_lite
3 | use_gpu 0
4 | gpu_id 0
5 | gpu_mem 4000
6 | cpu_math_library_num_threads 10
7 | use_mkldnn 1
8 | use_tensorrt 0
9 | use_fp16 0
10 |
11 | # config
12 | model_path ./test_tipc/cpp/inference_models/pp_humanseg_lite_export_398x224/model.pdmodel
13 | params_path ./test_tipc/cpp/inference_models/pp_humanseg_lite_export_398x224/model.pdiparams
14 | is_normalize 1
15 | is_resize 1
16 | resize_width 398
17 | resize_height 224
18 |
19 |
--------------------------------------------------------------------------------
/test_tipc/configs/pphumanseg_lite/pphumanseg_lite_model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt:
--------------------------------------------------------------------------------
1 | ===========================paddle2onnx_params===========================
2 | model_name:pp_humanseg_lite
3 | python:python3.7
4 | 2onnx: paddle2onnx
5 | --model_dir:./test_tipc/infer_models/pp_humanseg_lite_export_398x224/
6 | --model_filename:model.pdmodel
7 | --params_filename:model.pdiparams
8 | --save_file:./test_tipc/infer_models/pp_humanseg_lite_export_398x224/model.onnx
9 | --opset_version:11
10 | --enable_onnx_checker:True
11 | inference:deploy/python/infer_onnx.py
12 | --onnx_file:./test_tipc/infer_models/pp_humanseg_lite_export_398x224/model.onnx
13 | --img_path:test_tipc/cpp/humanseg_demo.jpg
--------------------------------------------------------------------------------
/test_tipc/configs/pphumanseg_lite/train_ptq_infer_python.txt:
--------------------------------------------------------------------------------
1 | ===========================ptq_params===========================
2 | model_name:pp_humanseg_lite_KL
3 | python:python3.7
4 | ##
5 | --model_dir:test_tipc/output/pp_humanseg_lite_KL/pp_humanseg_lite_export_398x224
6 | ##
7 | --config:test_tipc/configs/pphumanseg_lite/pphumanseg_lite_mini_supervisely.yml
8 | --batch_num:1
9 | --batch_size:1
10 | ##
11 | trainer:PTQ
12 | PTQ:slim/quant/ptq.py
13 | ##
14 | ===========================infer_params===========================
15 | inference:deploy/python/infer.py
16 | --device:cpu|gpu
17 | --batch_size:1
18 | --config:quant_model/deploy.yaml
19 | --image_path:test_tipc/cpp/humanseg_demo.jpg
20 | --benchmark:True
--------------------------------------------------------------------------------
/test_tipc/configs/stdc_stdc1/inference_cpp.txt:
--------------------------------------------------------------------------------
1 | # model load config
2 | model_name stdc_stdc1
3 | use_gpu 0
4 | gpu_id 0
5 | gpu_mem 4000
6 | cpu_math_library_num_threads 10
7 | use_mkldnn 1
8 | use_tensorrt 0
9 | use_fp16 0
10 |
11 | # config
12 | model_path ./test_tipc/cpp/inference_models/stdc1seg_infer_model/model.pdmodel
13 | params_path ./test_tipc/cpp/inference_models/stdc1seg_infer_model/model.pdiparams
14 | is_normalize 1
15 | is_resize 0
16 |
17 |
--------------------------------------------------------------------------------
/test_tipc/configs/stdc_stdc1/stdc1_seg_cityscapes_1024x512_80k.yml:
--------------------------------------------------------------------------------
1 | _base_: '../_base_/cityscapes.yml'
2 |
3 | batch_size: 12
4 | iters: 80000
5 |
6 | model:
7 | type: STDCSeg
8 | backbone:
9 | type: STDC1
10 | pretrained: https://bj.bcebos.com/paddleseg/dygraph/STDCNet1.tar.gz
11 | pretrained: null
12 |
13 | loss:
14 | types:
15 | - type: OhemCrossEntropyLoss
16 | - type: OhemCrossEntropyLoss
17 | - type: OhemCrossEntropyLoss
18 | - type: DetailAggregateLoss
19 | coef: [1, 1, 1, 1]
20 |
--------------------------------------------------------------------------------
/test_tipc/cpp/cityscapes_demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/test_tipc/cpp/cityscapes_demo.png
--------------------------------------------------------------------------------
/test_tipc/cpp/humanseg_demo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/test_tipc/cpp/humanseg_demo.jpg
--------------------------------------------------------------------------------
/test_tipc/docs/cityscapes_demo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/test_tipc/docs/cityscapes_demo.jpg
--------------------------------------------------------------------------------
/test_tipc/docs/compare_right.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/test_tipc/docs/compare_right.png
--------------------------------------------------------------------------------
/test_tipc/docs/compare_wrong.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/test_tipc/docs/compare_wrong.png
--------------------------------------------------------------------------------
/test_tipc/docs/guide.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/test_tipc/docs/guide.png
--------------------------------------------------------------------------------
/test_tipc/docs/test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/test_tipc/docs/test.png
--------------------------------------------------------------------------------
/test_tipc/requirements.txt:
--------------------------------------------------------------------------------
1 | pre-commit
2 | yapf == 0.26.0
3 | flake8
4 | pyyaml >= 5.1
5 | visualdl >= 2.0.0
6 | opencv-python == 4.5.2.54
7 | tqdm
8 | filelock
9 | scipy
10 | prettytable
11 | paddleseg
12 | scikit-image
13 | numba
14 | pymatting
15 |
--------------------------------------------------------------------------------
/test_tipc/results/python_fcn_hrnetw18_small_results_fp16.txt:
--------------------------------------------------------------------------------
1 | #Images: 50
2 | mIoU: 0.8725311260007227
3 | Acc: 0.9448749852040087
4 | Kappa: 0.9448749490413363
5 | Class IoU: [0.92665219 0.81841007]
6 | Class Acc: [0.97443367 0.87082671]
7 |
--------------------------------------------------------------------------------
/test_tipc/results/python_fcn_hrnetw18_small_results_fp32.txt:
--------------------------------------------------------------------------------
1 | #Images: 50
2 | mIoU: 0.8725311260007227
3 | Acc: 0.9448749852040087
4 | Kappa: 0.9448749490413363
5 | Class IoU: [0.92665219 0.81841007]
6 | Class Acc: [0.97443367 0.87082671]
7 |
--------------------------------------------------------------------------------
/test_tipc/test_infer_js.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -o errexit
4 | set -o nounset
5 |
6 | cd test_tipc/web
7 | # run humanseg test in chrome
8 | ./node_modules/.bin/jest --config ./jest.config.js
--------------------------------------------------------------------------------
/test_tipc/web/imgs/human.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/test_tipc/web/imgs/human.jpg
--------------------------------------------------------------------------------
/test_tipc/web/imgs/seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lzzzzzm/Classification-RSImage/9cf360956d2cefb28e3507984a3f3a38f50de916/test_tipc/web/imgs/seg.png
--------------------------------------------------------------------------------
/test_tipc/web/jest-puppeteer.config.js:
--------------------------------------------------------------------------------
1 | // jest-puppeteer.config.js
2 | module.exports = {
3 | launch: {
4 | headless: false,
5 | product: 'chrome'
6 | },
7 | browserContext: 'default',
8 | server: {
9 | command: 'python3 -m http.server 9811',
10 | port: 9811,
11 | launchTimeout: 10000,
12 | debug: true
13 | }
14 | };
--------------------------------------------------------------------------------