├── .gitignore ├── .pre-commit-config.yaml ├── LICENSE ├── README.md ├── codestyle ├── .gitignore ├── clang_format.hook ├── copyright.hook ├── cpplint_pre_commit.hook ├── docstring_checker.py ├── pylint_pre_commit.hook └── test_docstring_checker.py ├── docs ├── README.md ├── data_augmentation.md ├── losses.md └── model_zoo.md ├── plsc-logo.png ├── plsc ├── __init__.py ├── core │ ├── __init__.py │ ├── grad_clip.py │ ├── grad_scaler.py │ ├── param_fuse.py │ ├── recompute.py │ └── sync_utils.py ├── data │ ├── __init__.py │ ├── dataset │ │ ├── __init__.py │ │ ├── common_dataset.py │ │ ├── face_recognition_dataset.py │ │ ├── imagefolder_dataset.py │ │ ├── imagenet_dataset.py │ │ └── tools │ │ │ ├── lfw_style_bin_dataset_converter.py │ │ │ └── mx_recordio_2_images.py │ ├── postprocess │ │ └── __init__.py │ ├── preprocess │ │ ├── __init__.py │ │ ├── basic_transforms.py │ │ ├── batch_transforms.py │ │ └── timm_autoaugment.py │ ├── sampler │ │ ├── __init__.py │ │ └── repeatedaug_sampler.py │ └── utils │ │ ├── __init__.py │ │ ├── batch_collate_fn.py │ │ └── transform_utils.py ├── engine │ ├── __init__.py │ ├── classification │ │ ├── __init__.py │ │ ├── evaluation.py │ │ ├── train.py │ │ └── utils.py │ ├── engine.py │ ├── inference.py │ └── recognition │ │ ├── __init__.py │ │ ├── evaluation.py │ │ ├── train.py │ │ └── utils.py ├── loss │ ├── __init__.py │ ├── celoss.py │ └── marginloss.py ├── metric │ ├── __init__.py │ ├── lfw_utils.py │ └── metrics.py ├── models │ ├── __init__.py │ ├── base_model.py │ ├── cae.py │ ├── cait.py │ ├── convmae │ │ ├── __init__.py │ │ ├── conv_mae.py │ │ └── conv_vit.py │ ├── convnext.py │ ├── deit.py │ ├── face_vit.py │ ├── iresnet.py │ ├── mae.py │ ├── mobilefacenet.py │ ├── swin_transformer.py │ ├── utils │ │ ├── __init__.py │ │ ├── ema.py │ │ ├── pos_embed.py │ │ └── tome.py │ └── vision_transformer.py ├── nn │ ├── __init__.py │ ├── init.py │ ├── norm.py │ └── partialfc.py ├── optimizer │ ├── __init__.py │ ├── adafactor.py │ ├── adamw.py │ 
├── momentum.py │ ├── momentum_lars.py │ └── optimizer.py ├── scheduler │ ├── __init__.py │ └── lr_scheduler.py └── utils │ ├── __init__.py │ ├── config.py │ ├── io.py │ ├── logger.py │ ├── misc.py │ └── profiler.py ├── requirements.txt ├── setup.py ├── task ├── accelerate │ └── tome │ │ ├── README.md │ │ └── validation_tome_vit.ipynb ├── classification │ ├── cait │ │ ├── README.md │ │ ├── configs │ │ │ └── cait_s24_224_in1k_1n8c_dp_fp16o2.yaml │ │ └── train.sh │ ├── convnext │ │ ├── README.md │ │ └── configs │ │ │ ├── ConvNeXt_base_224_in1k_1n8c_dp_fp32.yaml │ │ │ ├── ConvNeXt_base_224_in1k_4n32c_dp_fp16O2.yaml │ │ │ └── ConvNeXt_base_224_in1k_4n32c_dp_fp32.yaml │ ├── deit │ │ ├── README.md │ │ └── configs │ │ │ ├── DeiT_base_patch16_224_in1k_1n8c_dp_fp16o2.yaml │ │ │ ├── DeiT_base_patch16_224_in1k_1n8c_dp_fp32.yaml │ │ │ └── DeiT_base_patch16_224_in1k_2n16c_dp_fp16o2.yaml │ ├── swin │ │ ├── README.md │ │ ├── configs │ │ │ ├── swin_base_patch4_window7_224_in1k_1n8c_dp_fp16o1.yaml │ │ │ └── swin_base_patch4_window7_224_in1k_1n8c_dp_fp16o2.yaml │ │ └── train.sh │ └── vit │ │ ├── README.md │ │ └── configs │ │ ├── ViT_base_patch16_224_in1k_1n8c_dp_fp16o2.yaml │ │ ├── ViT_base_patch16_384_ft_in1k_1n8c_dp_fp16o2.yaml │ │ ├── ViT_large_patch16_224_in21k_4n32c_dp_fp16o2.yaml │ │ └── ViT_large_patch16_384_in1k_ft_4n32c_dp_fp16o2.yaml ├── recognition │ └── face │ │ ├── README.md │ │ ├── configs │ │ ├── FaceViT_base_patch9_112_WebFace42M_CosFace_pfc03_droppath005_mask005_1n8c_dp_mp_fp16o1.yaml │ │ ├── FaceViT_tiny_patch9_112_WebFace42M_CosFace_pfc02_droppath005_mask0_1n8c_dp_mp_fp16o1.yaml │ │ ├── FaceViT_tiny_patch9_112_WebFace42M_CosFace_pfc10_droppath005_mask0_1n8c_dp_mp_fp16o1.yaml │ │ ├── IResNet100_WebFace42M_CosFace_pfc02_1n8c_dp_mp_fp16o1.yaml │ │ ├── IResNet50_MS1MV3_ArcFace_pfc10_1n8c_dp_mp_fp16o1.yaml │ │ └── MobileFaceNet_WebFace42M_CosFace_pfc02_1n8c_dp_mp_fp16o1.yaml │ │ ├── eval_ijbc.sh │ │ ├── export.sh │ │ ├── onnx_helper.py │ │ ├── onnx_ijbc.py │ │ ├── 
photo_clustering.ipynb │ │ └── train.sh └── ssl │ ├── cae │ ├── README.md │ ├── dall_e │ │ ├── __init__.py │ │ ├── dalle_vae.py │ │ ├── decoder.py │ │ ├── encoder.py │ │ └── utils.py │ ├── engine_finetune.py │ ├── engine_pretrain.py │ ├── extract_model.py │ ├── finetune.sh │ ├── linprobe.sh │ ├── main_finetune.py │ ├── main_linprobe.py │ ├── main_pretrain.py │ ├── pretrain.sh │ └── util │ │ ├── loss.py │ │ ├── lr_decay.py │ │ ├── lr_sched.py │ │ ├── masking_generator.py │ │ └── misc.py │ ├── mae │ ├── README.md │ ├── engine_finetune.py │ ├── engine_pretrain.py │ ├── finetune.sh │ ├── finetune_convmae.sh │ ├── linprobe.sh │ ├── linprobe_convmae.sh │ ├── main_finetune.py │ ├── main_linprobe.py │ ├── main_pretrain.py │ ├── pretrain.sh │ ├── pretrain_convmae.sh │ └── util │ │ ├── loss.py │ │ ├── lr_decay.py │ │ ├── lr_sched.py │ │ ├── misc.py │ │ ├── optim_factory.py │ │ └── pos_embed.py │ └── mocov3 │ ├── README.md │ ├── builder_moco.py │ ├── configs │ └── DeiT_base_patch16_224_in1k_1n8c_dp_fp16o1.yaml │ ├── extract_weight.py │ ├── finetune.sh │ ├── linprob.sh │ ├── main_lincls.py │ ├── main_moco.py │ ├── pretrain.sh │ └── vit_moco.py ├── tests ├── CI │ ├── before_hook.sh │ ├── case.sh │ ├── classification │ │ ├── cait │ │ │ └── cait_s24_224_in1k_1n8c_dp_fp16o2.sh │ │ ├── convnext │ │ │ └── ConvNeXt_base_224_in1k_1n8c_dp_fp32.sh │ │ ├── deit │ │ │ ├── DeiT_base_patch16_224_in1k_1n8c_dp_fp16o2.sh │ │ │ └── DeiT_base_patch16_224_in1k_1n8c_dp_fp32.sh │ │ ├── swin │ │ │ └── swin_base_patch4_window7_224_fp16o2.sh │ │ └── vit │ │ │ ├── ViT_base_patch16_224_in1k_1n8c_dp_fp16o2.sh │ │ │ └── ViT_base_patch16_384_ft_in1k_1n8c_dp_fp16o2.sh │ ├── end_hook.sh │ ├── recognition │ │ └── face │ │ │ ├── FaceViT_base_patch9_112_WebFace42M_CosFace_pfc03_droppath005_mask005_1n8c_dp_mp_fp16o1.sh │ │ │ ├── FaceViT_tiny_patch9_112_WebFace42M_CosFace_pfc02_droppath005_mask0_1n8c_dp_mp_fp16o1.sh │ │ │ ├── FaceViT_tiny_patch9_112_WebFace42M_CosFace_pfc10_droppath005_mask0_1n8c_dp_mp_fp16o1.sh 
│ │ │ ├── IResNet100_WebFace42M_CosFace_pfc02_1n8c_dp_mp_fp16o1.sh │ │ │ ├── IResNet50_MS1MV3_ArcFace_pfc01_1n1c_fp16o1.sh │ │ │ ├── IResNet50_MS1MV3_ArcFace_pfc01_1n8c_dp8_fp16o1.sh │ │ │ ├── IResNet50_MS1MV3_ArcFace_pfc10_1n8c_dp_mp_fp16o1.sh │ │ │ └── MobileFaceNet_WebFace42M_CosFace_pfc02_1n8c_dp_mp_fp16o1.sh │ ├── run_all.sh │ └── ssl │ │ ├── cae │ │ ├── cae_base_patch16_224_ft_in1k_1n8c_dp_fp16o1.sh │ │ ├── cae_base_patch16_224_lp_in1k_1n8c_dp_fp16o1.sh │ │ └── cae_base_patch16_224_pt_in1k_1n8c_dp_fp16o1.sh │ │ ├── convmae │ │ ├── convmae_convvit_base_patch16_ft_in1k_1n8c_dp_fp16o1.sh │ │ ├── convmae_convvit_base_patch16_lp_in1k_1n8c_dp_fp16o1.sh │ │ └── convmae_convvit_base_patch16_pt_in1k_1n8c_dp_fp16o1.sh │ │ └── mae │ │ ├── mae_vit_base_patch16_ft_in1k_1n8c_dp_fp16o1.sh │ │ ├── mae_vit_base_patch16_lp_in1k_1n8c_dp_fp16o1.sh │ │ └── mae_vit_base_patch16_pt_in1k_1n8c_dp_fp16o1.sh └── test_tipc │ ├── README.md │ ├── classification │ ├── N1C8 │ │ ├── ConvNeXt_base_224_bs512_fp32_DP8-MP1.sh │ │ ├── DeiT_base_patch16_224_bs128_fp16o2_DP8-MP1.sh │ │ ├── DeiT_base_patch16_224_bs128_fp32_DP8-MP1.sh │ │ ├── ViT_base_patch16_224_bs512_fp16_DP8-MP1.sh │ │ ├── ViT_base_patch16_384_ft_bs512_fp16_DP8-MP1.sh │ │ ├── cait_s24_224_bs128_fp16o2_DP8-MP1.sh │ │ └── swin_base_patch4_window7_224_bs128_fp16o2_DP8-MP1.sh │ └── benchmark_common │ │ ├── prepare.sh │ │ └── run_benchmark.sh │ ├── recognition │ ├── N1C1 │ │ └── IResNet50_pfc01_bs128_fp16_DP1-MP1.sh │ ├── N1C8 │ │ ├── FaceViT_base_patch9_112_pfc03_bs128_fp16_DP8-MP8.sh │ │ ├── FaceViT_tiny_patch9_112_pfc02_bs256_fp16_DP8-MP8.sh │ │ ├── FaceViT_tiny_patch9_112_pfc10_bs256_fp16_DP8-MP8.sh │ │ ├── IResNet100_pfc02_bs128_fp16_DP8-MP8.sh │ │ ├── IResNet50_pfc01_bs128_fp16_DP8-MP1.sh │ │ └── IResNet50_pfc10_bs128_fp16_DP8-MP8.sh │ └── benchmark_common │ │ ├── prepare.sh │ │ └── run_benchmark.sh │ └── ssl │ ├── N1C8 │ ├── convmae_convvit_base_patch16_ft_bs32_fp16o1_DP8.sh │ ├── 
convmae_convvit_base_patch16_lp_bs128_fp16o1_DP8.sh │ ├── convmae_convvit_base_patch16_pt_bs64_fp16o1_DP8.sh │ ├── mae_vit_base_patch16_ft_bs32_fp16o1_DP8.sh │ ├── mae_vit_base_patch16_lp_bs512_fp16o1_DP8.sh │ └── mae_vit_base_patch16_pt_bs128_fp16o1_DP8.sh │ └── benchmark_common │ ├── prepare.sh │ └── run_benchmark.sh ├── tools ├── eval.py ├── export.py └── train.py ├── tutorials ├── README.md ├── advanced │ └── custom_dataset.md ├── basic │ └── config.md ├── demos │ └── face_recognition.md └── get_started │ ├── dataset.md │ ├── installation.md │ └── quick_run_recognition.md └── version.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | 49 | # Translations 50 | *.mo 51 | *.pot 52 | 53 | # Django stuff: 54 | *.log 55 | local_settings.py 56 | 57 | # Flask stuff: 58 | instance/ 59 | .webassets-cache 60 | 61 | # Scrapy stuff: 62 | .scrapy 63 | 64 | # Sphinx documentation 65 | docs/_build/ 66 | 67 | # PyBuilder 68 | target/ 69 | 70 | # Jupyter Notebook 71 | .ipynb_checkpoints 72 | 73 | # pyenv 74 | .python-version 75 | 76 | # celery beat schedule file 77 | celerybeat-schedule 78 | 79 | # SageMath parsed files 80 | *.sage.py 81 | 82 | # dotenv 83 | .env 84 | 85 | # virtualenv 86 | .venv 87 | venv/ 88 | ENV/ 89 | 90 | # Spyder project settings 91 | .spyderproject 92 | .spyproject 93 | 94 | # Rope project settings 95 | .ropeproject 96 | 97 | # mkdocs documentation 98 | /site 99 | 100 | # mypy 101 | .mypy_cache/ 102 | 103 | .DS_Store 104 | 105 | # logs and output 106 | **/output/ 107 | **/log/ 108 | 109 | # dataset 110 | **/dataset/ -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/Lucas-C/pre-commit-hooks.git 3 | sha: v1.0.1 4 | hooks: 5 | - id: remove-crlf 6 | files: (?!.*third_party)^.*$ | (?!.*book)^.*$ 7 | - repo: https://github.com/PaddlePaddle/mirrors-yapf.git 8 | sha: 0d79c0c469bab64f7229c9aca2b1186ef47f0e37 9 | hooks: 10 | - id: yapf 11 | files: (.*\.(py|bzl)|BUILD|.*\.BUILD|WORKSPACE)$ 12 | - repo: https://github.com/pre-commit/pre-commit-hooks 13 | sha: 5bf6c09bfa1297d3692cadd621ef95f1284e33c0 14 | hooks: 15 | - id: check-added-large-files 16 | - id: check-merge-conflict 17 | - id: check-symlinks 18 | - id: detect-private-key 19 | 
files: (?!.*third_party)^.*$ | (?!.*book)^.*$ 20 | - id: end-of-file-fixer 21 | - repo: local 22 | hooks: 23 | - id: clang-format-with-version-check 24 | name: clang-format 25 | description: Format files with ClangFormat. 26 | entry: bash ./codestyle/clang_format.hook -i 27 | language: system 28 | files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|proto)$ 29 | - repo: local 30 | hooks: 31 | - id: cpplint-cpp-source 32 | name: cpplint 33 | description: Check C++ code style using cpplint.py. 34 | entry: bash ./codestyle/cpplint_pre_commit.hook 35 | language: system 36 | files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx)$ 37 | - repo: local 38 | hooks: 39 | - id: pylint-doc-string 40 | name: pylint 41 | description: Check python docstring style using docstring_checker. 42 | entry: bash ./codestyle/pylint_pre_commit.hook 43 | language: system 44 | files: \.(py)$ 45 | - repo: local 46 | hooks: 47 | - id: copyright_checker 48 | name: copyright_checker 49 | entry: python ./codestyle/copyright.hook 50 | language: system 51 | files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|proto|py|sh)$ 52 | exclude: (?!.*third_party)^.*$ | (?!.*book)^.*$ 53 | -------------------------------------------------------------------------------- /codestyle/.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | -------------------------------------------------------------------------------- /codestyle/clang_format.hook: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | readonly VERSION="13.0.0" 5 | 6 | version=$(clang-format -version) 7 | 8 | if ! [[ $(python -V 2>&1 | awk '{print $2}' | awk -F '.' '{print $1$2}') -ge 36 ]]; then 9 | echo "clang-format installation by pip need python version great equal 3.6, 10 | please change the default python to higher version." 11 | exit 1 12 | fi 13 | 14 | if ! 
[[ $version == *"$VERSION"* ]]; then 15 | # low version of pip may not have the source of clang-format whl 16 | pip install --upgrade pip 17 | pip install clang-format==13.0.0 18 | fi 19 | 20 | clang-format $@ 21 | -------------------------------------------------------------------------------- /codestyle/cpplint_pre_commit.hook: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TOTAL_ERRORS=0 4 | 5 | readonly VERSION="1.6.0" 6 | 7 | version=$(cpplint --version) 8 | 9 | if [[ ! $TRAVIS_BRANCH ]]; then 10 | # install cpplint on local machine. 11 | if ! [[ $version == *"$VERSION"* ]]; then 12 | pip install cpplint==1.6.0 13 | fi 14 | # diff files on local machine. 15 | files=$(git diff --cached --name-status | awk '$1 != "D" {print $2}') 16 | else 17 | # diff files between PR and latest commit on Travis CI. 18 | branch_ref=$(git rev-parse "$TRAVIS_BRANCH") 19 | head_ref=$(git rev-parse HEAD) 20 | files=$(git diff --name-status $branch_ref $head_ref | awk '$1 != "D" {print $2}') 21 | fi 22 | # The trick to remove deleted files: https://stackoverflow.com/a/2413151 23 | for file in $files; do 24 | if [[ $file =~ ^(patches/.*) ]]; then 25 | continue; 26 | else 27 | cpplint --filter=-readability/fn_size,-build/include_what_you_use,-build/c++11,-whitespace/parens $file; 28 | TOTAL_ERRORS=$(expr $TOTAL_ERRORS + $?); 29 | fi 30 | done 31 | 32 | exit $TOTAL_ERRORS 33 | -------------------------------------------------------------------------------- /codestyle/pylint_pre_commit.hook: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TOTAL_ERRORS=0 4 | 5 | 6 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 7 | export PYTHONPATH=$DIR:$PYTHONPATH 8 | 9 | readonly VERSION="2.12.0" 10 | version=$(pylint --version | grep 'pylint') 11 | 12 | if ! 
[[ $version == *"$VERSION"* ]]; then 13 | pip install pylint==2.12.0 14 | fi 15 | 16 | # The trick to remove deleted files: https://stackoverflow.com/a/2413151 17 | for file in $(git diff --name-status | awk '$1 != "D" {print $2}'); do 18 | pylint --disable=all --load-plugins=docstring_checker \ 19 | --enable=doc-string-one-line,doc-string-end-with,doc-string-with-all-args,doc-string-triple-quotes,doc-string-missing,doc-string-indent-error,doc-string-with-returns,doc-string-with-raises $file; 20 | TOTAL_ERRORS=$(expr $TOTAL_ERRORS + $?); 21 | done 22 | 23 | exit $TOTAL_ERRORS 24 | #For now, just warning: 25 | #exit 0 26 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PaddlePaddle/PLSC/bd0c824649f820d18711bc268381321545683256/docs/README.md -------------------------------------------------------------------------------- /docs/losses.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PaddlePaddle/PLSC/bd0c824649f820d18711bc268381321545683256/docs/losses.md -------------------------------------------------------------------------------- /docs/model_zoo.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PaddlePaddle/PLSC/bd0c824649f820d18711bc268381321545683256/docs/model_zoo.md -------------------------------------------------------------------------------- /plsc-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PaddlePaddle/PLSC/bd0c824649f820d18711bc268381321545683256/plsc-logo.png -------------------------------------------------------------------------------- /plsc/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 
PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | from plsc import core as core 16 | from plsc import data as data 17 | from plsc import loss as loss 18 | from plsc import metric as metric 19 | from plsc import models as models 20 | from plsc import nn as nn 21 | from plsc import optimizer as optimizer 22 | from plsc import scheduler as scheduler 23 | from plsc import utils as utils 24 | -------------------------------------------------------------------------------- /plsc/core/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | from .recompute import recompute_warp 16 | from .grad_scaler import GradScaler 17 | from .sync_utils import grad_sync, param_sync 18 | from .param_fuse import get_fused_params 19 | -------------------------------------------------------------------------------- /plsc/core/recompute.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | import functools 16 | 17 | import paddle 18 | from paddle import nn 19 | from paddle.distributed.fleet.utils import recompute 20 | 21 | 22 | def wrap_forward(func, newfunc): 23 | @functools.wraps(func) 24 | def run(*args, **kwargs): 25 | return newfunc(func, *args, **kwargs) 26 | 27 | return run 28 | 29 | 30 | def recompute_forward(func, *args, **kwargs): 31 | return recompute(func, *args, **kwargs) 32 | 33 | 34 | def recompute_warp(model, layerlist_interval=1, names=[]): 35 | 36 | for name, layer in model._sub_layers.items(): 37 | if isinstance(layer, nn.LayerList): 38 | for idx, sub_layer in enumerate(layer): 39 | if layerlist_interval >= 1 and idx % layerlist_interval == 0: 40 | sub_layer.forward = wrap_forward(sub_layer.forward, 41 | recompute_forward) 42 | if name in names: 43 | layer.forward = wrap_forward(layer.forward, recompute_forward) 44 | -------------------------------------------------------------------------------- /plsc/core/sync_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | import paddle 16 | 17 | 18 | @paddle.no_grad() 19 | def grad_sync(param_groups, comm_group=None, grad_avg=True): 20 | """ 21 | sync the gradients of params 22 | """ 23 | 24 | nranks = paddle.distributed.get_world_size( 25 | ) if comm_group is None else comm_group.nranks 26 | 27 | if nranks < 2: 28 | return 29 | 30 | for group in param_groups: 31 | for p in group['params']: 32 | if p.is_distributed: 33 | continue 34 | 35 | grad = p.grad 36 | if grad is None: 37 | continue 38 | 39 | paddle.distributed.all_reduce(grad, sync_op=True, group=comm_group) 40 | if grad_avg: 41 | p.grad.detach().scale_(1.0 / nranks) 42 | 43 | return None 44 | 45 | 46 | @paddle.no_grad() 47 | def param_sync(model, src_rank=0, comm_group=None): 48 | """ 49 | sync the gradients of params 50 | """ 51 | 52 | nranks = paddle.distributed.get_world_size( 53 | ) if comm_group is None else comm_group.nranks 54 | 55 | if nranks < 2: 56 | return 57 | 58 | for _, param in model._obtain_parameters_buffers().items(): 59 | 60 | if hasattr(param, 'is_distributed') and param.is_distributed: 61 | continue 62 | 63 | if getattr(param, "no_sync", False): 64 | continue 65 | 66 | paddle.distributed.broadcast( 67 | param, src=src_rank, group=comm_group, sync_op=True) 68 | 69 | return None 70 | -------------------------------------------------------------------------------- /plsc/data/dataset/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | import numpy as np 16 | from PIL import Image 17 | import cv2 18 | 19 | _image_backend = "pil" 20 | 21 | 22 | def set_image_backend(backend): 23 | """ 24 | Specifies the package used to load images. 25 | 26 | Args: 27 | backend (string): Name of the image backend. one of {'PIL', 'accimage'}. 28 | The :mod:`accimage` package uses the Intel IPP library. It is 29 | generally faster than PIL, but does not support as many operations. 30 | """ 31 | global _image_backend 32 | if backend not in ["pil", "cv2"]: 33 | raise ValueError( 34 | f"Invalid backend '{backend}'. 
Options are 'pil' and 'cv2'") 35 | _image_backend = backend 36 | 37 | 38 | def get_image_backend(): 39 | """ 40 | Gets the name of the package used to load images 41 | """ 42 | return _image_backend 43 | 44 | 45 | def pil_loader(path: str): 46 | # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835) 47 | with open(path, "rb") as f: 48 | img = Image.open(f) 49 | img = img.convert("RGB") 50 | return img 51 | 52 | 53 | def cv2_loader(path: str): 54 | return cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB) 55 | 56 | 57 | def default_loader(path: str): 58 | if get_image_backend() == "cv2": 59 | return cv2_loader(path) 60 | else: 61 | return pil_loader(path) 62 | 63 | 64 | from .imagenet_dataset import ImageNetDataset 65 | from .face_recognition_dataset import FaceIdentificationDataset, FaceVerificationDataset, FaceRandomDataset 66 | from .imagefolder_dataset import ImageFolder 67 | -------------------------------------------------------------------------------- /plsc/data/dataset/common_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | from __future__ import print_function 16 | 17 | import numpy as np 18 | 19 | from paddle.io import Dataset 20 | from plsc.utils import logger 21 | 22 | 23 | class CommonDataset(Dataset): 24 | def __init__(self, 25 | image_root, 26 | cls_label_path, 27 | transform_ops=None, 28 | delimiter=" ", 29 | multi_label=False, 30 | class_num=None): 31 | if multi_label: 32 | assert class_num is not None, "Must set class_num when multi_label=True" 33 | self.multi_label = multi_label 34 | self.classes_num = class_num 35 | 36 | self._img_root = image_root 37 | self._cls_path = cls_label_path 38 | self.delimiter = delimiter 39 | if transform_ops: 40 | self._transform_ops = transform_ops 41 | 42 | self.images = [] 43 | self.labels = [] 44 | self._load_anno() 45 | 46 | def _load_anno(self): 47 | pass 48 | 49 | def __getitem__(self, idx): 50 | with open(self.images[idx], 'rb') as f: 51 | img = f.read() 52 | if self._transform_ops: 53 | img = self._transform_ops(img) 54 | if self.multi_label: 55 | one_hot = np.zeros([self.classes_num], dtype=np.float32) 56 | cls_idx = [int(e) for e in self.labels[idx].split(',')] 57 | for idx in cls_idx: 58 | one_hot[idx] = 1.0 59 | return (img, one_hot) 60 | else: 61 | return (img, np.int32(self.labels[idx])) 62 | 63 | def __len__(self): 64 | return len(self.images) 65 | 66 | @property 67 | def class_num(self): 68 | if self.multi_label: 69 | return self.classes_num 70 | return len(set(self.labels)) 71 | -------------------------------------------------------------------------------- /plsc/data/dataset/imagenet_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | from __future__ import print_function 16 | 17 | import numpy as np 18 | import os 19 | 20 | from .common_dataset import CommonDataset 21 | 22 | 23 | class ImageNetDataset(CommonDataset): 24 | def __init__(self, 25 | image_root, 26 | cls_label_path, 27 | transform_ops=None, 28 | delimiter=" ", 29 | multi_label=False, 30 | class_num=None): 31 | super(ImageNetDataset, self).__init__(image_root, cls_label_path, 32 | transform_ops, delimiter, 33 | multi_label, class_num) 34 | 35 | def _load_anno(self, seed=None): 36 | assert os.path.exists( 37 | self._cls_path), f"{self._cls_path} does not exists" 38 | assert os.path.exists( 39 | self._img_root), f"{self._img_root} does not exists" 40 | self.images = [] 41 | self.labels = [] 42 | 43 | with open(self._cls_path) as fd: 44 | lines = fd.readlines() 45 | if seed is not None: 46 | np.random.RandomState(seed).shuffle(lines) 47 | for l in lines: 48 | l = l.strip().split(self.delimiter) 49 | self.images.append(os.path.join(self._img_root, l[0])) 50 | if self.multi_label: 51 | self.labels.append(l[1]) 52 | else: 53 | self.labels.append(np.int32(l[1])) 54 | assert os.path.exists(self.images[ 55 | -1]), f"{self.images[-1]} is not exists." 56 | -------------------------------------------------------------------------------- /plsc/data/dataset/tools/mx_recordio_2_images.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | import os 16 | import argparse 17 | import numpy as np 18 | import numbers 19 | import mxnet as mx 20 | import cv2 21 | import tqdm 22 | import shutil 23 | 24 | 25 | def main(args): 26 | path_imgrec = os.path.join(args.root_dir, 'train.rec') 27 | path_imgidx = os.path.join(args.root_dir, 'train.idx') 28 | imgrec = mx.recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r') 29 | s = imgrec.read_idx(0) 30 | header, _ = mx.recordio.unpack(s) 31 | if header.flag > 0: 32 | header0 = (int(header.label[0]), int(header.label[1])) 33 | imgidx = np.array(range(1, int(header.label[0]))) 34 | else: 35 | imgidx = np.array(list(imgrec.keys)) 36 | 37 | classes = set() 38 | os.makedirs(os.path.join(args.output_dir, 'images'), exist_ok=True) 39 | fp = open(os.path.join(args.output_dir, 'label.txt'), 'w') 40 | for idx in tqdm.tqdm(imgidx): 41 | s = imgrec.read_idx(idx) 42 | header, img = mx.recordio.unpack(s) 43 | label = header.label 44 | if not isinstance(label, numbers.Number): 45 | label = label[0] 46 | img = mx.image.imdecode(img).asnumpy()[..., ::-1] 47 | label = int(label) 48 | classes.add(label) 49 | 50 | filename = 'images/%08d.jpg' % idx 51 | fp.write('%s\t%d\n' % (filename, label)) 52 | cv2.imwrite( 53 | os.path.join(args.output_dir, filename), img, 54 | [int(cv2.IMWRITE_JPEG_QUALITY), 100]) 55 | fp.close() 56 | shutil.copy( 57 | os.path.join(args.root_dir, 'agedb_30.bin'), 58 | 
os.path.join(args.output_dir, 'agedb_30.bin')) 59 | shutil.copy( 60 | os.path.join(args.root_dir, 'cfp_fp.bin'), 61 | os.path.join(args.output_dir, 'cfp_fp.bin')) 62 | shutil.copy( 63 | os.path.join(args.root_dir, 'lfw.bin'), 64 | os.path.join(args.output_dir, 'lfw.bin')) 65 | print('num_image: ', len(imgidx), 'num_classes: ', len(classes)) 66 | with open(os.path.join(args.output_dir, 'README.md'), 'w') as f: 67 | f.write('num_image: {}\n'.format(len(imgidx))) 68 | f.write('num_classes: {}\n'.format(len(classes))) 69 | 70 | 71 | if __name__ == '__main__': 72 | parser = argparse.ArgumentParser() 73 | parser.add_argument( 74 | "--root_dir", 75 | type=str, 76 | help="Root directory to mxnet dataset.", ) 77 | parser.add_argument( 78 | "--output_dir", 79 | type=str, 80 | help="Path to output.", ) 81 | args = parser.parse_args() 82 | main(args) 83 | -------------------------------------------------------------------------------- /plsc/data/postprocess/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /plsc/data/preprocess/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | # Unless required by applicable law or agreed to in writing, software 9 | # distributed under the License is distributed on an "AS IS" BASIS, 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | # See the License for the specific language governing permissions and 12 | # limitations under the License. 13 | 14 | from .basic_transforms import * 15 | from .batch_transforms import Mixup, Cutmix, TransformOpSampler 16 | from .timm_autoaugment import TimmAutoAugment 17 | -------------------------------------------------------------------------------- /plsc/data/sampler/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | from paddle.io import DistributedBatchSampler, BatchSampler 16 | from .repeatedaug_sampler import RepeatedAugSampler 17 | -------------------------------------------------------------------------------- /plsc/data/sampler/repeatedaug_sampler.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 PaddlePaddle Authors. 
class RepeatedAugSampler(DistributedBatchSampler):
    """Distributed batch sampler with repeated augmentation.

    Each sample index is repeated ``num_repeats`` times within an epoch, so a
    batch can contain several (differently augmented) copies of the same
    image, while each replica still yields roughly ``len(dataset) / nranks``
    indices per epoch.

    NOTE: the original docstring described a person-re-id PK sampler
    (identities/camid); it did not match this implementation and is replaced.

    Args:
        dataset: dataset to sample from.
        batch_size (int): number of samples per batch on each replica.
        num_replicas (int, optional): number of participating processes.
        rank (int, optional): rank of the current process.
        shuffle (bool): whether to shuffle indices every epoch.
        drop_last (bool): whether to drop the final incomplete batch.
        num_repeats (int): how many times each sample is repeated. Default 3
            (the original hard-coded value, now parameterized).
    """

    def __init__(self,
                 dataset,
                 batch_size,
                 num_replicas=None,
                 rank=None,
                 shuffle=False,
                 drop_last=False,
                 num_repeats=3):
        super(RepeatedAugSampler, self).__init__(
            dataset, batch_size, num_replicas, rank, shuffle, drop_last)
        self.num_repeats = num_repeats
        # Each replica iterates over ceil(N * num_repeats / nranks) repeated
        # indices ...
        self.num_samples = int(
            math.ceil(len(self.dataset) * float(num_repeats) / self.nranks))
        self.total_size = self.num_samples * self.nranks
        # ... but only the first num_selected_samples are actually yielded
        # (dataset size rounded down to a multiple of 256, split over ranks),
        # so one epoch still covers the dataset about once.
        self.num_selected_samples = int(
            math.floor(len(self.dataset) // 256 * 256 / self.nranks))

    def __iter__(self):
        num_samples = len(self.dataset)
        indices = np.arange(num_samples).tolist()
        if self.shuffle:
            # Seed with the epoch so every replica shuffles identically.
            np.random.RandomState(self.epoch).shuffle(indices)
            self.epoch += 1

        # Repeat each index num_repeats times, then pad up to total_size.
        indices = [ele for ele in indices for _ in range(self.num_repeats)]
        indices += indices[:(self.total_size - len(indices))]
        assert len(indices) == self.total_size

        # Subsample with an interleaved stride so adjacent repeats of the
        # same image land on different replicas.
        indices = indices[self.local_rank:self.total_size:self.nranks]
        assert len(indices) == self.num_samples
        _sample_iter = iter(indices[:self.num_selected_samples])

        batch_indices = []
        for idx in _sample_iter:
            batch_indices.append(idx)
            if len(batch_indices) == self.batch_size:
                yield batch_indices
                batch_indices = []
        if not self.drop_last and len(batch_indices) > 0:
            yield batch_indices

    def __len__(self):
        # Number of batches per epoch; the +(batch_size-1) rounds up when the
        # trailing partial batch is kept.
        num_samples = self.num_selected_samples
        num_samples += int(not self.drop_last) * (self.batch_size - 1)
        return num_samples // self.batch_size
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | from .transform_utils import create_preprocess_operators 16 | from .batch_collate_fn import default_collate_fn 17 | -------------------------------------------------------------------------------- /plsc/data/utils/transform_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | from plsc.data import preprocess 16 | 17 | 18 | def create_preprocess_operators(params): 19 | """ 20 | create operators based on the config 21 | Args: 22 | params(list): a dict list, used to create some operators 23 | """ 24 | assert params is None or isinstance(params, list), ( 25 | 'operator config should be a list or None') 26 | if params is None: 27 | return None 28 | 29 | ops = [] 30 | for operator in params: 31 | assert isinstance(operator, 32 | dict) and len(operator) == 1, "yaml format error" 33 | op_name = list(operator)[0] 34 | param = {} if operator[op_name] is None else operator[op_name] 35 | op = getattr(preprocess, op_name)(**param) 36 | ops.append(op) 37 | 38 | if len(ops) > 0: 39 | return preprocess.Compose(ops) 40 | return None 41 | -------------------------------------------------------------------------------- /plsc/engine/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /plsc/engine/classification/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | from .train import default_train_one_epoch 16 | from .evaluation import default_eval 17 | -------------------------------------------------------------------------------- /plsc/engine/classification/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
def update_metric(trainer, out, batch, batch_size):
    """Compute training metrics for the current batch and fold them into the
    trainer's smoothed ``output_info`` accumulators (one per metric key)."""
    if trainer.train_metric_func is not None:
        metric_dict = trainer.train_metric_func(out, batch[-1])
        for key in metric_dict:
            if key not in trainer.output_info:
                # Lazily create a smoothing window sized to the print period.
                trainer.output_info[key] = SmoothedValue(
                    window_size=trainer.print_batch_step)
            trainer.output_info[key].update(metric_dict[key], batch_size)


def update_loss(trainer, loss_dict, batch_size):
    """Fold each loss component into the trainer's smoothed accumulators."""
    for key in loss_dict:
        if key not in trainer.output_info:
            trainer.output_info[key] = SmoothedValue(
                window_size=trainer.print_batch_step)
        # .item() detaches the scalar tensor to a Python float.
        trainer.output_info[key].update(loss_dict[key].item(), batch_size)


def log_info(trainer, batch_size, epoch_id, iter_id):
    """Log one training-iteration summary (lr, metrics, timings, throughput,
    eta) and emit the same scalars to the VisualDL writer.

    Fix: removed the dead ``lr_msg = "lr: none"`` assignment that was
    unconditionally overwritten on the next line.
    """
    lr_msg = "lr: {:.6f}".format(trainer.optimizer.get_lr())

    metric_msg = ", ".join([
        "{}: {:.5f}".format(key, trainer.output_info[key].avg)
        for key in trainer.output_info
    ])
    time_msg = "s, ".join([
        "{}: {:.5f}".format(key, trainer.time_info[key].avg)
        for key in trainer.time_info
    ])

    # Throughput is reported for the global batch across all workers.
    total_batch_size = batch_size * trainer.config["Global"]["world_size"]
    ips_msg = "ips: {:.5f} images/sec".format(
        total_batch_size / trainer.time_info["batch_cost"].avg)
    eta_sec = ((trainer.config["Global"]["epochs"] - epoch_id + 1
                ) * len(trainer.train_dataloader) - iter_id
               ) * trainer.time_info["batch_cost"].avg
    eta_msg = "eta: {:s}".format(str(datetime.timedelta(seconds=int(eta_sec))))
    logger.info("[Train][Epoch {}/{}][Iter: {}/{}]{}, {}, {}, {}, {}".format(
        epoch_id, trainer.config["Global"]["epochs"], iter_id,
        len(trainer.train_dataloader), lr_msg, metric_msg, time_msg, ips_msg,
        eta_msg))

    logger.scaler(
        name="lr",
        value=trainer.optimizer.get_lr(),
        step=trainer.global_step,
        writer=trainer.vdl_writer)
    for key in trainer.output_info:
        logger.scaler(
            name="train_{}".format(key),
            value=trainer.output_info[key].avg,
            step=trainer.global_step,
            writer=trainer.vdl_writer)
def update_metric(trainer, out, batch, batch_size):
    """Compute training metrics for the current batch and fold them into the
    trainer's smoothed ``output_info`` accumulators (one per metric key)."""
    if trainer.train_metric_func is not None:
        metric_dict = trainer.train_metric_func(out, batch[-1])
        for key in metric_dict:
            if key not in trainer.output_info:
                # Lazily create a smoothing window sized to the print period.
                trainer.output_info[key] = SmoothedValue(
                    window_size=trainer.print_batch_step)
            trainer.output_info[key].update(metric_dict[key], batch_size)


def update_loss(trainer, loss_dict, batch_size):
    """Fold each loss component into the trainer's smoothed accumulators."""
    for key in loss_dict:
        if key not in trainer.output_info:
            trainer.output_info[key] = SmoothedValue(
                window_size=trainer.print_batch_step)
        # .item() detaches the scalar tensor to a Python float.
        trainer.output_info[key].update(loss_dict[key].item(), batch_size)


def log_info(trainer, batch_size, epoch_id, iter_id):
    """Log one training-iteration summary (lr, metrics, timings, throughput,
    eta) and emit the same scalars to the VisualDL writer.

    Fix: removed the dead ``lr_msg = "lr: none"`` assignment that was
    unconditionally overwritten on the next line.
    """
    lr_msg = "lr: {:.6f}".format(trainer.optimizer.get_lr())

    metric_msg = ", ".join([
        "{}: {:.5f}".format(key, trainer.output_info[key].avg)
        for key in trainer.output_info
    ])
    time_msg = "s, ".join([
        "{}: {:.5f}".format(key, trainer.time_info[key].avg)
        for key in trainer.time_info
    ])

    # Throughput is reported for the global batch across all workers.
    total_batch_size = batch_size * trainer.config["Global"]["world_size"]
    ips_msg = "ips: {:.5f} images/sec".format(
        total_batch_size / trainer.time_info["batch_cost"].avg)
    eta_sec = ((trainer.config["Global"]["epochs"] - epoch_id + 1
                ) * len(trainer.train_dataloader) - iter_id
               ) * trainer.time_info["batch_cost"].avg
    eta_msg = "eta: {:s}".format(str(datetime.timedelta(seconds=int(eta_sec))))
    logger.info("[Train][Epoch {}/{}][Iter: {}/{}]{}, {}, {}, {}, {}".format(
        epoch_id, trainer.config["Global"]["epochs"], iter_id,
        len(trainer.train_dataloader), lr_msg, metric_msg, time_msg, ips_msg,
        eta_msg))

    logger.scaler(
        name="lr",
        value=trainer.optimizer.get_lr(),
        step=trainer.global_step,
        writer=trainer.vdl_writer)
    for key in trainer.output_info:
        logger.scaler(
            name="train_{}".format(key),
            value=trainer.output_info[key].avg,
            step=trainer.global_step,
            writer=trainer.vdl_writer)
14 | 15 | import copy 16 | 17 | import paddle 18 | import paddle.nn as nn 19 | 20 | from plsc.utils import logger 21 | from .celoss import CELoss, ViTCELoss 22 | from .marginloss import MarginLoss 23 | 24 | 25 | class CombinedLoss(nn.Layer): 26 | def __init__(self, config_list): 27 | super().__init__() 28 | self.loss_func = [] 29 | self.loss_weight = [] 30 | assert isinstance(config_list, list), ( 31 | 'operator config should be a list') 32 | for config in config_list: 33 | assert isinstance(config, 34 | dict) and len(config) == 1, "yaml format error" 35 | name = list(config)[0] 36 | param = config[name] 37 | assert "weight" in param, "weight must be in param, but param just contains {}".format( 38 | param.keys()) 39 | self.loss_weight.append(param.pop("weight")) 40 | self.loss_func.append(eval(name)(**param)) 41 | 42 | def __call__(self, input, target): 43 | if isinstance(input, dict) and input["logits"].dtype == paddle.float16: 44 | input["logits"] = paddle.cast(input["logits"], 'float32') 45 | elif input.dtype == paddle.float16: 46 | input = paddle.cast(input, 'float32') 47 | 48 | loss_dict = {} 49 | for idx, loss_func in enumerate(self.loss_func): 50 | loss = loss_func(input, target) 51 | weight = self.loss_weight[idx] 52 | loss = {key: loss[key] * weight for key in loss} 53 | loss_dict.update(loss) 54 | loss_dict["loss"] = paddle.add_n(list(loss_dict.values())) 55 | return loss_dict 56 | 57 | 58 | def build_loss(config): 59 | module_class = CombinedLoss(copy.deepcopy(config)) 60 | logger.debug("build loss {} success.".format(module_class)) 61 | return module_class 62 | -------------------------------------------------------------------------------- /plsc/loss/marginloss.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | # Unless required by applicable law or agreed to in writing, software 9 | # distributed under the License is distributed on an "AS IS" BASIS, 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | # See the License for the specific language governing permissions and 12 | # limitations under the License. 13 | 14 | import paddle 15 | import paddle.nn as nn 16 | import paddle.nn.functional as F 17 | 18 | 19 | class MarginLoss(nn.Layer): 20 | """ 21 | SphereFace: https://arxiv.org/abs/1704.08063 22 | m1=1.35, m2=0.0, m3=0.0, s=64.0 23 | CosFace: https://arxiv.org/abs/1801.09414 24 | m1=1.0, m2=0.0, m3=0.4, s=64.0 25 | ArcFace: https://arxiv.org/abs/1801.07698 26 | m1=1.0, m2=0.5, m3=0.0, s=64.0 27 | 28 | Default: ArcFace 29 | """ 30 | 31 | def __init__(self, m1=1.0, m2=0.5, m3=0.0, s=64.0, model_parallel=False): 32 | super().__init__() 33 | self.m1 = m1 34 | self.m2 = m2 35 | self.m3 = m3 36 | self.s = s 37 | 38 | # Default we use model parallel when group=None. 39 | # When group=False, it is equal to data parallel. 40 | self.group = None 41 | if not model_parallel: 42 | self.group = False 43 | 44 | def forward(self, x, label): 45 | if isinstance(x, dict): 46 | x = x["logits"] 47 | 48 | loss = F.margin_cross_entropy( 49 | x, 50 | label, 51 | margin1=self.m1, 52 | margin2=self.m2, 53 | margin3=self.m3, 54 | scale=self.s, 55 | return_softmax=False, 56 | reduction=None, 57 | group=self.group, ) 58 | 59 | loss = loss.mean() 60 | return {"MarginLoss": loss} 61 | -------------------------------------------------------------------------------- /plsc/metric/__init__.py: -------------------------------------------------------------------------------- 1 | #copyright (c) 2021 PaddlePaddle Authors. 
All Rights Reserve. 2 | # 3 | #Licensed under the Apache License, Version 2.0 (the "License"); 4 | #you may not use this file except in compliance with the License. 5 | #You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | #Unless required by applicable law or agreed to in writing, software 10 | #distributed under the License is distributed on an "AS IS" BASIS, 11 | #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | #See the License for the specific language governing permissions and 13 | #limitations under the License. 14 | 15 | from paddle import nn 16 | import copy 17 | from collections import OrderedDict 18 | 19 | from .metrics import * 20 | 21 | 22 | class CombinedMetrics(nn.Layer): 23 | def __init__(self, config_list): 24 | super().__init__() 25 | self.metric_func_list = [] 26 | assert isinstance(config_list, list), ( 27 | 'operator config should be a list') 28 | for config in config_list: 29 | assert isinstance(config, 30 | dict) and len(config) == 1, "yaml format error" 31 | metric_name = list(config)[0] 32 | metric_params = config[metric_name] 33 | if metric_params is not None: 34 | self.metric_func_list.append( 35 | eval(metric_name)(**metric_params)) 36 | else: 37 | self.metric_func_list.append(eval(metric_name)()) 38 | 39 | def __call__(self, *args, **kwargs): 40 | metric_dict = OrderedDict() 41 | for idx, metric_func in enumerate(self.metric_func_list): 42 | metric_dict.update(metric_func(*args, **kwargs)) 43 | return metric_dict 44 | 45 | 46 | def build_metrics(config): 47 | metrics_list = CombinedMetrics(copy.deepcopy(config)) 48 | return metrics_list 49 | -------------------------------------------------------------------------------- /plsc/models/__init__.py: -------------------------------------------------------------------------------- 1 | #copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. 
2 | # 3 | #Licensed under the Apache License, Version 2.0 (the "License"); 4 | #you may not use this file except in compliance with the License. 5 | #You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | #Unless required by applicable law or agreed to in writing, software 10 | #distributed under the License is distributed on an "AS IS" BASIS, 11 | #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | #See the License for the specific language governing permissions and 13 | #limitations under the License. 14 | 15 | import copy 16 | import importlib 17 | 18 | from .base_model import Model 19 | from .vision_transformer import * 20 | from .deit import * 21 | from .iresnet import * 22 | from .face_vit import * 23 | from .mobilefacenet import * 24 | from .cait import * 25 | from .mae import * 26 | from .convmae import * 27 | from .swin_transformer import * 28 | from .cae import * 29 | from .convnext import * 30 | 31 | __all__ = ["build_model"] 32 | 33 | 34 | def build_model(config): 35 | config = copy.deepcopy(config) 36 | model_type = config.pop("name") 37 | mod = importlib.import_module(__name__) 38 | model = getattr(mod, model_type)(**config) 39 | assert isinstance(model, 40 | Model), 'model must inherit from plsc.models.Model' 41 | return model 42 | -------------------------------------------------------------------------------- /plsc/models/base_model.py: -------------------------------------------------------------------------------- 1 | # copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | from __future__ import absolute_import 16 | from __future__ import division 17 | from __future__ import print_function 18 | 19 | from abc import ABCMeta, abstractmethod 20 | 21 | import paddle 22 | import paddle.nn as nn 23 | 24 | 25 | class Model(nn.Layer): 26 | __metaclass__ = ABCMeta 27 | 28 | def __init__(self): 29 | super().__init__() 30 | 31 | @abstractmethod 32 | def load_pretrained(self, path, rank=0, finetune=False): 33 | raise Exception( 34 | "NotImplementedError, you must overwrite load_pretrained method in subclass." 35 | ) 36 | 37 | @abstractmethod 38 | def save(self, path, local_rank=0, rank=0): 39 | raise Exception( 40 | "NotImplementedError, you must overwrite save method in subclass.") 41 | -------------------------------------------------------------------------------- /plsc/models/convmae/__init__.py: -------------------------------------------------------------------------------- 1 | #copyright (c) 2023 PaddlePaddle Authors. All Rights Reserve. 2 | # 3 | #Licensed under the Apache License, Version 2.0 (the "License"); 4 | #you may not use this file except in compliance with the License. 5 | #You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | #Unless required by applicable law or agreed to in writing, software 10 | #distributed under the License is distributed on an "AS IS" BASIS, 11 | #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | #See the License for the specific language governing permissions and 13 | #limitations under the License. 14 | 15 | from .conv_mae import * 16 | from .conv_vit import * 17 | -------------------------------------------------------------------------------- /plsc/models/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | from .ema import EMA 15 | -------------------------------------------------------------------------------- /plsc/nn/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | from .partialfc import * 16 | -------------------------------------------------------------------------------- /plsc/nn/norm.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | from paddle import _C_ops 16 | 17 | 18 | def l2_normalize(x, axis, epsilon=1e-12, name=None): 19 | r""" 20 | This op normalizes `x` along dimension `axis` using an L2 21 | norm. For a 1-D tensor (`dim` is fixed to 0), this layer computes 22 | .. math:: 23 | y = \\frac{x}{ \sqrt{\sum {x^2} + epsion }} 24 | For `x` with more dimensions, this layer independently normalizes each 1-D 25 | slice along dimension `axis`. 26 | Args: 27 | x(Variable|list): The input tensor could be N-D tensor, and the input data type could be float16, float32 or float64. 28 | axis(int): The axis on which to apply normalization. If `axis < 0`, \ 29 | the dimension to normalization is rank(X) + axis. -1 is the 30 | last dimension. 31 | epsilon(float): The epsilon value is used to avoid division by zero, \ 32 | the default value is 1e-12. 33 | name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` 34 | Returns: 35 | Variable: The output has the same shape and data type with `x`. 
36 | """ 37 | if len(x.shape) == 1: 38 | axis = 0 39 | out, _ = _C_ops.norm(x, 1 if axis is None else axis, epsilon, False) 40 | return out 41 | -------------------------------------------------------------------------------- /plsc/optimizer/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | # Unless required by applicable law or agreed to in writing, software 9 | # distributed under the License is distributed on an "AS IS" BASIS, 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | # See the License for the specific language governing permissions and 12 | # limitations under the License. 13 | 14 | from __future__ import absolute_import 15 | from __future__ import division 16 | from __future__ import print_function 17 | 18 | from collections import defaultdict 19 | 20 | import copy 21 | import paddle 22 | 23 | from plsc.core.grad_clip import ClipGradByGlobalNorm 24 | from plsc.core.param_fuse import get_fused_params 25 | 26 | from plsc.utils import logger 27 | 28 | from .optimizer import Optimizer 29 | from .adamw import AdamW 30 | from .adafactor import Adafactor 31 | from .momentum import Momentum 32 | from .momentum_lars import MomentumLARS 33 | 34 | 35 | def build_optimizer(config, lr_scheduler, model=None): 36 | config = copy.deepcopy(config) 37 | 38 | grad_clip = None 39 | grad_clip_config = config.pop('grad_clip', None) 40 | if grad_clip_config is not None: 41 | grad_clip_name = grad_clip_config.pop('name', 'ClipGradByGlobalNorm') 42 | grad_clip = eval(grad_clip_name)(**grad_clip_config) 43 | 44 | no_weight_decay_name = config.pop('no_weight_decay_name', []) 45 | 46 | 
param_group = defaultdict(list) 47 | for n, p in model.named_parameters(): 48 | state = copy.deepcopy(p.__dict__) 49 | if any(nd in n for nd in no_weight_decay_name): 50 | state['no_weight_decay'] = True 51 | param_group[str(state)].append(p) 52 | 53 | # fuse params 54 | for key in param_group: 55 | if 'gpu' not in paddle.get_device(): 56 | continue 57 | if "'is_distributed': True" in key: 58 | continue 59 | if "'has_sparse_grad': True" in key: 60 | continue 61 | 62 | param_group[key] = get_fused_params(param_group[key]) 63 | 64 | # bulid optimizer params 65 | params = [] 66 | for key in param_group: 67 | group = {'params': param_group[key]} 68 | 69 | if "'is_distributed': True" in key: 70 | group['is_distributed'] = True 71 | 72 | if 'no_weight_decay' in key: 73 | group['weight_decay'] = 0.0 74 | 75 | params.append(group) 76 | 77 | optim_name = config.pop('name') 78 | optim = eval(optim_name)(params, 79 | lr=lr_scheduler, 80 | grad_clip=grad_clip, 81 | **config) 82 | logger.debug("build optimizer ({}) success..".format(optim)) 83 | return optim 84 | -------------------------------------------------------------------------------- /plsc/scheduler/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | # Unless required by applicable law or agreed to in writing, software 9 | # distributed under the License is distributed on an "AS IS" BASIS, 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | # See the License for the specific language governing permissions and 12 | # limitations under the License. 
13 | 14 | import paddle 15 | 16 | from plsc.utils import logger 17 | 18 | from .lr_scheduler import TimmCosine, ViTLRScheduler, Step, Poly 19 | 20 | 21 | def build_lr_scheduler(lr_config, epochs, step_each_epoch): 22 | lr_config.update({'epochs': epochs, 'step_each_epoch': step_each_epoch}) 23 | if 'name' in lr_config: 24 | lr_name = lr_config.pop('name') 25 | lr = eval(lr_name)(**lr_config) 26 | if isinstance(lr, paddle.optimizer.lr.LRScheduler): 27 | return lr 28 | else: 29 | return lr() 30 | else: 31 | lr = lr_config['learning_rate'] 32 | logger.debug("build lr ({}) success..".format(lr)) 33 | return lr 34 | -------------------------------------------------------------------------------- /plsc/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | pillow 2 | numpy 3 | easydict 4 | scikit-image 5 | scipy 6 | requests 7 | prettytable 8 | tqdm 9 | visualdl 10 | scikit-learn>=0.23.2 11 | opencv-python>=4.2.0.32 12 | onnxruntime-gpu==1.10.0 13 | onnx==1.9.0 14 | paddle2onnx==0.9.4 15 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | import os 16 | from setuptools import find_packages 17 | from setuptools import setup 18 | 19 | here = os.path.abspath(os.path.dirname(__file__)) 20 | try: 21 | README = open(os.path.join(here, 'README.md'), encoding='utf-8').read() 22 | except IOError: 23 | README = '' 24 | 25 | with open('requirements.txt', encoding="utf-8-sig") as f: 26 | requirements = f.readlines() 27 | 28 | __version__ = None 29 | 30 | with open(os.path.join(here, 'version.py')) as f: 31 | exec(f.read(), globals()) # pylint: disable=exec-used 32 | 33 | setup( 34 | name='plsc', 35 | version=__version__, 36 | description='PLSC is an open source repo for a collection of Paddle Large Scale Classification Tools, which supports large-scale classification model pre-training as well as finetune for downstream tasks.', 37 | long_description=README, 38 | long_description_content_type='text/markdown', 39 | classifiers=[ 40 | 'Intended Audience :: Developers', 41 | 'Intended Audience :: Science/Research', 42 | 'License :: OSI Approved :: Apache Software License', 43 | 'Programming Language :: Python :: 3.7', 44 | 'Topic :: Scientific/Engineering :: Artificial Intelligence', 45 | ], 46 | keywords=[ 47 | 'face-recognition', 'vision-classification', 'large-scale', 48 | 'distributed-training', 'data-parallel', 'model-parallel', 'resnet', 49 | 'vit', 'face-vit', 'deit', 'partial-fc', 'arcface' 50 | ], 51 | author='PLSC Contributors', 52 | url='https://github.com/PaddlePaddle/PLSC', 53 | download_url='https://github.com/PaddlePaddle/PLSC.git', 54 | packages=find_packages(), 55 | zip_safe=False, 56 | entry_points={ 57 | "console_scripts": [ 58 | "plsc-train = tools.train:main", 59 | "plsc-eval = tools.eval:main", 60 | "plsc-export = tools.export:main", 61 | ], 62 | }, 63 | install_requires=requirements, ) 64 | -------------------------------------------------------------------------------- /task/accelerate/tome/README.md: -------------------------------------------------------------------------------- 
1 | # Token Merging: Your ViT but Faster 2 | 3 | PaddlePaddle reimplementation of [Facebook's repository for **ToMe**](https://github.com/facebookresearch/ToMe) that was released with the paper [Token Merging: Your ViT but Faster](https://arxiv.org/abs/2210.09461). 4 | Daniel Bolya, Cheng-Yang Fu, Xiaoliang Dai, Peizhao Zhang, Christoph Feichtenhofer, Judy Hoffman. 5 | 6 | ## What is ToMe? 7 | ![ToMe Concept Figure](https://github.com/facebookresearch/ToMe/blob/main/examples/images/concept_figure.png?raw=true) 8 | 9 | Token Merging (ToMe) allows you to take an existing Vision Transformer architecture and efficiently merge tokens inside of the network for **2-3x** faster evaluation (see [benchmark script](examples/1_benchmark_timm.ipynb)). ToMe is tuned to seamlessly fit inside existing vision transformers, so you can use it without having to do additional training (see [eval script](examples/0_validation_timm.ipynb)). And if you *do* use ToMe during training, you can reduce the accuracy drop even further while also speeding up training considerably. 10 | 11 | ## What ToMe does 12 | 13 | ![ToMe Visualization](https://github.com/facebookresearch/ToMe/blob/main/examples/images/image_vis.png?raw=true) 14 | 15 | ToMe merges tokens based on their similarity, implicitly grouping parts of objects together. This is in contrast to token pruning, which only removes background tokens. ToMe can get away with reducing more tokens because we can merge redundant foreground tokens in addition to background ones. Visualization of merged tokens on ImageNet-1k val using a trained ViT-H/14 MAE model with ToMe. See [this example](examples/2_visualization_timm.ipynb) for how to produce these visualizations. For more, see the paper appendix. 
16 | 17 | 18 | ## Usage 19 | 20 | We provide a simple and fast running notebook, see [validation_tome_vit.ipynb](./validation_tome_vit.ipynb) 21 | 22 | ### How to patch your model 23 | 24 | We provide a simple patch method for easy use: 25 | 26 | ```python 27 | from plsc.models import vision_transformer 28 | from plsc.models.utils import tome 29 | 30 | # Create model and load a pretrained model. 31 | model = vision_transformer.ViT_base_patch16_224() 32 | model.load_pretrained('models/imagenet2012-ViT-B_16-224') 33 | # Patch the model with ToMe. 34 | tome.apply_patch(model) 35 | # Set the number of tokens reduced per layer. See paper for details. 36 | model.r = 16 37 | ``` 38 | 39 | ## Citation 40 | 41 | ``` 42 | @inproceedings{bolya2022tome, 43 | title={Token Merging: Your {ViT} but Faster}, 44 | author={Bolya, Daniel and Fu, Cheng-Yang and Dai, Xiaoliang and Zhang, Peizhao and Feichtenhofer, Christoph and Hoffman, Judy}, 45 | booktitle={International Conference on Learning Representations}, 46 | year={2023} 47 | } 48 | ``` 49 | -------------------------------------------------------------------------------- /task/classification/cait/README.md: -------------------------------------------------------------------------------- 1 | # CaiT 2 | 3 | PaddlePaddle reimplementation of [facebookresearch's repository for the cait model](https://github.com/facebookresearch/deit) that was released with the paper [CaiT: Going deeper with Image Transformers](https://arxiv.org/abs/2103.17239). 4 | 5 | ## Requirements 6 | To enjoy some new features, PaddlePaddle 2.4 is required. For more installation tutorials 7 | refer to [installation.md](../../../tutorials/get_started/installation.md) 8 | 9 | ## How to Train 10 | 11 | ```bash 12 | # Note: Set the following environment variables 13 | # and then need to run the script on each node. 
14 | export PADDLE_NNODES=1 15 | export PADDLE_MASTER="xxx.xxx.xxx.xxx:12538" 16 | export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 17 | 18 | python -m paddle.distributed.launch \ 19 | --nnodes=$PADDLE_NNODES \ 20 | --master=$PADDLE_MASTER \ 21 | --devices=$CUDA_VISIBLE_DEVICES \ 22 | plsc-train \ 23 | -c ./configs/cait_s24_224_in1k_1n8c_dp_fp16o2.yaml 24 | ``` 25 | 26 | ## How to Evaluation 27 | 28 | ```bash 29 | # [Optional] Download checkpoint 30 | mkdir -p pretrained/ 31 | wget -O ./pretrained/cait_s24_224_in1k_1n8c_dp_fp16o2.pdparams https://plsc.bj.bcebos.com/models/cait/v2.4/cait_s24_224_in1k_1n8c_dp_fp16o2.pdparams 32 | 33 | ``` 34 | 35 | ```bash 36 | export PADDLE_NNODES=1 37 | export PADDLE_MASTER="127.0.0.1:12538" 38 | export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 39 | python -m paddle.distributed.launch \ 40 | --nnodes=$PADDLE_NNODES \ 41 | --master=$PADDLE_MASTER \ 42 | --devices=$CUDA_VISIBLE_DEVICES \ 43 | plsc-eval \ 44 | -c ./configs/cait_s24_224_in1k_1n8c_dp_fp16o2.yaml \ 45 | -o Global.pretrained_model=pretrained/cait_s24_224_in1k_1n8c_dp_fp16o2 \ 46 | -o Global.finetune=False 47 | ``` 48 | 49 | ## Other Configurations 50 | We provide more directly runnable configurations, see [CaiT Configurations](./configs/). 
51 | 52 | 53 | ## Models 54 | 55 | | Model | Phase | Dataset | Configs | GPUs | Img/sec | Top1 Acc | Pre-trained checkpoint | Fine-tuned checkpoint | Log | 56 | | ------------ | -------- | ------------ | ------------------------------------------------------------ | --------- | ------- | -------- | ------------------------------------------------------------ | --------------------- | ------------------------------------------------------------ | 57 | | cait_s24_224 | pretrain | ImageNet2012 | [config](./configs/cait_s24_224_in1k_1n8c_dp_fp16o2.yaml) | A100*N1C8 | 2473 | 0.82628 | [download](https://plsc.bj.bcebos.com/models/cait/v2.4/cait_s24_224_in1k_1n8c_dp_fp16o2.pdparams) | | [log](https://plsc.bj.bcebos.com/models/cait/v2.4/cait_s24_224_in1k_1n8c_dp_fp16o2.log) | 58 | 59 | 60 | 61 | ## Citations 62 | 63 | ```bibtex 64 | @InProceedings{Touvron_2021_ICCV, 65 | author = {Touvron, Hugo and Cord, Matthieu and Sablayrolles, Alexandre and Synnaeve, Gabriel and J\'egou, Herv\'e}, 66 | title = {Going Deeper With Image Transformers}, 67 | booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)}, 68 | month = {October}, 69 | year = {2021}, 70 | pages = {32-42} 71 | } 72 | ``` 73 | -------------------------------------------------------------------------------- /task/classification/cait/train.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # Note: Set the following environment variables 16 | # and then need to run the script on each node. 17 | 18 | #export PADDLE_NNODES=1 19 | #export PADDLE_MASTER="xxx.xxx.xxx.xxx:12538" 20 | #export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 21 | 22 | python -m paddle.distributed.launch \ 23 | --nnodes=$PADDLE_NNODES \ 24 | --master=$PADDLE_MASTER \ 25 | --devices=$CUDA_VISIBLE_DEVICES \ 26 | plsc-train \ 27 | -c ./configs/cait_s24_224_in1k_1n8c_dp_fp16o2.yaml 28 | -------------------------------------------------------------------------------- /task/classification/convnext/README.md: -------------------------------------------------------------------------------- 1 | # ConvNeXt 2 | 3 | PaddlePaddle reimplementation of [facebookresearch's repository for the ConvneXt model](https://github.com/facebookresearch/ConvNeXt) that was released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545). 4 | 5 | ## Requirements 6 | To enjoy some new features, PaddlePaddle 2.4 is required. For more installation tutorials 7 | refer to [installation.md](../../../tutorials/get_started/installation.md) 8 | 9 | ## How to Train 10 | 11 | ```bash 12 | # Note: Set the following environment variables 13 | # and then need to run the script on each node. 
14 | #export PADDLE_NNODES=4 15 | #export PADDLE_MASTER="xxx.xxx.xxx.xxx:12538" 16 | #export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 17 | 18 | python -m paddle.distributed.launch \ 19 | --nnodes=$PADDLE_NNODES \ 20 | --master=$PADDLE_MASTER \ 21 | --devices=$CUDA_VISIBLE_DEVICES \ 22 | plsc-train \ 23 | -c ./configs/ConvNeXt_base_224_in1k_4n32c_dp_fp32.yaml 24 | ``` 25 | 26 | ## How to Evaluation 27 | 28 | ```bash 29 | # [Optional] Download checkpoint 30 | mkdir -p pretrained/ 31 | wget -O ./pretrained/ConvNeXt_base_224_in1k_dp_fp32.pdparams https://plsc.bj.bcebos.com/models/convnext/v2.5/ConvNeXt_base_224_in1k_dp_fp32.pdparams 32 | 33 | ``` 34 | 35 | ```bash 36 | export PADDLE_NNODES=1 37 | export PADDLE_MASTER="127.0.0.1:12538" 38 | export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 39 | 40 | python -m paddle.distributed.launch \ 41 | --nnodes=$PADDLE_NNODES \ 42 | --master=$PADDLE_MASTER \ 43 | --devices=$CUDA_VISIBLE_DEVICES \ 44 | plsc-eval \ 45 | -c ./configs/ConvNeXt_base_224_in1k_1n8c_dp_fp32.yaml \ 46 | -o Global.pretrained_model=pretrained/ConvNeXt_base_224_in1k_dp_fp32 \ 47 | -o Global.finetune=False 48 | ``` 49 | 50 | ## Other Configurations 51 | We provide more directly runnable configurations, see [ConvNeXt Configurations](./configs/). 
52 | 53 | 54 | ## Models 55 | 56 | | Model | DType | Phase | Dataset | Configs | GPUs | Img/sec | Top1 Acc | Pre-trained checkpoint | Log | 57 | |---------------|-------|----------| ------------ |---------------------------------------------------------------|------------|--------|----------|------------------------|-------------| 58 | | convnext_base | FP32 | pretrain | ImageNet2012 | [config](./configs/ConvNeXt_base_224_in1k_4n32c_dp_fp32.yaml) | A100*N4C32 | 7800 | 0.838 | [download](https://plsc.bj.bcebos.com/models/convnext/v2.5/ConvNeXt_base_224_in1k_dp_fp32.pdparams) | [log](https://plsc.bj.bcebos.com/models/convnext/v2.5/ConvNeXt_base_224_in1k_dp_fp32.log) | 59 | 60 | 61 | 62 | ## Citations 63 | 64 | ```bibtex 65 | @Article{liu2022convnet, 66 | author = {Zhuang Liu and Hanzi Mao and Chao-Yuan Wu and Christoph Feichtenhofer and Trevor Darrell and Saining Xie}, 67 | title = {A ConvNet for the 2020s}, 68 | journal = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, 69 | year = {2022}, 70 | } 71 | ``` 72 | -------------------------------------------------------------------------------- /task/classification/swin/train.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | # Note: Set the following environment variables 16 | # and then need to run the script on each node. 17 | 18 | #export PADDLE_NNODES=1 19 | #export PADDLE_MASTER="xxx.xxx.xxx.xxx:12538" 20 | #export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 21 | 22 | LD_LIBRARY_PATH=/usr/local/cuda/compat:$LD_LIBRARY_PATH 23 | 24 | python -m paddle.distributed.launch \ 25 | --nnodes=$PADDLE_NNODES \ 26 | --master=$PADDLE_MASTER \ 27 | --devices=$CUDA_VISIBLE_DEVICES \ 28 | plsc-train \ 29 | -c ./configs/swin_base_patch4_window7_224_in1k_1n8c_dp_fp16o2.yaml 30 | -------------------------------------------------------------------------------- /task/classification/vit/configs/ViT_large_patch16_224_in21k_4n32c_dp_fp16o2.yaml: -------------------------------------------------------------------------------- 1 | # global configs 2 | Global: 3 | checkpoint: null 4 | pretrained_model: null 5 | output_dir: ./output/ 6 | device: gpu 7 | save_interval: 1 8 | max_num_latest_checkpoint: 0 9 | eval_during_train: True 10 | eval_interval: 1 11 | eval_unit: "epoch" 12 | accum_steps: 1 13 | epochs: 90 14 | print_batch_step: 10 15 | use_visualdl: False 16 | seed: 2021 17 | 18 | # FP16 setting 19 | FP16: 20 | level: O2 21 | GradScaler: 22 | init_loss_scaling: 65536.0 23 | 24 | DistributedStrategy: 25 | data_parallel: True 26 | 27 | # model architecture 28 | Model: 29 | name: ViT_large_patch16_224 30 | class_num: 21841 31 | drop_rate: 0.1 32 | 33 | # loss function config for traing/eval process 34 | Loss: 35 | Train: 36 | - ViTCELoss: 37 | weight: 1.0 38 | epsilon: 0.0001 39 | Eval: 40 | - CELoss: 41 | weight: 1.0 42 | 43 | LRScheduler: 44 | name: ViTLRScheduler 45 | learning_rate: 1e-3 46 | decay_type: linear 47 | warmup_steps: 10000 48 | 49 | Optimizer: 50 | name: AdamW 51 | betas: (0.9, 0.999) 52 | epsilon: 1e-6 53 | weight_decay: 0.15 54 | exp_avg_force_fp32: True 55 | 56 | # data loader for train and eval 57 | DataLoader: 58 | Train: 59 | dataset: 60 | name: ImageNetDataset 61 | image_root: 
./dataset/ImageNet21K/ 62 | multi_label: True 63 | class_num: 21841 64 | cls_label_path: ./dataset/ImageNet21K/image_all_list.txt 65 | transform_ops: 66 | - DecodeImage: 67 | to_rgb: True 68 | channel_first: False 69 | - RandCropImage: 70 | size: 224 71 | interpolation: bicubic 72 | backend: pil 73 | - RandFlipImage: 74 | flip_code: 1 75 | - NormalizeImage: 76 | scale: 1.0/255.0 77 | mean: [0.5, 0.5, 0.5] 78 | std: [0.5, 0.5, 0.5] 79 | order: '' 80 | - ToCHWImage: 81 | sampler: 82 | name: DistributedBatchSampler 83 | batch_size: 128 84 | drop_last: True 85 | shuffle: True 86 | loader: 87 | num_workers: 8 88 | use_shared_memory: True 89 | 90 | Eval: 91 | dataset: 92 | name: ImageNetDataset 93 | image_root: ./dataset/ImageNet21K/ 94 | cls_label_path: ./dataset/ImageNet21K/image_dummy_val_list.txt 95 | transform_ops: 96 | - DecodeImage: 97 | to_rgb: True 98 | channel_first: False 99 | - ResizeImage: 100 | resize_short: 256 101 | interpolation: bicubic 102 | backend: pil 103 | - CenterCropImage: 104 | size: 224 105 | - NormalizeImage: 106 | scale: 1.0/255.0 107 | mean: [0.5, 0.5, 0.5] 108 | std: [0.5, 0.5, 0.5] 109 | order: '' 110 | - ToCHWImage: 111 | 112 | sampler: 113 | name: DistributedBatchSampler 114 | batch_size: 256 115 | drop_last: False 116 | shuffle: False 117 | loader: 118 | num_workers: 8 119 | use_shared_memory: True 120 | 121 | Metric: 122 | Eval: 123 | - TopkAcc: 124 | topk: [1, 5] 125 | 126 | Export: 127 | export_type: paddle 128 | input_shape: [None, 3, 224, 224] 129 | -------------------------------------------------------------------------------- /task/classification/vit/configs/ViT_large_patch16_384_in1k_ft_4n32c_dp_fp16o2.yaml: -------------------------------------------------------------------------------- 1 | # global configs 2 | Global: 3 | checkpoint: null 4 | finetune: True 5 | pretrained_model: ./pretrained/ViT_large_patch16_224/latest 6 | output_dir: ./output/ 7 | device: gpu 8 | save_interval: 1 9 | max_num_latest_checkpoint: 0 10 | 
eval_during_train: True 11 | eval_interval: 1 12 | eval_unit: "epoch" 13 | accum_steps: 1 14 | epochs: 8 15 | print_batch_step: 10 16 | use_visualdl: False 17 | seed: 2021 18 | 19 | # FP16 setting 20 | FP16: 21 | level: O2 22 | GradScaler: 23 | init_loss_scaling: 65536.0 24 | 25 | DistributedStrategy: 26 | data_parallel: True 27 | 28 | # model architecture 29 | Model: 30 | name: ViT_large_patch16_384 31 | class_num: 1000 32 | drop_rate: 0.1 33 | 34 | # loss function config for traing/eval process 35 | Loss: 36 | Train: 37 | - ViTCELoss: 38 | type: softmax 39 | weight: 1.0 40 | Eval: 41 | - CELoss: 42 | weight: 1.0 43 | 44 | LRScheduler: 45 | name: ViTLRScheduler 46 | learning_rate: 0.015 47 | decay_type: cosine 48 | warmup_steps: 500 49 | 50 | Optimizer: 51 | name: Momentum 52 | momentum: 0.9 53 | weight_decay: 0.0001 54 | grad_clip: 55 | name: ClipGradByGlobalNorm 56 | clip_norm: 1.5 57 | 58 | # data loader for train and eval 59 | DataLoader: 60 | Train: 61 | dataset: 62 | name: ImageNetDataset 63 | image_root: ./dataset/ILSVRC2012/ 64 | class_num: 1000 65 | cls_label_path: ./dataset/ILSVRC2012/train_list.txt 66 | transform_ops: 67 | - DecodeImage: 68 | to_rgb: True 69 | channel_first: False 70 | - RandCropImage: 71 | size: 384 72 | scale: [0.05, 1.0] 73 | interpolation: bilinear 74 | backend: pil 75 | - RandFlipImage: 76 | flip_code: 1 77 | - NormalizeImage: 78 | scale: 1.0/255.0 79 | mean: [0.5, 0.5, 0.5] 80 | std: [0.5, 0.5, 0.5] 81 | order: '' 82 | - ToCHWImage: 83 | 84 | sampler: 85 | name: DistributedBatchSampler 86 | batch_size: 16 # total batchsize 512 87 | drop_last: True 88 | shuffle: True 89 | loader: 90 | num_workers: 8 91 | use_shared_memory: True 92 | 93 | Eval: 94 | dataset: 95 | name: ImageNetDataset 96 | image_root: ./dataset/ILSVRC2012/ 97 | cls_label_path: ./dataset/ILSVRC2012/val_list.txt 98 | transform_ops: 99 | - DecodeImage: 100 | to_rgb: True 101 | channel_first: False 102 | - ResizeImage: 103 | size: 384 104 | interpolation: bilinear 105 | 
backend: pil 106 | - NormalizeImage: 107 | scale: 1.0/255.0 108 | mean: [0.5, 0.5, 0.5] 109 | std: [0.5, 0.5, 0.5] 110 | order: '' 111 | - ToCHWImage: 112 | 113 | sampler: 114 | name: DistributedBatchSampler 115 | batch_size: 256 116 | drop_last: False 117 | shuffle: False 118 | loader: 119 | num_workers: 8 120 | use_shared_memory: True 121 | 122 | Metric: 123 | Eval: 124 | - TopkAcc: 125 | topk: [1, 5] 126 | 127 | Export: 128 | export_type: paddle 129 | input_shape: [None, 3, 384, 384] 130 | -------------------------------------------------------------------------------- /task/recognition/face/configs/IResNet100_WebFace42M_CosFace_pfc02_1n8c_dp_mp_fp16o1.yaml: -------------------------------------------------------------------------------- 1 | # global configs 2 | Global: 3 | task_type: recognition 4 | train_epoch_func: default_train_one_epoch 5 | eval_func: face_verification_eval 6 | checkpoint: null 7 | pretrained_model: null 8 | output_dir: ./output/ 9 | device: gpu 10 | save_interval: 1 11 | max_num_latest_checkpoint: 0 12 | eval_during_train: True 13 | eval_interval: 10370 14 | eval_unit: "step" 15 | accum_steps: 1 16 | epochs: 20 17 | print_batch_step: 10 18 | use_visualdl: True 19 | seed: 2022 20 | 21 | # FP16 setting 22 | FP16: 23 | level: O1 24 | GradScaler: 25 | init_loss_scaling: 27648.0 26 | 27 | DistributedStrategy: 28 | data_parallel: True 29 | 30 | # model architecture 31 | Model: 32 | name: IResNet100 33 | num_features : 512 34 | data_format : "NHWC" 35 | class_num: 2059906 36 | pfc_config: 37 | sample_ratio: 0.2 38 | model_parallel: True 39 | 40 | # loss function config for traing/eval process 41 | Loss: 42 | Train: 43 | - MarginLoss: 44 | m1: 1.0 45 | m2: 0.0 46 | m3: 0.4 47 | s: 64.0 48 | model_parallel: True 49 | weight: 1.0 50 | 51 | LRScheduler: 52 | name: Poly 53 | learning_rate: 0.1 54 | decay_unit: step 55 | 56 | Optimizer: 57 | name: Momentum 58 | momentum: 0.9 59 | weight_decay: 5e-4 60 | use_master_param: False 61 | grad_clip: 62 | 
name: ClipGradByGlobalNorm 63 | clip_norm: 5.0 64 | no_clip_list: ['dist'] 65 | 66 | # data loader for train and eval 67 | DataLoader: 68 | Train: 69 | dataset: 70 | name: FaceIdentificationDataset 71 | image_root: ./dataset/WebFace42M 72 | cls_label_path: ./dataset/WebFace42M/label.txt 73 | transform_ops: 74 | - DecodeImage: 75 | to_rgb: True 76 | channel_first: False 77 | - RandFlipImage: 78 | flip_code: 1 79 | - NormalizeImage: 80 | scale: 1.0/255.0 81 | mean: [0.5, 0.5, 0.5] 82 | std: [0.5, 0.5, 0.5] 83 | order: '' 84 | - ToCHWImage: 85 | sampler: 86 | name: DistributedBatchSampler 87 | batch_size: 128 88 | drop_last: False 89 | shuffle: True 90 | loader: 91 | num_workers: 8 92 | use_shared_memory: True 93 | 94 | Eval: 95 | dataset: 96 | name: FaceVerificationDataset 97 | image_root: ./dataset/WebFace42M/agedb_30 98 | cls_label_path: ./dataset/WebFace42M/agedb_30/label.txt 99 | transform_ops: 100 | - DecodeImage: 101 | to_rgb: True 102 | channel_first: False 103 | - NormalizeImage: 104 | scale: 1.0/255.0 105 | mean: [0.5, 0.5, 0.5] 106 | std: [0.5, 0.5, 0.5] 107 | order: '' 108 | - ToCHWImage: 109 | sampler: 110 | name: BatchSampler 111 | batch_size: 128 112 | drop_last: False 113 | shuffle: False 114 | loader: 115 | num_workers: 0 116 | use_shared_memory: True 117 | 118 | Metric: 119 | Eval: 120 | - LFWAcc: 121 | flip_test: True 122 | 123 | Export: 124 | export_type: onnx 125 | input_shape: [None, 3, 112, 112] 126 | -------------------------------------------------------------------------------- /task/recognition/face/configs/IResNet50_MS1MV3_ArcFace_pfc10_1n8c_dp_mp_fp16o1.yaml: -------------------------------------------------------------------------------- 1 | # global configs 2 | Global: 3 | task_type: recognition 4 | train_epoch_func: default_train_one_epoch 5 | eval_func: face_verification_eval 6 | checkpoint: null 7 | pretrained_model: null 8 | output_dir: ./output/ 9 | device: gpu 10 | save_interval: 1 11 | max_num_latest_checkpoint: 0 12 | 
eval_during_train: True 13 | eval_interval: 2000 14 | eval_unit: "step" 15 | accum_steps: 1 16 | epochs: 20 17 | print_batch_step: 100 18 | use_visualdl: True 19 | seed: 2022 20 | 21 | # FP16 setting 22 | FP16: 23 | level: O1 24 | GradScaler: 25 | init_loss_scaling: 27648.0 26 | 27 | DistributedStrategy: 28 | data_parallel: True 29 | 30 | # model architecture 31 | Model: 32 | name: IResNet50 33 | num_features : 512 34 | data_format : "NHWC" 35 | class_num: 93431 36 | pfc_config: 37 | sample_ratio: 1.0 38 | model_parallel: True 39 | 40 | # loss function config for traing/eval process 41 | Loss: 42 | Train: 43 | - MarginLoss: 44 | m1: 1.0 45 | m2: 0.5 46 | m3: 0.0 47 | s: 64.0 48 | model_parallel: True 49 | weight: 1.0 50 | 51 | LRScheduler: 52 | name: Poly 53 | learning_rate: 0.1 54 | decay_unit: step 55 | warmup_steps: 0 56 | 57 | Optimizer: 58 | name: Momentum 59 | momentum: 0.9 60 | weight_decay: 5e-4 61 | grad_clip: 62 | name: ClipGradByGlobalNorm 63 | clip_norm: 5.0 64 | always_clip: True 65 | no_clip_list: ['dist'] 66 | 67 | # data loader for train and eval 68 | DataLoader: 69 | Train: 70 | dataset: 71 | name: FaceIdentificationDataset 72 | image_root: ./dataset/MS1M_v3/ 73 | cls_label_path: ./dataset/MS1M_v3/label.txt 74 | transform_ops: 75 | - DecodeImage: 76 | to_rgb: True 77 | channel_first: False 78 | - RandFlipImage: 79 | flip_code: 1 80 | - NormalizeImage: 81 | scale: 1.0/255.0 82 | mean: [0.5, 0.5, 0.5] 83 | std: [0.5, 0.5, 0.5] 84 | order: '' 85 | - ToCHWImage: 86 | sampler: 87 | name: DistributedBatchSampler 88 | batch_size: 128 89 | drop_last: False 90 | shuffle: True 91 | loader: 92 | num_workers: 8 93 | use_shared_memory: True 94 | 95 | Eval: 96 | dataset: 97 | name: FaceVerificationDataset 98 | image_root: ./dataset/MS1M_v3/agedb_30 99 | cls_label_path: ./dataset/MS1M_v3/agedb_30/label.txt 100 | transform_ops: 101 | - DecodeImage: 102 | to_rgb: True 103 | channel_first: False 104 | - NormalizeImage: 105 | scale: 1.0/255.0 106 | mean: [0.5, 0.5, 
0.5] 107 | std: [0.5, 0.5, 0.5] 108 | order: '' 109 | - ToCHWImage: 110 | sampler: 111 | name: BatchSampler 112 | batch_size: 128 113 | drop_last: False 114 | shuffle: False 115 | loader: 116 | num_workers: 0 117 | use_shared_memory: True 118 | 119 | Metric: 120 | Eval: 121 | - LFWAcc: 122 | flip_test: True 123 | 124 | Export: 125 | export_type: onnx 126 | input_shape: [None, 3, 112, 112] 127 | -------------------------------------------------------------------------------- /task/recognition/face/configs/MobileFaceNet_WebFace42M_CosFace_pfc02_1n8c_dp_mp_fp16o1.yaml: -------------------------------------------------------------------------------- 1 | # global configs 2 | Global: 3 | task_type: recognition 4 | train_epoch_func: default_train_one_epoch 5 | eval_func: face_verification_eval 6 | checkpoint: null 7 | pretrained_model: null 8 | output_dir: ./output/ 9 | device: gpu 10 | save_interval: 1 11 | max_num_latest_checkpoint: 0 12 | eval_during_train: True 13 | eval_interval: 10000 14 | eval_unit: "step" 15 | accum_steps: 1 16 | epochs: 20 17 | print_batch_step: 100 18 | use_visualdl: True 19 | seed: 2022 20 | 21 | # FP16 setting 22 | FP16: 23 | level: O1 24 | GradScaler: 25 | init_loss_scaling: 27648.0 26 | 27 | DistributedStrategy: 28 | data_parallel: True 29 | 30 | # model architecture 31 | Model: 32 | name: MobileFaceNet_base 33 | num_features: 512 34 | data_format : "NHWC" 35 | class_num: 2059906 36 | pfc_config: 37 | sample_ratio: 0.2 38 | model_parallel: True 39 | 40 | # loss function config for traing/eval process 41 | Loss: 42 | Train: 43 | - MarginLoss: 44 | m1: 1.0 45 | m2: 0.0 46 | m3: 0.4 47 | s: 64.0 48 | model_parallel: True 49 | weight: 1.0 50 | 51 | LRScheduler: 52 | name: Poly 53 | learning_rate: 0.1 54 | decay_unit: step 55 | warmup_steps: 2 56 | warmup_end_lr: 0.1 57 | 58 | Optimizer: 59 | name: Momentum 60 | momentum: 0.9 61 | weight_decay: 1e-4 62 | grad_clip: 63 | name: ClipGradByGlobalNorm 64 | clip_norm: 5.0 65 | always_clip: True 66 | 
no_clip_list: ['dist'] 67 | 68 | # data loader for train and eval 69 | DataLoader: 70 | Train: 71 | dataset: 72 | name: FaceIdentificationDataset 73 | image_root: ./dataset/WebFace42M 74 | cls_label_path: ./dataset/WebFace42M/label.txt 75 | transform_ops: 76 | - DecodeImage: 77 | to_rgb: True 78 | channel_first: False 79 | - RandFlipImage: 80 | flip_code: 1 81 | - NormalizeImage: 82 | scale: 1.0/255.0 83 | mean: [0.5, 0.5, 0.5] 84 | std: [0.5, 0.5, 0.5] 85 | order: '' 86 | - ToCHWImage: 87 | sampler: 88 | name: DistributedBatchSampler 89 | batch_size: 256 90 | drop_last: False 91 | shuffle: True 92 | loader: 93 | num_workers: 8 94 | use_shared_memory: True 95 | 96 | Eval: 97 | dataset: 98 | name: FaceVerificationDataset 99 | image_root: ./dataset/WebFace42M/agedb_30 100 | cls_label_path: ./dataset/WebFace42M/agedb_30/label.txt 101 | transform_ops: 102 | - DecodeImage: 103 | to_rgb: True 104 | channel_first: False 105 | - NormalizeImage: 106 | scale: 1.0/255.0 107 | mean: [0.5, 0.5, 0.5] 108 | std: [0.5, 0.5, 0.5] 109 | order: '' 110 | - ToCHWImage: 111 | sampler: 112 | name: BatchSampler 113 | batch_size: 128 114 | drop_last: False 115 | shuffle: False 116 | loader: 117 | num_workers: 0 118 | use_shared_memory: True 119 | 120 | Metric: 121 | Eval: 122 | - LFWAcc: 123 | flip_test: True 124 | 125 | Export: 126 | export_type: onnx 127 | input_shape: [None, 3, 112, 112] 128 | -------------------------------------------------------------------------------- /task/recognition/face/eval_ijbc.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | python onnx_ijbc.py \ 16 | --model-root ./output/IResNet50.onnx \ 17 | --image-path ./ijb/IJBC/ \ 18 | --target IJBC 19 | -------------------------------------------------------------------------------- /task/recognition/face/export.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | export PADDLE_NNODES=1 16 | export PADDLE_MASTER="127.0.0.1:12538" 17 | export CUDA_VISIBLE_DEVICES=0 18 | python -m paddle.distributed.launch \ 19 | --nnodes=$PADDLE_NNODES \ 20 | --master=$PADDLE_MASTER \ 21 | --devices=$CUDA_VISIBLE_DEVICES \ 22 | plsc-export \ 23 | -c ./configs/IResNet50_MS1MV3_ArcFace_pfc10_1n8c_dp_mp_fp16o1.yaml \ 24 | -o Global.pretrained_model=output/IResNet50/latest \ 25 | -o FP16.level=O0 \ 26 | -o Model.data_format=NCHW 27 | -------------------------------------------------------------------------------- /task/recognition/face/train.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | # for single card training 16 | # CUDA_VISIBLE_DEVICES=0 17 | # plsc-train -c ./configs/IResNet50_MS1MV3_ArcFace_pfc10_1n8c_dp_mp_fp16o1.yaml 18 | 19 | # for multi-node and multi-cards training 20 | # export PADDLE_NNODES=2 21 | # export PADDLE_MASTER="192.168.210.1:12538" 22 | # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 23 | 24 | # for single-node and multi-cards training 25 | export PADDLE_NNODES=1 26 | export PADDLE_MASTER="127.0.0.1:12538" 27 | export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 28 | python -m paddle.distributed.launch \ 29 | --nnodes=$PADDLE_NNODES \ 30 | --master=$PADDLE_MASTER \ 31 | --devices=$CUDA_VISIBLE_DEVICES \ 32 | plsc-train \ 33 | -c ./configs/IResNet50_MS1MV3_ArcFace_pfc10_1n8c_dp_mp_fp16o1.yaml 34 | -------------------------------------------------------------------------------- /task/ssl/cae/dall_e/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | import paddle 16 | import paddle.nn as nn 17 | 18 | from dall_e.encoder import Encoder 19 | from dall_e.decoder import Decoder 20 | from dall_e.utils import map_pixels, unmap_pixels 21 | from dall_e.dalle_vae import create_d_vae, load_model 22 | -------------------------------------------------------------------------------- /task/ssl/cae/dall_e/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
# Ref: https://github.com/openai/DALL-E/blob/master/dall_e/utils.py

import attr
import math

import paddle
import paddle.nn as nn
import paddle.nn.functional as F

# Epsilon of the logit-Laplace parameterization: map_pixels squeezes pixel
# values from [0, 1] into [eps, 1 - eps] so logits stay finite.
logit_laplace_eps: float = 0.1


@attr.s(eq=False)
class Conv2D(nn.Layer):
    """Square 2-D convolution with 'same' padding, configured via attrs.

    NOTE(review): ``eq=False`` stops attrs from generating ``__eq__``/``__hash__``,
    which would conflict with ``nn.Layer`` identity semantics.
    """
    # attrs-declared config: positive channel counts, odd kernel width
    n_in: int = attr.ib(validator=lambda i, a, x: x >= 1)
    n_out: int = attr.ib(validator=lambda i, a, x: x >= 1)
    kw: int = attr.ib(validator=lambda i, a, x: x >= 1 and x % 2 == 1)

    # run forward in fp16 (params are still stored as fp32 masters)
    use_float16: bool = attr.ib(default=True)
    # frozen by default — presumably a pretrained d-VAE; confirm with callers
    requires_grad: bool = attr.ib(default=False)

    def __attrs_post_init__(self) -> None:
        # attrs replaces __init__, so nn.Layer must be initialized here.
        super().__init__()

        # weight ~ Normal(0, 1/sqrt(fan_in)), fan_in = n_in * kw^2
        self.w = self.create_parameter(
            [self.n_out, self.n_in, self.kw, self.kw],
            dtype=paddle.float32,
            default_initializer=nn.initializer.Normal(std=1 / math.sqrt(
                self.n_in * self.kw**2)))
        # stop_gradient is the inverse of requires_grad in paddle
        self.w.stop_gradient = not self.requires_grad

        self.b = self.create_parameter(
            [self.n_out],
            dtype=paddle.float32,
            default_initializer=nn.initializer.Constant(value=0))
        self.b.stop_gradient = not self.requires_grad

    def forward(self, x: paddle.Tensor) -> paddle.Tensor:
        if self.use_float16:
            # cast both input and the fp32 master params down to fp16
            if x.dtype != paddle.float16:
                x = x.astype(paddle.float16)

            w, b = self.w.astype(paddle.float16), self.b.astype(paddle.float16)
        else:
            if x.dtype != paddle.float32:
                x = x.astype(paddle.float32)

            w, b = self.w, self.b

        # (kw - 1) // 2 padding preserves spatial size for odd kernels
        return F.conv2d(x, w, bias=b, padding=(self.kw - 1) // 2)


def map_pixels(x: paddle.Tensor) -> paddle.Tensor:
    """Affinely squeeze pixels from [0, 1] into [eps, 1 - eps]."""
    if x.dtype != paddle.float32:
        raise ValueError('expected input to have type float')

    return (1 - 2 * logit_laplace_eps) * x + logit_laplace_eps
# Extract only the model weights from a full CAE training checkpoint and
# re-save them as a slim pretrained-weights file ({'model': state_dict}),
# dropping optimizer state and other bookkeeping.
import argparse

import paddle

arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
    '--input',
    type=str,
    default='output/ep800_fp16o1/ep800_fp16o1_checkpoint-799.pd')
arg_parser.add_argument(
    '--output',
    type=str,
    default='output/ep800_fp16o1/cae_base_patch16_224_8k_vocab_pretrained_800ep.pd'
)

cli_args = arg_parser.parse_args()

# Load the full checkpoint, keep only the 'model' entry, write it back out.
full_checkpoint = paddle.load(cli_args.input)
paddle.save({'model': full_checkpoint['model']}, cli_args.output)
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # unset PADDLE_TRAINER_ENDPOINTS 16 | # export PADDLE_NNODES=4 17 | # export PADDLE_MASTER="xxx.xxx.xxx.xxx:12538" 18 | # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 19 | # export PADDLE_JOB_ID=CAE 20 | 21 | tmp_my_name=finetune_ep100_fp16o1 22 | my_name=${tmp_my_name%.*} 23 | OUTPUT_DIR='./output/'$my_name 24 | echo $OUTPUT_DIR 25 | DATA_PATH='./dataset/ILSVRC2012/' 26 | MODEL_PATH='output/ep800_fp16o1/ep800_fp16o1_checkpoint-799.pd' 27 | FLAGS_cudnn_exhaustive_search=True 28 | export FLAGS_gemm_use_half_precision_compute_type=False 29 | 30 | python -m paddle.distributed.launch \ 31 | --nnodes=$PADDLE_NNODES \ 32 | --master=$PADDLE_MASTER \ 33 | --devices=$CUDA_VISIBLE_DEVICES \ 34 | main_finetune.py \ 35 | --data_path ${DATA_PATH} \ 36 | --output_dir ${OUTPUT_DIR} \ 37 | --model cae_base_patch16_224 \ 38 | --finetune $MODEL_PATH \ 39 | --nb_classes 1000 \ 40 | --batch_size 128 \ 41 | --lr 8e-3 \ 42 | --accum_iter 1 \ 43 | --warmup_epochs 5 \ 44 | --epochs 100 \ 45 | --layer_decay 0.65 \ 46 | --drop_path 0.1 \ 47 | --weight_decay 0.05 \ 48 | --mixup 0.8 \ 49 | --cutmix 1.0 \ 50 | --sin_pos_emb \ 51 | --dist_eval \ 52 | --no_auto_resume \ 53 | --exp_name $my_name 54 | -------------------------------------------------------------------------------- /task/ssl/cae/linprobe.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # unset PADDLE_TRAINER_ENDPOINTS 16 | # export PADDLE_NNODES=4 17 | # export PADDLE_MASTER="xxx.xxx.xxx.xxx:12538" 18 | # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 19 | # export PADDLE_JOB_ID=CAE 20 | 21 | tmp_my_name=linprobe_ep90_fp16o1 22 | my_name=${tmp_my_name%.*} 23 | OUTPUT_DIR='./output/'$my_name 24 | echo $OUTPUT_DIR 25 | DATA_PATH='./dataset/ILSVRC2012/' 26 | MODEL_PATH='output/ep800_fp16o1/ep800_fp16o1_checkpoint-799.pd' 27 | FLAGS_cudnn_exhaustive_search=True 28 | export FLAGS_gemm_use_half_precision_compute_type=False 29 | 30 | python -m paddle.distributed.launch \ 31 | --nnodes=$PADDLE_NNODES \ 32 | --master=$PADDLE_MASTER \ 33 | --devices=$CUDA_VISIBLE_DEVICES \ 34 | main_linprobe.py \ 35 | --data_path ${DATA_PATH} \ 36 | --output_dir ${OUTPUT_DIR} \ 37 | --model cae_base_patch16_224 \ 38 | --finetune $MODEL_PATH \ 39 | --nb_classes 1000 \ 40 | --batch_size 512 \ 41 | --epochs 90 \ 42 | --blr 0.1 \ 43 | --weight_decay 0.0 \ 44 | --dist_eval \ 45 | --log_dir $OUTPUT_DIR \ 46 | --enable_linear_eval \ 47 | --use_cls \ 48 | --save_freq 50 \ 49 | --disable_rel_pos_bias \ 50 | --linear_type standard \ 51 | --exp_name $my_name 52 | -------------------------------------------------------------------------------- /task/ssl/cae/pretrain.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 PaddlePaddle Authors. 
All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # unset PADDLE_TRAINER_ENDPOINTS 16 | # export PADDLE_NNODES=4 17 | # export PADDLE_MASTER="xxx.xxx.xxx.xxx:12538" 18 | # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 19 | # export PADDLE_JOB_ID=CAE 20 | 21 | tmp_my_name=ep800_fp16o1 22 | my_name=${tmp_my_name%.*} 23 | OUTPUT_DIR='./output/'$my_name 24 | echo $OUTPUT_DIR 25 | DATA_PATH='./dataset/ILSVRC2012/' 26 | TOKENIZER_PATH=dalle-weights 27 | FLAGS_cudnn_exhaustive_search=True 28 | export FLAGS_gemm_use_half_precision_compute_type=False 29 | 30 | python -m paddle.distributed.launch \ 31 | --nnodes=$PADDLE_NNODES \ 32 | --master=$PADDLE_MASTER \ 33 | --devices=$CUDA_VISIBLE_DEVICES \ 34 | main_pretrain.py \ 35 | --data_path ${DATA_PATH} \ 36 | --output_dir ${OUTPUT_DIR} \ 37 | --model cae_base_patch16_224_8k_vocab --discrete_vae_weight_path ${TOKENIZER_PATH} \ 38 | --batch_size 64 --lr 1.5e-3 --warmup_epochs 10 --epochs 800 \ 39 | --clip_grad 3.0 --layer_scale_init_value 0.1 \ 40 | --imagenet_default_mean_and_std \ 41 | --color_jitter 0 \ 42 | --drop_path 0 \ 43 | --sincos_pos_emb \ 44 | --mask_generator block \ 45 | --num_mask_patches 75 \ 46 | --decoder_layer_scale_init_value 0.1 \ 47 | --no_auto_resume \ 48 | --save_ckpt_freq 50 \ 49 | --exp_name $my_name \ 50 | --regressor_depth 4 \ 51 | --seed 0 \ 52 | --log_dir vdl \ 53 | --num_decoder_self_attention 4 \ 54 | --dual_loss_weight 2 \ 55 | 
import paddle
import paddle.nn as nn
import paddle.nn.functional as F


class LabelSmoothingCrossEntropy(nn.Layer):
    """ NLL loss with label smoothing.

    Mixes the negative log-likelihood of the true class (weight
    ``1 - smoothing``) with the mean NLL over all classes (weight
    ``smoothing``). Expects integer class-index targets.
    """

    def __init__(self, smoothing=0.1):
        super(LabelSmoothingCrossEntropy, self).__init__()
        # smoothing must leave positive confidence on the true class
        assert smoothing < 1.0
        self.smoothing = smoothing
        self.confidence = 1. - smoothing

    def forward(self, x: paddle.Tensor,
                target: paddle.Tensor) -> paddle.Tensor:
        # log-probabilities over the class axis
        logprobs = F.log_softmax(x, axis=-1)
        # pick the log-prob of each sample's target class.
        # NOTE(review): this uses torch-style gather(axis, index) semantics;
        # in paddle the elementwise equivalent is take_along_axis — confirm
        # Tensor.gather behaves as intended here.
        nll_loss = -logprobs.gather(axis=-1, index=target.unsqueeze(1))
        nll_loss = nll_loss.squeeze(1)
        # uniform-smoothing term: mean NLL across all classes
        smooth_loss = -logprobs.mean(axis=-1)
        loss = self.confidence * nll_loss + self.smoothing * smooth_loss
        # scalar: mean over the batch
        return loss.mean()


class SoftTargetCrossEntropy(nn.Layer):
    """Cross entropy against soft (probability-distribution) targets,
    e.g. produced by mixup/cutmix."""

    def __init__(self):
        super(SoftTargetCrossEntropy, self).__init__()

    def forward(self, x: paddle.Tensor,
                target: paddle.Tensor) -> paddle.Tensor:
        # -sum_c target_c * log p_c per sample, then mean over the batch
        loss = paddle.sum(-target * F.log_softmax(x, axis=-1), axis=-1)
        return loss.mean()
# --------------------------------------------------------
# References:
# MAE: https://github.com/facebookresearch/mae
# --------------------------------------------------------

import json


def param_groups_lrd(model,
                     weight_decay=0.05,
                     no_weight_decay_list=(),
                     layer_decay=.75,
                     num_layers=None):
    """
    Parameter groups for layer-wise lr decay
    Following BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L58

    Args:
        model: model exposing ``named_parameters()`` and, when ``num_layers``
            is None, a ``blocks`` sequence of transformer blocks.
        weight_decay: decay applied to parameters not excluded below.
        no_weight_decay_list: parameter names that always get zero decay.
            (Was a mutable ``[]`` default — now an immutable tuple; membership
            semantics are unchanged.)
        layer_decay: multiplicative lr decay factor per layer of depth.
        num_layers: total layer count; defaults to ``len(model.blocks) + 1``.

    Returns:
        List of dicts with keys ``lr_scale``, ``weight_decay`` and ``params``.
    """
    param_group_names = {}  # names-only mirror, kept for debugging
    param_groups = {}

    # FIX: was `num_layers == None`; identity comparison is the correct idiom.
    if num_layers is None:
        num_layers = len(model.blocks) + 1

    # scale for layer i is layer_decay ** (num_layers - i): embeddings get the
    # smallest lr, the deepest layers (and head) the largest.
    layer_scales = list(layer_decay**(num_layers - i)
                        for i in range(num_layers + 1))

    for n, p in model.named_parameters():
        # frozen parameters are not optimized
        if p.stop_gradient:
            continue

        # EMA/teacher branches are not trained directly
        if 'teacher' in n:
            continue

        # no decay: all 1D parameters (biases, norm scales) and model-specific ones
        if p.ndim == 1 or n in no_weight_decay_list:
            g_decay = "no_decay"
            this_decay = 0.
        else:
            g_decay = "decay"
            this_decay = weight_decay

        layer_id = get_layer_id_for_vit(n, num_layers)
        group_name = "layer_%d_%s" % (layer_id, g_decay)

        if group_name not in param_group_names:
            this_scale = layer_scales[layer_id]

            param_group_names[group_name] = {
                "lr_scale": this_scale,
                "weight_decay": this_decay,
                "params": [],
            }
            param_groups[group_name] = {
                "lr_scale": this_scale,
                "weight_decay": this_decay,
                "params": [],
            }

        param_group_names[group_name]["params"].append(n)
        param_groups[group_name]["params"].append(p)

    # print("parameter groups: \n%s" % json.dumps(param_group_names, indent=2))

    return list(param_groups.values())


def get_layer_id_for_vit(name, num_layers):
    """
    Assign a parameter with its layer id
    Following BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L33
    """
    # embeddings and learned tokens belong to the input layer
    if name in ("cls_token", "mask_token", "pos_embed"):
        return 0
    elif name.startswith("patch_embed"):
        return 0
    elif name.startswith("blocks"):
        # "blocks.<idx>.<...>" -> layer idx + 1
        return int(name.split('.')[1]) + 1
    else:
        # head / final norm / anything after the blocks
        return num_layers - 1
# See the License for the specific language governing permissions and
# limitations under the License.

# --------------------------------------------------------
# References:
# MAE: https://github.com/facebookresearch/mae
# --------------------------------------------------------

import math


def adjust_learning_rate(optimizer, epoch, args):
    """Set the per-group learning rate for this (possibly fractional) epoch.

    Linear warmup from 0 to ``args.lr`` over the first ``args.warmup_epochs``
    epochs, then half-cycle cosine decay from ``args.lr`` down to
    ``args.min_lr``. Groups carrying an ``lr_scale`` entry (layer-wise lr
    decay) get the base lr multiplied by their scale.

    Returns the unscaled base lr for logging.
    """
    warmup = args.warmup_epochs
    if epoch < warmup:
        # linear ramp over the warmup period
        lr = args.lr * epoch / warmup
    else:
        # cosine anneal over the post-warmup fraction of training
        lr = args.min_lr + (args.lr - args.min_lr) * 0.5 * \
            (1. + math.cos(math.pi * (epoch - warmup) / (args.epochs - warmup)))
    for group in optimizer.param_groups:
        group["lr"] = lr * group["lr_scale"] if "lr_scale" in group else lr
    return lr
14 | 15 | #unset PADDLE_TRAINER_ENDPOINTS 16 | #export PADDLE_NNODES=4 17 | #export PADDLE_MASTER="10.67.228.16:12538" 18 | #export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 19 | #export PADDLE_JOB_ID=MAE 20 | 21 | # for single node finetune 22 | # batch_size 32, ACCUM_ITER=4, effective batch size: 1024 23 | # batch_size 128, ACCUM_ITER=1, effective batch size: 1024 24 | 25 | # 4 nodes finetune setting 26 | ACCUM_ITER=1 27 | PRETRAIN_CHKPT='output_dir/checkpoint-1599.pd' 28 | IMAGENET_DIR=./dataset/ILSVRC2012/ 29 | python -m paddle.distributed.launch \ 30 | --nnodes=$PADDLE_NNODES \ 31 | --master=$PADDLE_MASTER \ 32 | --devices=$CUDA_VISIBLE_DEVICES \ 33 | main_finetune.py \ 34 | --accum_iter $ACCUM_ITER \ 35 | --batch_size 32 \ 36 | --model maevit_base_patch16 \ 37 | --finetune ${PRETRAIN_CHKPT} \ 38 | --epochs 100 \ 39 | --blr 5e-4 --layer_decay 0.65 \ 40 | --weight_decay 0.05 --drop_path 0.1 --reprob 0.25 --mixup 0.8 --cutmix 1.0 \ 41 | --dist_eval --data_path ${IMAGENET_DIR} 42 | 43 | 44 | # export CUDA_VISIBLE_DEVICES=0 45 | # python -m paddle.distributed.launch \ 46 | # --nnodes=$PADDLE_NNODES \ 47 | # --master=$PADDLE_MASTER \ 48 | # --devices=$CUDA_VISIBLE_DEVICES \ 49 | # main_finetune.py --eval \ 50 | # --resume output_dir/checkpoint-99.pd \ 51 | # --model maevit_base_patch16 \ 52 | # --batch_size 32 \ 53 | # --weight_decay 0.05 --drop_path 0.1 --reprob 0.25 --mixup 0.8 --cutmix 1.0 \ 54 | # --data_path ${IMAGENET_DIR} 55 | -------------------------------------------------------------------------------- /task/ssl/mae/finetune_convmae.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | #unset PADDLE_TRAINER_ENDPOINTS 16 | #export PADDLE_NNODES=4 17 | #export PADDLE_MASTER="10.67.228.16:12538" 18 | #export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 19 | #export PADDLE_JOB_ID=ConvMAE 20 | 21 | # 4 nodes finetune setting 22 | ACCUM_ITER=1 23 | PRETRAIN_CHKPT='output_dir/checkpoint-1599.pd' 24 | IMAGENET_DIR=./dataset/ILSVRC2012/ 25 | python -m paddle.distributed.launch \ 26 | --nnodes=$PADDLE_NNODES \ 27 | --master=$PADDLE_MASTER \ 28 | --devices=$CUDA_VISIBLE_DEVICES \ 29 | main_finetune.py \ 30 | --accum_iter $ACCUM_ITER \ 31 | --batch_size 32 \ 32 | --model convvit_base_patch16 \ 33 | --finetune ${PRETRAIN_CHKPT} \ 34 | --epochs 100 \ 35 | --blr 5e-4 --layer_decay 0.65 \ 36 | --weight_decay 0.05 --drop_path 0.1 --reprob 0.25 --mixup 0.8 --cutmix 1.0 \ 37 | --dist_eval --data_path ${IMAGENET_DIR} 38 | 39 | 40 | # export CUDA_VISIBLE_DEVICES=0 41 | # python -m paddle.distributed.launch \ 42 | # --nnodes=$PADDLE_NNODES \ 43 | # --master=$PADDLE_MASTER \ 44 | # --devices=$CUDA_VISIBLE_DEVICES \ 45 | # main_finetune.py --eval \ 46 | # --resume output_dir/checkpoint-99.pd \ 47 | # --model convvit_base_patch16 \ 48 | # --batch_size 32 \ 49 | # --weight_decay 0.05 --drop_path 0.1 --reprob 0.25 --mixup 0.8 --cutmix 1.0 \ 50 | # --data_path ${IMAGENET_DIR} 51 | -------------------------------------------------------------------------------- /task/ssl/mae/linprobe.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | #unset PADDLE_TRAINER_ENDPOINTS 16 | #export PADDLE_NNODES=1 17 | #export PADDLE_MASTER="10.67.228.16:12538" 18 | #export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 19 | #export PADDLE_JOB_ID=MAE 20 | 21 | IMAGENET_DIR=./dataset/ILSVRC2012/ 22 | 23 | # 1 for four node, 4 for single node 24 | ACCUM_ITER=1 25 | PRETRAIN_CHKPT='./output_dir/checkpoint-1599.pd' 26 | python -m paddle.distributed.launch \ 27 | --nnodes=$PADDLE_NNODES \ 28 | --master=$PADDLE_MASTER \ 29 | --devices=$CUDA_VISIBLE_DEVICES \ 30 | main_linprobe.py \ 31 | --accum_iter $ACCUM_ITER \ 32 | --batch_size 512 \ 33 | --model maevit_base_patch16 \ 34 | --cls_token \ 35 | --finetune ${PRETRAIN_CHKPT} \ 36 | --epochs 90 \ 37 | --blr 0.1 \ 38 | --weight_decay 0.0 \ 39 | --dist_eval --data_path ${IMAGENET_DIR} 40 | 41 | #export CUDA_VISIBLE_DEVICES=0 42 | #python -m paddle.distributed.launch \ 43 | # --nnodes=$PADDLE_NNODES \ 44 | # --master=$PADDLE_MASTER \ 45 | # --devices=$CUDA_VISIBLE_DEVICES \ 46 | # main_linprobe.py --eval \ 47 | # --resume output_dir/checkpoint-88.pd \ 48 | # --model maevit_base_patch16 \ 49 | # --batch_size 512 \ 50 | # --data_path ${IMAGENET_DIR} 51 | -------------------------------------------------------------------------------- /task/ssl/mae/linprobe_convmae.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 PaddlePaddle Authors. 
All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | #unset PADDLE_TRAINER_ENDPOINTS 16 | #export PADDLE_NNODES=4 17 | #export PADDLE_MASTER="10.67.228.16:12538" 18 | #export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 19 | #export PADDLE_JOB_ID=ConvMAE 20 | 21 | IMAGENET_DIR=./dataset/ILSVRC2012/ 22 | 23 | # 1 for four node, 4 for single node 24 | ACCUM_ITER=1 25 | PRETRAIN_CHKPT='./output_dir/checkpoint-1599.pd' 26 | python -m paddle.distributed.launch \ 27 | --nnodes=$PADDLE_NNODES \ 28 | --master=$PADDLE_MASTER \ 29 | --devices=$CUDA_VISIBLE_DEVICES \ 30 | main_linprobe.py \ 31 | --accum_iter $ACCUM_ITER \ 32 | --batch_size 128 \ 33 | --model convvit_base_patch16 \ 34 | --global_pool \ 35 | --finetune ${PRETRAIN_CHKPT} \ 36 | --epochs 90 \ 37 | --blr 0.1 \ 38 | --weight_decay 0.0 \ 39 | --dist_eval --data_path ${IMAGENET_DIR} 40 | 41 | #export CUDA_VISIBLE_DEVICES=0 42 | #python -m paddle.distributed.launch \ 43 | # --nnodes=$PADDLE_NNODES \ 44 | # --master=$PADDLE_MASTER \ 45 | # --devices=$CUDA_VISIBLE_DEVICES \ 46 | # main_linprobe.py --eval \ 47 | # --resume output_dir/checkpoint-88.pd \ 48 | # --model vit_base_patch16 \ 49 | # --batch_size 512 \ 50 | # --data_path ${IMAGENET_DIR} 51 | -------------------------------------------------------------------------------- /task/ssl/mae/pretrain.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0

# MAE self-supervised pretraining of ViT-Base/16 on ImageNet-1k
# (1600 epochs, 75% masking, normalized-pixel reconstruction loss).

# Cluster setup: uncomment and adjust when launching on a multi-node job.
#unset PADDLE_TRAINER_ENDPOINTS
#export PADDLE_NNODES=4
#export PADDLE_MASTER="10.67.228.16:12538"
#export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
#export PADDLE_JOB_ID=MAE

# If you use a single node, scale accumulation to keep the effective
# global batch size at 4096:
# batch_size 64,  ACCUM_ITER=8 -> effective batch size 4096
# batch_size 256, ACCUM_ITER=2 -> effective batch size 4096

# 4 nodes for pretrain (8 GPUs x 128 x 4 nodes = 4096).
ACCUM_ITER=1
IMAGENET_DIR=./dataset/ILSVRC2012/
python -m paddle.distributed.launch \
    --nnodes=$PADDLE_NNODES \
    --master=$PADDLE_MASTER \
    --devices=$CUDA_VISIBLE_DEVICES \
    main_pretrain.py \
    --accum_iter $ACCUM_ITER \
    --batch_size 128 \
    --model mae_vit_base_patch16 \
    --norm_pix_loss \
    --mask_ratio 0.75 \
    --epochs 1600 \
    --warmup_epochs 40 \
    --blr 1.5e-4 --weight_decay 0.05 \
    --data_path ${IMAGENET_DIR}
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0

# ConvMAE self-supervised pretraining of ConvViT-Base on ImageNet-1k
# (1600 epochs, 75% masking, normalized-pixel reconstruction loss).
# NOTE(review): PADDLE_MASTER is a hard-coded internal IP — replace with
# your own cluster's master address before running.
unset PADDLE_TRAINER_ENDPOINTS
export PADDLE_NNODES=3
export PADDLE_MASTER="10.67.228.16:12538"
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
export PADDLE_JOB_ID=ConvMAE

# 3 nodes for pretrain.
ACCUM_ITER=1
IMAGENET_DIR=./dataset/ILSVRC2012/
python -m paddle.distributed.launch \
    --nnodes=$PADDLE_NNODES \
    --master=$PADDLE_MASTER \
    --devices=$CUDA_VISIBLE_DEVICES \
    main_pretrain.py \
    --accum_iter $ACCUM_ITER \
    --batch_size 128 \
    --model convmae_convvit_base_patch16 \
    --norm_pix_loss \
    --mask_ratio 0.75 \
    --epochs 1600 \
    --warmup_epochs 40 \
    --blr 1.5e-4 --weight_decay 0.05 \
    --data_path ${IMAGENET_DIR}
import paddle
import paddle.nn as nn
import paddle.nn.functional as F


class LabelSmoothingCrossEntropy(nn.Layer):
    """NLL loss with label smoothing.

    Args:
        smoothing (float): label-smoothing factor in [0, 1). The target
            class receives weight ``1 - smoothing``; the remaining mass is
            spread uniformly over all classes.
    """

    def __init__(self, smoothing=0.1):
        super(LabelSmoothingCrossEntropy, self).__init__()
        assert smoothing < 1.0
        self.smoothing = smoothing
        self.confidence = 1. - smoothing

    def forward(self, x: paddle.Tensor,
                target: paddle.Tensor) -> paddle.Tensor:
        """Return the scalar mean smoothed cross entropy.

        Args:
            x: raw logits, shape (N, C).
            target: integer class labels, shape (N,).
        """
        logprobs = F.log_softmax(x, axis=-1)
        # BUG FIX: paddle's Tensor.gather selects whole slices along `axis`
        # (tf.gather semantics), not one element per row like torch.gather.
        # The original `logprobs.gather(axis=-1, index=target.unsqueeze(1))`
        # therefore did not pick each sample's own target log-probability.
        # paddle.take_along_axis is the torch.gather equivalent.
        nll_loss = -paddle.take_along_axis(
            logprobs, target.unsqueeze(1), axis=-1)
        nll_loss = nll_loss.squeeze(1)
        smooth_loss = -logprobs.mean(axis=-1)
        loss = self.confidence * nll_loss + self.smoothing * smooth_loss
        return loss.mean()


class SoftTargetCrossEntropy(nn.Layer):
    """Cross entropy against soft (probability-distribution) targets,
    e.g. the mixed labels produced by mixup/cutmix."""

    def __init__(self):
        super(SoftTargetCrossEntropy, self).__init__()

    def forward(self, x: paddle.Tensor,
                target: paddle.Tensor) -> paddle.Tensor:
        """Return mean over the batch of -sum(target * log_softmax(x)).

        Args:
            x: raw logits, shape (N, C).
            target: soft targets, shape (N, C), rows summing to 1.
        """
        loss = paddle.sum(-target * F.log_softmax(x, axis=-1), axis=-1)
        return loss.mean()
# --------------------------------------------------------
# References:
# MAE: https://github.com/facebookresearch/mae
# --------------------------------------------------------

import json


def param_groups_lrd(model,
                     weight_decay=0.05,
                     no_weight_decay_list=(),
                     layer_decay=.75,
                     num_layers=None):
    """
    Parameter groups for layer-wise lr decay
    Following BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L58

    Args:
        model: model whose trainable parameters are grouped; must expose
            ``named_parameters()`` and, when ``num_layers`` is None,
            ``blocks`` (the list of transformer blocks).
        weight_decay (float): decay applied to parameters not excluded.
        no_weight_decay_list: parameter names always excluded from decay.
            (Fixed: was a mutable default ``[]``; a tuple avoids the shared
            mutable-default pitfall and membership tests work the same.)
        layer_decay (float): per-layer lr decay factor in (0, 1].
        num_layers (int | None): number of layers; inferred as
            ``len(model.blocks) + 1`` when None.

    Returns:
        list[dict]: optimizer parameter groups, each with ``lr_scale``,
        ``weight_decay`` and ``params`` keys.
    """
    param_group_names = {}  # mirrors param_groups but holds names, for debug
    param_groups = {}

    if num_layers is None:  # fixed: identity comparison instead of == None
        num_layers = len(model.blocks) + 1

    # Scale for layer id i is layer_decay ** (num_layers - i): the head
    # (id == num_layers) gets scale 1, earlier layers get smaller lr.
    layer_scales = [
        layer_decay**(num_layers - i) for i in range(num_layers + 1)
    ]

    for n, p in model.named_parameters():
        if p.stop_gradient:
            continue  # frozen parameter — not optimized

        # no decay: all 1D parameters (biases, norm scales) and
        # model-specific exclusions
        if p.ndim == 1 or n in no_weight_decay_list:
            g_decay = "no_decay"
            this_decay = 0.
        else:
            g_decay = "decay"
            this_decay = weight_decay

        layer_id = get_layer_id_for_vit(n, num_layers)
        group_name = "layer_%d_%s" % (layer_id, g_decay)

        if group_name not in param_group_names:
            this_scale = layer_scales[layer_id]

            param_group_names[group_name] = {
                "lr_scale": this_scale,
                "weight_decay": this_decay,
                "params": [],
            }
            param_groups[group_name] = {
                "lr_scale": this_scale,
                "weight_decay": this_decay,
                "params": [],
            }

        param_group_names[group_name]["params"].append(n)
        param_groups[group_name]["params"].append(p)

    # print("parameter groups: \n%s" % json.dumps(param_group_names, indent=2))

    return list(param_groups.values())


def get_layer_id_for_vit(name, num_layers):
    """
    Assign a parameter with its layer id
    Following BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L33

    Embedding-related parameters map to 0, ``blocks.<i>.*`` maps to i + 1,
    everything else (e.g. the head) maps to ``num_layers``.
    """
    if name in ['cls_token', 'pos_embed']:
        return 0
    elif name.startswith('patch_embed'):
        return 0
    elif name.startswith('blocks'):
        return int(name.split('.')[1]) + 1
    else:
        return num_layers
# --------------------------------------------------------
# References:
# MAE: https://github.com/facebookresearch/mae
# --------------------------------------------------------

import math


def adjust_learning_rate(optimizer, epoch, args):
    """Decay the learning rate with half-cycle cosine after warmup.

    Ramps the lr linearly from 0 to ``args.lr`` over ``args.warmup_epochs``,
    then anneals it along a half cosine cycle down to ``args.min_lr`` at
    ``args.epochs``. ``epoch`` may be fractional for per-step scheduling.

    Every parameter group's ``lr`` is set to the schedule value; groups that
    carry an ``lr_scale`` entry (layer-wise lr decay) are multiplied by it.

    Returns:
        float: the base (unscaled) learning rate for this epoch.
    """
    warmup = args.warmup_epochs
    if epoch < warmup:
        # Linear warmup phase.
        lr = args.lr * epoch / warmup
    else:
        # Half-cycle cosine: progress goes 0 -> 1 over the post-warmup span.
        progress = (epoch - warmup) / (args.epochs - warmup)
        cosine = 0.5 * (1. + math.cos(math.pi * progress))
        lr = args.min_lr + (args.lr - args.min_lr) * cosine
    for group in optimizer.param_groups:
        group["lr"] = lr * group.get("lr_scale", 1.0)
    return lr
# --------------------------------------------------------
# References:
# MAE: https://github.com/facebookresearch/mae
# --------------------------------------------------------


def add_weight_decay(model, weight_decay=1e-5, skip_list=()):
    """Split trainable parameters into a no-decay and a decay group.

    1-D parameters (biases, norm scales), parameters whose name ends in
    ``.bias``, and names listed in ``skip_list`` get weight decay 0;
    everything else gets ``weight_decay``. Parameters with
    ``stop_gradient`` set (frozen weights) are skipped entirely.

    Returns:
        list[dict]: two optimizer parameter groups, no-decay first.
    """
    no_decay, decay = [], []
    for name, param in model.named_parameters():
        if param.stop_gradient:
            continue  # frozen weights do not belong in any group
        exempt = (len(param.shape) == 1 or name.endswith(".bias") or
                  name in skip_list)
        (no_decay if exempt else decay).append(param)
    return [
        {
            'params': no_decay,
            'weight_decay': 0.
        },
        {
            'params': decay,
            'weight_decay': weight_decay
        },
    ]
import argparse
import os
import paddle

if __name__ == '__main__':
    # CLI tool: strip the MoCo v3 training wrapper from a pretrained
    # checkpoint so the backbone can be loaded as a plain DeiT/ViT model.
    parser = argparse.ArgumentParser(
        description='Convert MoCo Pre-Trained Model to DEiT')  # typo fixed
    parser.add_argument(
        '--input',
        default='',
        type=str,
        metavar='PATH',
        required=True,
        help='path to moco pre-trained checkpoint')
    parser.add_argument(
        '--output',
        default='',
        type=str,
        metavar='PATH',
        required=True,
        help='path to output checkpoint in DEiT format')
    args = parser.parse_args()
    print(args)

    # load input
    checkpoint = paddle.load(args.input)
    state_dict = checkpoint['state_dict']
    for k in list(state_dict.keys()):
        # retain only base_encoder up to before the embedding layer:
        # keep backbone weights, drop the projection head (and thereby the
        # momentum encoder / predictor, which don't match the prefix)
        if k.startswith('base_encoder') and not k.startswith(
                'base_encoder.head'):
            # remove prefix so keys match the plain backbone's state dict
            state_dict[k[len("base_encoder."):]] = state_dict[k]
        # delete renamed or unused k
        del state_dict[k]

    # make output directory if necessary.
    # BUG FIX: the original `if not os.path.isdir(output_dir): os.makedirs(...)`
    # crashed when --output had no directory component (dirname == ""), and
    # was race-prone; exist_ok=True handles both.
    output_dir = os.path.dirname(args.output)
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)
    # save to output
    paddle.save(state_dict, args.output)
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0

# End-to-end fine-tuning of a MoCo v3 pretrained ViT-Base on ImageNet-1k
# via PLSC, reusing the DeiT training recipe.
# Note: Set the following environment variables
# and then need to run the script on each node.
unset PADDLE_TRAINER_ENDPOINTS
export PADDLE_NNODES=1
export PADDLE_MASTER="127.0.0.1:12538"
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
export FLAGS_stop_check_timeout=3600

python -m paddle.distributed.launch \
    --nnodes=$PADDLE_NNODES \
    --master=$PADDLE_MASTER \
    --devices=$CUDA_VISIBLE_DEVICES \
    plsc-train \
    -c ./configs/DeiT_base_patch16_224_in1k_1n8c_dp_fp16o1.yaml \
    -o Global.epochs=150 \
    -o Global.pretrained_model=pretrained/moco_vit_base \
    -o Global.finetune=True
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0

# Linear classification probe on a frozen MoCo v3 ViT-Base backbone.
unset PADDLE_TRAINER_ENDPOINTS
export PADDLE_NNODES=1
export PADDLE_MASTER="127.0.0.1:12538"
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
export FLAGS_stop_check_timeout=3600

IMAGENET_DIR=./dataset/ILSVRC2012/
python -m paddle.distributed.launch \
    --nnodes=$PADDLE_NNODES \
    --master=$PADDLE_MASTER \
    --devices=$CUDA_VISIBLE_DEVICES \
    main_lincls.py \
    -a moco_vit_base \
    --lr=3 \
    --pretrained pretrained/checkpoint_0299.pd \
    ${IMAGENET_DIR}
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0

# MoCo v3 self-supervised pretraining of ViT-Base on ImageNet-1k
# (300 epochs, AdamW, cosine momentum schedule, patch-embed frozen).

# Cluster setup: uncomment and adjust when launching on a multi-node job.
#unset PADDLE_TRAINER_ENDPOINTS
#export PADDLE_NNODES=4
#export PADDLE_MASTER="10.67.228.16:12538"
#export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
export FLAGS_stop_check_timeout=3600

IMAGENET_DIR=./dataset/ILSVRC2012/
python -m paddle.distributed.launch \
    --nnodes=$PADDLE_NNODES \
    --master=$PADDLE_MASTER \
    --devices=$CUDA_VISIBLE_DEVICES \
    main_moco.py \
    -a moco_vit_base \
    --optimizer=adamw --lr=1.5e-4 --weight-decay=.1 \
    --epochs=300 --warmup-epochs=40 \
    --stop-grad-conv1 --moco-m-cos --moco-t=.2 \
    ${IMAGENET_DIR}
# NOTE(review): this shebang is not on line 1 of the file (the license header
# precedes it), so it has no effect; the script must be run via `bash`.
#!/usr/bin/env bash
set -e

# CI setup hook: installs PLSC in-place and links the shared dataset and
# pretrained-model directories into the test workspace.
export plsc_path=/paddle/PLSC/tests/CI
export data_path=/plsc_data
export pretrained_path=/plsc_pretrained
export log_path=/paddle/log_plsc
mkdir -p ${log_path}

function before_hook() {
    echo "=============================paddle commit============================="
    python -c "import paddle;print(paddle.__git_commit__)"

    # install requirements
    cd /paddle/PLSC/
    echo ---------- install plsc ----------
    export http_proxy=${proxy};
    export https_proxy=${proxy};
    pip install -r requirements.txt
    python setup.py develop


    echo ---------- ln plsc_data start ----------
    cd ${plsc_path}
    rm -rf dataset
    ln -s ${data_path} ./dataset
    echo ---------- ln plsc_data done ----------

    echo ---------- ln plsc_pretrained start ----------
    cd ${plsc_path}
    rm -rf pretrained
    ln -s ${pretrained_path} ./pretrained
    echo ---------- ln plsc_pretrained done ----------
}

main() {
    before_hook
}

# BUG FIX: was `main$@`, which expands to the single word "main" when no
# arguments are given but to "main<arg1> ..." otherwise — i.e. it would try
# to run a command literally named e.g. `main--foo`. `main "$@"` calls the
# function with the script's arguments, each preserved as its own word.
main "$@"
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0

# CI smoke test: run the first 50 training steps of CaiT-S24 (fp16 O2) with
# deterministic cuDNN so per-step losses are reproducible.
export PADDLE_NNODES=1
export PADDLE_MASTER="127.0.0.1:12538"
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7

python -m paddle.distributed.launch \
    --nnodes=$PADDLE_NNODES \
    --master=$PADDLE_MASTER \
    --devices=$CUDA_VISIBLE_DEVICES \
    plsc-train \
    -c ../../task/classification/cait/configs/cait_s24_224_in1k_1n8c_dp_fp16o2.yaml \
    -o Global.print_batch_step=1 \
    -o Global.max_train_step=50 \
    -o Global.flags.FLAGS_cudnn_exhaustive_search=0 \
    -o Global.flags.FLAGS_cudnn_deterministic=1
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0

# CI smoke test: first 51 training steps of ConvNeXt-Base (fp32) with
# deterministic cuDNN and 8-step gradient accumulation.
export PADDLE_NNODES=1
export PADDLE_MASTER="127.0.0.1:12538"
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7

python -m paddle.distributed.launch \
    --nnodes=$PADDLE_NNODES \
    --master=$PADDLE_MASTER \
    --devices=$CUDA_VISIBLE_DEVICES \
    plsc-train \
    -c ../../task/classification/convnext/configs/ConvNeXt_base_224_in1k_1n8c_dp_fp32.yaml \
    -o Global.print_batch_step=1 \
    -o Global.max_train_step=51 \
    -o Global.flags.FLAGS_cudnn_exhaustive_search=0 \
    -o Global.flags.FLAGS_cudnn_deterministic=1 \
    -o Global.accum_steps=8
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0

# CI smoke test: first 50 training steps of DeiT-Base (fp16 O2) with
# deterministic cuDNN so per-step losses are reproducible.
export PADDLE_NNODES=1
export PADDLE_MASTER="127.0.0.1:12538"
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7

python -m paddle.distributed.launch \
    --nnodes=$PADDLE_NNODES \
    --master=$PADDLE_MASTER \
    --devices=$CUDA_VISIBLE_DEVICES \
    plsc-train \
    -c ../../task/classification/deit/configs/DeiT_base_patch16_224_in1k_1n8c_dp_fp16o2.yaml \
    -o Global.print_batch_step=1 \
    -o Global.max_train_step=50 \
    -o Global.flags.FLAGS_cudnn_exhaustive_search=0 \
    -o Global.flags.FLAGS_cudnn_deterministic=1
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0

# CI smoke test: first 50 training steps of DeiT-Base (fp32) with
# deterministic cuDNN so per-step losses are reproducible.
export PADDLE_NNODES=1
export PADDLE_MASTER="127.0.0.1:12538"
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7

python -m paddle.distributed.launch \
    --nnodes=$PADDLE_NNODES \
    --master=$PADDLE_MASTER \
    --devices=$CUDA_VISIBLE_DEVICES \
    plsc-train \
    -c ../../task/classification/deit/configs/DeiT_base_patch16_224_in1k_1n8c_dp_fp32.yaml \
    -o Global.print_batch_step=1 \
    -o Global.max_train_step=50 \
    -o Global.flags.FLAGS_cudnn_exhaustive_search=0 \
    -o Global.flags.FLAGS_cudnn_deterministic=1
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0

# CI smoke test: first 50 training steps of Swin-Base (fp16 O2) with
# deterministic cuDNN so per-step losses are reproducible.
export PADDLE_NNODES=1
export PADDLE_MASTER="127.0.0.1:12538"
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7

python -m paddle.distributed.launch \
    --nnodes=$PADDLE_NNODES \
    --master=$PADDLE_MASTER \
    --devices=$CUDA_VISIBLE_DEVICES \
    plsc-train \
    -c ../../task/classification/swin/configs/swin_base_patch4_window7_224_in1k_1n8c_dp_fp16o2.yaml \
    -o Global.print_batch_step=1 \
    -o Global.max_train_step=50 \
    -o Global.flags.FLAGS_cudnn_exhaustive_search=0 \
    -o Global.flags.FLAGS_cudnn_deterministic=1
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0

# CI smoke test: first 50 training steps of ViT-Base/16 (fp16 O2) with
# deterministic cuDNN so per-step losses are reproducible.
export PADDLE_NNODES=1
export PADDLE_MASTER="127.0.0.1:12538"
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7

python -m paddle.distributed.launch \
    --nnodes=$PADDLE_NNODES \
    --master=$PADDLE_MASTER \
    --devices=$CUDA_VISIBLE_DEVICES \
    plsc-train \
    -c ../../task/classification/vit/configs/ViT_base_patch16_224_in1k_1n8c_dp_fp16o2.yaml \
    -o Global.print_batch_step=1 \
    -o Global.max_train_step=50 \
    -o Global.flags.FLAGS_cudnn_exhaustive_search=0 \
    -o Global.flags.FLAGS_cudnn_deterministic=1
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0

# CI smoke test: first 50 fine-tuning steps of ViT-Base/16 at 384x384
# (fp16 O2), initialized from the 224x224 pretrained weights, with
# deterministic cuDNN so per-step losses are reproducible.
export PADDLE_NNODES=1
export PADDLE_MASTER="127.0.0.1:12538"
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7

python -m paddle.distributed.launch \
    --nnodes=$PADDLE_NNODES \
    --master=$PADDLE_MASTER \
    --devices=$CUDA_VISIBLE_DEVICES \
    plsc-train \
    -c ../../task/classification/vit/configs/ViT_base_patch16_384_ft_in1k_1n8c_dp_fp16o2.yaml \
    -o Global.print_batch_step=1 \
    -o Global.max_train_step=50 \
    -o Global.flags.FLAGS_cudnn_exhaustive_search=0 \
    -o Global.flags.FLAGS_cudnn_deterministic=1 \
    -o Global.pretrained_model=./pretrained/ViT_base_patch16_224/imagenet2012-ViT-B_16-224

#!/usr/bin/env bash

# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# CI teardown hook: scan the aggregated result log for failed cases and
# make the CI job exit non-zero when any are found.
export log_path=/paddle/log_plsc

function end_hook() {
    # Count lines mentioning "failed" in the collected results.
    num=$(cat $log_path/result.log | grep "failed" | wc -l)
    if [ "${num}" -gt "0" ];then
        echo -e "=============================base cases============================="
        cat $log_path/result.log | grep "failed"
        echo -e "===================================================================="
        exit 1
    else
        exit 0
    fi
}

main() {
    end_hook
}

# BUG FIX: was `main$@`, which concatenates "main" with the first argument
# (running e.g. `main--foo`) instead of invoking the function with the
# script's arguments. `main "$@"` is the correct, word-preserving form.
main "$@"
14 | 15 | export PADDLE_NNODES=1 16 | export PADDLE_MASTER="127.0.0.1:12538" 17 | export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 18 | 19 | python -m paddle.distributed.launch \ 20 | --nnodes=$PADDLE_NNODES \ 21 | --master=$PADDLE_MASTER \ 22 | --devices=$CUDA_VISIBLE_DEVICES \ 23 | plsc-train \ 24 | -c ../../task/recognition/face/configs/FaceViT_base_patch9_112_WebFace42M_CosFace_pfc03_droppath005_mask005_1n8c_dp_mp_fp16o1.yaml \ 25 | -o Global.print_batch_step=1 \ 26 | -o Global.max_train_step=200 \ 27 | -o Global.flags.FLAGS_cudnn_exhaustive_search=0 \ 28 | -o Global.flags.FLAGS_cudnn_deterministic=1 \ 29 | -o Model.class_num=93431 \ 30 | -o DataLoader.Train.sampler.batch_size=128 \ 31 | -o DataLoader.Train.dataset.image_root=./dataset/MS1M_v3/ \ 32 | -o DataLoader.Train.dataset.cls_label_path=./dataset/MS1M_v3/label.txt \ 33 | -o DataLoader.Eval.dataset.image_root=./dataset/MS1M_v3/agedb_30/ \ 34 | -o DataLoader.Eval.dataset.cls_label_path=./dataset/MS1M_v3/agedb_30/label.txt \ 35 | 36 | 37 | -------------------------------------------------------------------------------- /tests/CI/recognition/face/FaceViT_tiny_patch9_112_WebFace42M_CosFace_pfc02_droppath005_mask0_1n8c_dp_mp_fp16o1.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | export PADDLE_NNODES=1 16 | export PADDLE_MASTER="127.0.0.1:12538" 17 | export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 18 | 19 | python -m paddle.distributed.launch \ 20 | --nnodes=$PADDLE_NNODES \ 21 | --master=$PADDLE_MASTER \ 22 | --devices=$CUDA_VISIBLE_DEVICES \ 23 | plsc-train \ 24 | -c ../../task/recognition/face/configs/FaceViT_tiny_patch9_112_WebFace42M_CosFace_pfc02_droppath005_mask0_1n8c_dp_mp_fp16o1.yaml \ 25 | -o Global.print_batch_step=1 \ 26 | -o Global.max_train_step=200 \ 27 | -o Global.flags.FLAGS_cudnn_exhaustive_search=0 \ 28 | -o Global.flags.FLAGS_cudnn_deterministic=1 \ 29 | -o Model.class_num=93431 \ 30 | -o DataLoader.Train.dataset.image_root=./dataset/MS1M_v3/ \ 31 | -o DataLoader.Train.dataset.cls_label_path=./dataset/MS1M_v3/label.txt \ 32 | -o DataLoader.Eval.dataset.image_root=./dataset/MS1M_v3/agedb_30/ \ 33 | -o DataLoader.Eval.dataset.cls_label_path=./dataset/MS1M_v3/agedb_30/label.txt \ 34 | -------------------------------------------------------------------------------- /tests/CI/recognition/face/FaceViT_tiny_patch9_112_WebFace42M_CosFace_pfc10_droppath005_mask0_1n8c_dp_mp_fp16o1.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | export PADDLE_NNODES=1 16 | export PADDLE_MASTER="127.0.0.1:12538" 17 | export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 18 | 19 | python -m paddle.distributed.launch \ 20 | --nnodes=$PADDLE_NNODES \ 21 | --master=$PADDLE_MASTER \ 22 | --devices=$CUDA_VISIBLE_DEVICES \ 23 | plsc-train \ 24 | -c ../../task/recognition/face/configs/FaceViT_tiny_patch9_112_WebFace42M_CosFace_pfc10_droppath005_mask0_1n8c_dp_mp_fp16o1.yaml \ 25 | -o Global.print_batch_step=1 \ 26 | -o Global.max_train_step=200 \ 27 | -o Global.flags.FLAGS_cudnn_exhaustive_search=0 \ 28 | -o Global.flags.FLAGS_cudnn_deterministic=1 \ 29 | -o Model.class_num=93431 \ 30 | -o DataLoader.Train.dataset.image_root=./dataset/MS1M_v3/ \ 31 | -o DataLoader.Train.dataset.cls_label_path=./dataset/MS1M_v3/label.txt \ 32 | -o DataLoader.Eval.dataset.image_root=./dataset/MS1M_v3/agedb_30/ \ 33 | -o DataLoader.Eval.dataset.cls_label_path=./dataset/MS1M_v3/agedb_30/label.txt \ 34 | -------------------------------------------------------------------------------- /tests/CI/recognition/face/IResNet100_WebFace42M_CosFace_pfc02_1n8c_dp_mp_fp16o1.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | export PADDLE_NNODES=1 16 | export PADDLE_MASTER="127.0.0.1:12538" 17 | export CUDA_VISIBLE_DEVICES=0 18 | 19 | python -m paddle.distributed.launch \ 20 | --nnodes=$PADDLE_NNODES \ 21 | --master=$PADDLE_MASTER \ 22 | --devices=$CUDA_VISIBLE_DEVICES \ 23 | plsc-train \ 24 | -c ../../task/recognition/face/configs/IResNet100_WebFace42M_CosFace_pfc02_1n8c_dp_mp_fp16o1.yaml \ 25 | -o Global.print_batch_step=1 \ 26 | -o Global.max_train_step=200 \ 27 | -o Global.flags.FLAGS_cudnn_exhaustive_search=0 \ 28 | -o Global.flags.FLAGS_cudnn_deterministic=1 \ 29 | -o Model.class_num=93431 \ 30 | -o DataLoader.Train.dataset.image_root=./dataset/MS1M_v3/ \ 31 | -o DataLoader.Train.dataset.cls_label_path=./dataset/MS1M_v3/label.txt \ 32 | -o DataLoader.Eval.dataset.image_root=./dataset/MS1M_v3/agedb_30/ \ 33 | -o DataLoader.Eval.dataset.cls_label_path=./dataset/MS1M_v3/agedb_30/label.txt 34 | -------------------------------------------------------------------------------- /tests/CI/recognition/face/IResNet50_MS1MV3_ArcFace_pfc01_1n1c_fp16o1.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | export PADDLE_NNODES=1 16 | export PADDLE_MASTER="127.0.0.1:12538" 17 | export CUDA_VISIBLE_DEVICES=0 18 | 19 | python -m paddle.distributed.launch \ 20 | --nnodes=$PADDLE_NNODES \ 21 | --master=$PADDLE_MASTER \ 22 | --devices=$CUDA_VISIBLE_DEVICES \ 23 | plsc-train \ 24 | -c ../../task/recognition/face/configs/IResNet50_MS1MV3_ArcFace_pfc10_1n8c_dp_mp_fp16o1.yaml \ 25 | -o Global.print_batch_step=1 \ 26 | -o Global.max_train_step=200 \ 27 | -o Global.flags.FLAGS_cudnn_exhaustive_search=0 \ 28 | -o Global.flags.FLAGS_cudnn_deterministic=1 \ 29 | -o Model.pfc_config.sample_ratio=0.1 30 | 31 | -------------------------------------------------------------------------------- /tests/CI/recognition/face/IResNet50_MS1MV3_ArcFace_pfc01_1n8c_dp8_fp16o1.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | export PADDLE_NNODES=1 16 | export PADDLE_MASTER="127.0.0.1:12538" 17 | export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 18 | 19 | python -m paddle.distributed.launch \ 20 | --nnodes=$PADDLE_NNODES \ 21 | --master=$PADDLE_MASTER \ 22 | --devices=$CUDA_VISIBLE_DEVICES \ 23 | plsc-train \ 24 | -c ../../task/recognition/face/configs/IResNet50_MS1MV3_ArcFace_pfc10_1n8c_dp_mp_fp16o1.yaml \ 25 | -o Global.print_batch_step=1 \ 26 | -o Global.max_train_step=200 \ 27 | -o Global.flags.FLAGS_cudnn_exhaustive_search=0 \ 28 | -o Global.flags.FLAGS_cudnn_deterministic=1 \ 29 | -o Model.pfc_config.sample_ratio=0.1 \ 30 | -o Model.pfc_config.model_parallel=False \ 31 | -o Loss.Train.0.MarginLoss.model_parallel=False 32 | 33 | -------------------------------------------------------------------------------- /tests/CI/recognition/face/IResNet50_MS1MV3_ArcFace_pfc10_1n8c_dp_mp_fp16o1.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | export PADDLE_NNODES=1 16 | export PADDLE_MASTER="127.0.0.1:12538" 17 | export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 18 | 19 | python -m paddle.distributed.launch \ 20 | --nnodes=$PADDLE_NNODES \ 21 | --master=$PADDLE_MASTER \ 22 | --devices=$CUDA_VISIBLE_DEVICES \ 23 | plsc-train \ 24 | -c ../../task/recognition/face/configs/IResNet50_MS1MV3_ArcFace_pfc10_1n8c_dp_mp_fp16o1.yaml \ 25 | -o Global.print_batch_step=1 \ 26 | -o Global.max_train_step=200 \ 27 | -o Global.flags.FLAGS_cudnn_exhaustive_search=0 \ 28 | -o Global.flags.FLAGS_cudnn_deterministic=1 29 | -------------------------------------------------------------------------------- /tests/CI/recognition/face/MobileFaceNet_WebFace42M_CosFace_pfc02_1n8c_dp_mp_fp16o1.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | export PADDLE_NNODES=1 16 | export PADDLE_MASTER="127.0.0.1:12538" 17 | export CUDA_VISIBLE_DEVICES=0 18 | 19 | python -m paddle.distributed.launch \ 20 | --nnodes=$PADDLE_NNODES \ 21 | --master=$PADDLE_MASTER \ 22 | --devices=$CUDA_VISIBLE_DEVICES \ 23 | plsc-train \ 24 | -c ../../task/recognition/face/configs/MobileFaceNet_WebFace42M_CosFace_pfc02_1n8c_dp_mp_fp16o1.yaml \ 25 | -o Global.print_batch_step=1 \ 26 | -o Global.max_train_step=200 \ 27 | -o Global.flags.FLAGS_cudnn_exhaustive_search=0 \ 28 | -o Global.flags.FLAGS_cudnn_deterministic=1 \ 29 | -o Model.class_num=93431 \ 30 | -o DataLoader.Train.dataset.image_root=./dataset/MS1M_v3/ \ 31 | -o DataLoader.Train.dataset.cls_label_path=./dataset/MS1M_v3/label.txt \ 32 | -o DataLoader.Eval.dataset.image_root=./dataset/MS1M_v3/agedb_30/ \ 33 | -o DataLoader.Eval.dataset.cls_label_path=./dataset/MS1M_v3/agedb_30/label.txt \ 34 | 35 | -------------------------------------------------------------------------------- /tests/CI/run_all.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | #!/usr/bin/env bash 16 | set -e 17 | 18 | bash ./before_hook.sh 19 | bash ./case.sh 20 | bash ./end_hook.sh 21 | -------------------------------------------------------------------------------- /tests/CI/ssl/cae/cae_base_patch16_224_ft_in1k_1n8c_dp_fp16o1.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | tmp_my_name=finetune_ep100_fp16o1 16 | my_name=${tmp_my_name%.*} 17 | OUTPUT_DIR='./output/'$my_name 18 | echo $OUTPUT_DIR 19 | DATA_PATH='./dataset/ILSVRC2012/' 20 | MODEL_PATH='pretrained/cae/cae_base_patch16_224_8k_vocab_pretrained_800ep.pd' 21 | FLAGS_cudnn_exhaustive_search=True 22 | export FLAGS_gemm_use_half_precision_compute_type=False 23 | 24 | python -m paddle.distributed.launch \ 25 | --nnodes=$PADDLE_NNODES \ 26 | --master=$PADDLE_MASTER \ 27 | --devices=$CUDA_VISIBLE_DEVICES \ 28 | ../../task/ssl/cae/main_finetune.py \ 29 | --print_freq 1 \ 30 | --max_train_step 200 \ 31 | --data_path ${DATA_PATH} \ 32 | --output_dir ${OUTPUT_DIR} \ 33 | --model cae_base_patch16_224 \ 34 | --finetune $MODEL_PATH \ 35 | --nb_classes 1000 \ 36 | --batch_size 128 \ 37 | --lr 8e-3 \ 38 | --accum_iter 1 \ 39 | --warmup_epochs 5 \ 40 | --epochs 100 \ 41 | --layer_decay 0.65 \ 42 | --drop_path 0.1 \ 43 | --weight_decay 0.05 \ 44 | --mixup 0.8 \ 45 | --cutmix 1.0 \ 46 | --sin_pos_emb \ 47 | --dist_eval \ 48 | --no_auto_resume \ 49 | --exp_name $my_name 50 | -------------------------------------------------------------------------------- /tests/CI/ssl/cae/cae_base_patch16_224_lp_in1k_1n8c_dp_fp16o1.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | tmp_my_name=linprobe_ep90_fp16o1 16 | my_name=${tmp_my_name%.*} 17 | OUTPUT_DIR='./output/'$my_name 18 | echo $OUTPUT_DIR 19 | DATA_PATH='./dataset/ILSVRC2012/' 20 | MODEL_PATH='pretrained/cae/cae_base_patch16_224_8k_vocab_pretrained_800ep.pd' 21 | FLAGS_cudnn_exhaustive_search=True 22 | export FLAGS_gemm_use_half_precision_compute_type=False 23 | 24 | python -m paddle.distributed.launch \ 25 | --nnodes=$PADDLE_NNODES \ 26 | --master=$PADDLE_MASTER \ 27 | --devices=$CUDA_VISIBLE_DEVICES \ 28 | ../../task/ssl/cae/main_linprobe.py \ 29 | --print_freq 1 \ 30 | --max_train_step 200 \ 31 | --data_path ${DATA_PATH} \ 32 | --output_dir ${OUTPUT_DIR} \ 33 | --model cae_base_patch16_224 \ 34 | --finetune $MODEL_PATH \ 35 | --nb_classes 1000 \ 36 | --batch_size 512 \ 37 | --epochs 90 \ 38 | --blr 0.1 \ 39 | --weight_decay 0.0 \ 40 | --dist_eval \ 41 | --log_dir $OUTPUT_DIR \ 42 | --enable_linear_eval \ 43 | --use_cls \ 44 | --save_freq 50 \ 45 | --disable_rel_pos_bias \ 46 | --linear_type standard \ 47 | --exp_name $my_name 48 | -------------------------------------------------------------------------------- /tests/CI/ssl/cae/cae_base_patch16_224_pt_in1k_1n8c_dp_fp16o1.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | tmp_my_name=ep800_fp16o1 16 | my_name=${tmp_my_name%.*} 17 | OUTPUT_DIR='./output/'$my_name 18 | echo $OUTPUT_DIR 19 | DATA_PATH='./dataset/ILSVRC2012/' 20 | TOKENIZER_PATH=pretrained/dalle-weights 21 | FLAGS_cudnn_exhaustive_search=True 22 | export FLAGS_gemm_use_half_precision_compute_type=False 23 | 24 | python -m paddle.distributed.launch \ 25 | --nnodes=$PADDLE_NNODES \ 26 | --master=$PADDLE_MASTER \ 27 | --devices=$CUDA_VISIBLE_DEVICES \ 28 | ../../task/ssl/cae/main_pretrain.py \ 29 | --print_freq 1 \ 30 | --max_train_step 200 \ 31 | --data_path ${DATA_PATH} \ 32 | --output_dir ${OUTPUT_DIR} \ 33 | --model cae_base_patch16_224_8k_vocab --discrete_vae_weight_path ${TOKENIZER_PATH} \ 34 | --batch_size 64 --lr 1.5e-3 --warmup_epochs 10 --epochs 800 \ 35 | --clip_grad 3.0 --layer_scale_init_value 0.1 \ 36 | --imagenet_default_mean_and_std \ 37 | --color_jitter 0 \ 38 | --drop_path 0 \ 39 | --sincos_pos_emb \ 40 | --mask_generator block \ 41 | --num_mask_patches 75 \ 42 | --decoder_layer_scale_init_value 0.1 \ 43 | --no_auto_resume \ 44 | --save_ckpt_freq 50 \ 45 | --exp_name $my_name \ 46 | --regressor_depth 4 \ 47 | --seed 0 \ 48 | --log_dir vdl \ 49 | --num_decoder_self_attention 4 \ 50 | --dual_loss_weight 2 \ 51 | --dual_path_ema 0 52 | -------------------------------------------------------------------------------- /tests/CI/ssl/convmae/convmae_convvit_base_patch16_ft_in1k_1n8c_dp_fp16o1.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | #unset PADDLE_TRAINER_ENDPOINTS 16 | #export PADDLE_NNODES=4 17 | #export PADDLE_MASTER="10.67.228.16:12538" 18 | #export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 19 | #export PADDLE_JOB_ID=ConvMAE 20 | 21 | # 4 nodes finetune setting 22 | ACCUM_ITER=1 23 | PRETRAIN_CHKPT='pretrained/convmae/convmae_convvit_base_pretrained_1599ep.pd' 24 | IMAGENET_DIR=./dataset/ILSVRC2012/ 25 | python -m paddle.distributed.launch \ 26 | --nnodes=$PADDLE_NNODES \ 27 | --master=$PADDLE_MASTER \ 28 | --devices=$CUDA_VISIBLE_DEVICES \ 29 | ../../task/ssl/mae/main_finetune.py \ 30 | --accum_iter $ACCUM_ITER \ 31 | --print_freq 1 \ 32 | --max_train_step 600 \ 33 | --batch_size 32 \ 34 | --model convvit_base_patch16 \ 35 | --finetune ${PRETRAIN_CHKPT} \ 36 | --epochs 100 \ 37 | --blr 5e-4 --layer_decay 0.65 \ 38 | --weight_decay 0.05 --drop_path 0.1 --reprob 0.25 --mixup 0.8 --cutmix 1.0 \ 39 | --dist_eval --data_path ${IMAGENET_DIR} 40 | -------------------------------------------------------------------------------- /tests/CI/ssl/convmae/convmae_convvit_base_patch16_lp_in1k_1n8c_dp_fp16o1.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | #unset PADDLE_TRAINER_ENDPOINTS 16 | #export PADDLE_NNODES=4 17 | #export PADDLE_MASTER="10.67.228.16:12538" 18 | #export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 19 | #export PADDLE_JOB_ID=ConvMAE 20 | 21 | IMAGENET_DIR=./dataset/ILSVRC2012/ 22 | 23 | # 1 for four node, 4 for single node 24 | ACCUM_ITER=1 25 | PRETRAIN_CHKPT='pretrained/convmae/convmae_convvit_base_pretrained_1599ep.pd' 26 | python -m paddle.distributed.launch \ 27 | --nnodes=$PADDLE_NNODES \ 28 | --master=$PADDLE_MASTER \ 29 | --devices=$CUDA_VISIBLE_DEVICES \ 30 | ../../task/ssl/mae/main_linprobe.py \ 31 | --accum_iter $ACCUM_ITER \ 32 | --print_freq 1 \ 33 | --max_train_step 200 \ 34 | --batch_size 128 \ 35 | --model convvit_base_patch16 \ 36 | --global_pool \ 37 | --finetune ${PRETRAIN_CHKPT} \ 38 | --epochs 90 \ 39 | --blr 0.1 \ 40 | --weight_decay 0.0 \ 41 | --dist_eval --data_path ${IMAGENET_DIR} 42 | -------------------------------------------------------------------------------- /tests/CI/ssl/convmae/convmae_convvit_base_patch16_pt_in1k_1n8c_dp_fp16o1.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | #unset PADDLE_TRAINER_ENDPOINTS 16 | #export PADDLE_NNODES=3 17 | #export PADDLE_MASTER="10.67.228.16:12538" 18 | #export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 19 | #export PADDLE_JOB_ID=ConvMAE 20 | 21 | # 3 nodes for pretrain 22 | ACCUM_ITER=2 23 | IMAGENET_DIR=./dataset/ILSVRC2012/ 24 | python -m paddle.distributed.launch \ 25 | --nnodes=$PADDLE_NNODES \ 26 | --master=$PADDLE_MASTER \ 27 | --devices=$CUDA_VISIBLE_DEVICES \ 28 | ../../task/ssl/mae/main_pretrain.py \ 29 | --accum_iter $ACCUM_ITER \ 30 | --print_freq 1 \ 31 | --max_train_step 100 \ 32 | --batch_size 64 \ 33 | --model convmae_convvit_base_patch16 \ 34 | --norm_pix_loss \ 35 | --mask_ratio 0.75 \ 36 | --epochs 1600 \ 37 | --warmup_epochs 40 \ 38 | --blr 1.5e-4 --weight_decay 0.05 \ 39 | --data_path ${IMAGENET_DIR} 40 | -------------------------------------------------------------------------------- /tests/CI/ssl/mae/mae_vit_base_patch16_ft_in1k_1n8c_dp_fp16o1.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | #unset PADDLE_TRAINER_ENDPOINTS 16 | #export PADDLE_NNODES=4 17 | #export PADDLE_MASTER="10.67.228.16:12538" 18 | #export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 19 | #export PADDLE_JOB_ID=MAE 20 | 21 | # for single node finetune 22 | # batch_size 32, ACCUM_ITER=4, effective batch size: 1024 23 | # batch_size 128, ACCUM_ITER=1, effective batch size: 1024 24 | 25 | # 4 nodes finetune setting 26 | ACCUM_ITER=1 27 | PRETRAIN_CHKPT='pretrained/mae/mae_pretrain_vit_base_1599ep.pd' 28 | IMAGENET_DIR=./dataset/ILSVRC2012/ 29 | python -m paddle.distributed.launch \ 30 | --nnodes=$PADDLE_NNODES \ 31 | --master=$PADDLE_MASTER \ 32 | --devices=$CUDA_VISIBLE_DEVICES \ 33 | ../../task/ssl/mae/main_finetune.py \ 34 | --accum_iter $ACCUM_ITER \ 35 | --print_freq 1 \ 36 | --max_train_step 600 \ 37 | --batch_size 32 \ 38 | --model maevit_base_patch16 \ 39 | --finetune ${PRETRAIN_CHKPT} \ 40 | --epochs 100 \ 41 | --blr 5e-4 --layer_decay 0.65 \ 42 | --weight_decay 0.05 --drop_path 0.1 --reprob 0.25 --mixup 0.8 --cutmix 1.0 \ 43 | --dist_eval --data_path ${IMAGENET_DIR} 44 | -------------------------------------------------------------------------------- /tests/CI/ssl/mae/mae_vit_base_patch16_lp_in1k_1n8c_dp_fp16o1.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | #unset PADDLE_TRAINER_ENDPOINTS 16 | #export PADDLE_NNODES=1 17 | #export PADDLE_MASTER="10.67.228.16:12538" 18 | #export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 19 | #export PADDLE_JOB_ID=MAE 20 | 21 | IMAGENET_DIR=./dataset/ILSVRC2012/ 22 | 23 | # 1 for four node, 4 for single node 24 | ACCUM_ITER=1 25 | PRETRAIN_CHKPT='pretrained/mae/mae_pretrain_vit_base_1599ep.pd' 26 | python -m paddle.distributed.launch \ 27 | --nnodes=$PADDLE_NNODES \ 28 | --master=$PADDLE_MASTER \ 29 | --devices=$CUDA_VISIBLE_DEVICES \ 30 | ../../task/ssl/mae/main_linprobe.py \ 31 | --accum_iter $ACCUM_ITER \ 32 | --print_freq 1 \ 33 | --max_train_step 200 \ 34 | --batch_size 512 \ 35 | --model maevit_base_patch16 \ 36 | --cls_token \ 37 | --finetune ${PRETRAIN_CHKPT} \ 38 | --epochs 90 \ 39 | --blr 0.1 \ 40 | --weight_decay 0.0 \ 41 | --dist_eval --data_path ${IMAGENET_DIR} 42 | -------------------------------------------------------------------------------- /tests/CI/ssl/mae/mae_vit_base_patch16_pt_in1k_1n8c_dp_fp16o1.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | #unset PADDLE_TRAINER_ENDPOINTS 16 | #export PADDLE_NNODES=4 17 | #export PADDLE_MASTER="10.67.228.16:12538" 18 | #export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 19 | #export PADDLE_JOB_ID=MAE 20 | 21 | # If you use single node 22 | # batch_size 64, ACCUM_ITER=8, effective batch size: 4096 23 | # batch_size 256, ACCUM_ITER=2, effective batch size: 4096 24 | 25 | # 4 nodes for pretrain 26 | ACCUM_ITER=1 27 | IMAGENET_DIR=./dataset/ILSVRC2012/ 28 | python -m paddle.distributed.launch \ 29 | --nnodes=$PADDLE_NNODES \ 30 | --master=$PADDLE_MASTER \ 31 | --devices=$CUDA_VISIBLE_DEVICES \ 32 | ../../task/ssl/mae/main_pretrain.py \ 33 | --accum_iter $ACCUM_ITER \ 34 | --print_freq 1 \ 35 | --max_train_step 200 \ 36 | --batch_size 128 \ 37 | --model mae_vit_base_patch16 \ 38 | --norm_pix_loss \ 39 | --mask_ratio 0.75 \ 40 | --epochs 1600 \ 41 | --warmup_epochs 40 \ 42 | --blr 1.5e-4 --weight_decay 0.05 \ 43 | --data_path ${IMAGENET_DIR} 44 | -------------------------------------------------------------------------------- /tests/test_tipc/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PaddlePaddle/PLSC/bd0c824649f820d18711bc268381321545683256/tests/test_tipc/README.md -------------------------------------------------------------------------------- /tests/test_tipc/classification/N1C8/ConvNeXt_base_224_bs512_fp32_DP8-MP1.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. 
All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | model_item=ConvNeXt_base_224 16 | fp_item=fp32 17 | bs_item=512 18 | run_mode=DP8-MP1 19 | device_num=N1C8 20 | yaml_path=./task/classification/convnext/configs/ConvNeXt_base_224_in1k_1n8c_dp_fp32.yaml 21 | max_iter=623 # epoch=2 22 | accum_steps=8 23 | 24 | bash ./tests/test_tipc/classification/benchmark_common/prepare.sh 25 | # run 26 | bash ./tests/test_tipc/classification/benchmark_common/run_benchmark.sh ${model_item} ${fp_item} ${bs_item} ${run_mode} ${device_num} ${yaml_path} \ 27 | ${max_iter} ${accum_steps} 2>&1; 28 | -------------------------------------------------------------------------------- /tests/test_tipc/classification/N1C8/DeiT_base_patch16_224_bs128_fp16o2_DP8-MP1.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | model_item=DeiT_base_patch16_224 16 | fp_item=fp16o2 17 | bs_item=128 18 | run_mode=DP8-MP1 19 | device_num=N1C8 20 | yaml_path=./task/classification/deit/configs/DeiT_base_patch16_224_in1k_1n8c_dp_fp16o2.yaml 21 | max_iter=8756 # epoch=7 22 | 23 | bash ./tests/test_tipc/classification/benchmark_common/prepare.sh 24 | # run 25 | bash ./tests/test_tipc/classification/benchmark_common/run_benchmark.sh ${model_item} ${fp_item} ${bs_item} ${run_mode} ${device_num} ${yaml_path} \ 26 | ${max_iter} 2>&1; 27 | -------------------------------------------------------------------------------- /tests/test_tipc/classification/N1C8/DeiT_base_patch16_224_bs128_fp32_DP8-MP1.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | model_item=DeiT_base_patch16_224 16 | fp_item=fp32 17 | bs_item=128 18 | run_mode=DP8-MP1 19 | device_num=N1C8 20 | yaml_path=./task/classification/deit/configs/DeiT_base_patch16_224_in1k_1n8c_dp_fp32.yaml 21 | max_iter=2501 # epoch=2 22 | accum_steps=2 23 | 24 | bash ./tests/test_tipc/classification/benchmark_common/prepare.sh 25 | # run 26 | bash ./tests/test_tipc/classification/benchmark_common/run_benchmark.sh ${model_item} ${fp_item} ${bs_item} ${run_mode} ${device_num} ${yaml_path} \ 27 | ${max_iter} ${accum_steps} 2>&1; 28 | -------------------------------------------------------------------------------- /tests/test_tipc/classification/N1C8/ViT_base_patch16_224_bs512_fp16_DP8-MP1.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | model_item=ViT_base_patch16_224 16 | fp_item=fp16 17 | bs_item=512 18 | run_mode=DP8-MP1 19 | device_num=N1C8 20 | yaml_path=./task/classification/vit/configs/ViT_base_patch16_224_in1k_1n8c_dp_fp16o2.yaml 21 | max_iter=1564 # epoch=5 22 | accum_steps=2 23 | 24 | bash ./tests/test_tipc/classification/benchmark_common/prepare.sh 25 | # run 26 | bash ./tests/test_tipc/classification/benchmark_common/run_benchmark.sh ${model_item} ${fp_item} ${bs_item} ${run_mode} ${device_num} ${yaml_path} \ 27 | ${max_iter} ${accum_steps} 2>&1; 28 | -------------------------------------------------------------------------------- /tests/test_tipc/classification/N1C8/ViT_base_patch16_384_ft_bs512_fp16_DP8-MP1.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | model_item=ViT_base_patch16_384_ft 16 | fp_item=fp16 17 | bs_item=512 18 | run_mode=DP8-MP1 19 | device_num=N1C8 20 | yaml_path=./task/classification/vit/configs/ViT_base_patch16_384_ft_in1k_1n8c_dp_fp16o2.yaml 21 | max_iter=2501 # epoch=1 22 | accum_steps=2 23 | pretrained_model=./pretrained/ViT_base_patch16_224/imagenet2012-ViT-B_16-224 24 | 25 | bash ./tests/test_tipc/classification/benchmark_common/prepare.sh 26 | # run 27 | bash ./tests/test_tipc/classification/benchmark_common/run_benchmark.sh ${model_item} ${fp_item} ${bs_item} ${run_mode} ${device_num} ${yaml_path} \ 28 | ${max_iter} ${accum_steps} ${pretrained_model} 2>&1; 29 | -------------------------------------------------------------------------------- /tests/test_tipc/classification/N1C8/cait_s24_224_bs128_fp16o2_DP8-MP1.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | model_item=cait_s24_224 16 | fp_item=fp16o2 17 | bs_item=128 18 | run_mode=DP8-MP1 19 | device_num=N1C8 20 | yaml_path=./task/classification/cait/configs/cait_s24_224_in1k_1n8c_dp_fp16o2.yaml 21 | max_iter=6254 # epoch=5 22 | 23 | bash ./tests/test_tipc/classification/benchmark_common/prepare.sh 24 | # run 25 | bash ./tests/test_tipc/classification/benchmark_common/run_benchmark.sh ${model_item} ${fp_item} ${bs_item} ${run_mode} ${device_num} ${yaml_path} \ 26 | ${max_iter} 2>&1; 27 | -------------------------------------------------------------------------------- /tests/test_tipc/classification/N1C8/swin_base_patch4_window7_224_bs128_fp16o2_DP8-MP1.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | model_item=swin_base_patch4_window7_224 16 | fp_item=fp16o2 17 | bs_item=128 18 | run_mode=DP8-MP1 19 | device_num=N1C8 20 | yaml_path=./task/classification/swin/configs/swin_base_patch4_window7_224_in1k_1n8c_dp_fp16o2.yaml 21 | max_iter=3755 # epoch=3 22 | 23 | bash ./tests/test_tipc/classification/benchmark_common/prepare.sh 24 | # run 25 | bash ./tests/test_tipc/classification/benchmark_common/run_benchmark.sh ${model_item} ${fp_item} ${bs_item} ${run_mode} ${device_num} ${yaml_path} \ 26 | ${max_iter} 2>&1; 27 | -------------------------------------------------------------------------------- /tests/test_tipc/classification/benchmark_common/prepare.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | unset http_proxy https_proxy 16 | python -m pip install -r requirements.txt --force-reinstall 17 | python -m pip uninstall protobuf -y 18 | python -m pip install protobuf==3.20.3 --force-reinstall 19 | python setup.py develop 20 | 21 | # dataset 22 | mkdir dataset && cd dataset 23 | cp -r ${BENCHMARK_ROOT}/models_data_cfs/Paddle_distributed/ILSVRC2012.tgz ./ 24 | tar -zxf ILSVRC2012.tgz 25 | cd - 26 | 27 | # pretrained 28 | mkdir -p pretrained/ViT_base_patch16_224 && cd pretrained/ViT_base_patch16_224 29 | wget https://plsc.bj.bcebos.com/models/vit/v2.4/imagenet2012-ViT-B_16-224.pdparams 30 | cd - 31 | -------------------------------------------------------------------------------- /tests/test_tipc/recognition/N1C1/IResNet50_pfc01_bs128_fp16_DP1-MP1.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | model_item=IResNet50_pfc01 16 | fp_item=fp16 17 | bs_item=128 18 | run_mode=DP1-MP1 19 | device_num=N1C1 20 | yaml_path=./task/recognition/face/configs/IResNet50_MS1MV3_ArcFace_pfc10_1n8c_dp_mp_fp16o1.yaml 21 | max_iter=20000 # epoch=1 22 | sample_ratio=0.1 23 | 24 | bash ./tests/test_tipc/recognition/benchmark_common/prepare.sh 25 | # run 26 | bash ./tests/test_tipc/recognition/benchmark_common/run_benchmark.sh ${model_item} ${fp_item} ${bs_item} ${run_mode} ${device_num} ${yaml_path} \ 27 | ${max_iter} ${sample_ratio} 2>&1; 28 | -------------------------------------------------------------------------------- /tests/test_tipc/recognition/N1C8/FaceViT_base_patch9_112_pfc03_bs128_fp16_DP8-MP8.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | model_item=FaceViT_base_patch9_112_pfc03 16 | fp_item=fp16 17 | bs_item=128 18 | run_mode=DP8-MP8 19 | device_num=N1C8 20 | yaml_path=./task/recognition/face/configs/FaceViT_base_patch9_112_WebFace42M_CosFace_pfc03_droppath005_mask005_1n8c_dp_mp_fp16o1.yaml 21 | max_iter=10117 # epoch=2 22 | sample_ratio=0.3 23 | model_parallel=True 24 | 25 | bash ./tests/test_tipc/recognition/benchmark_common/prepare.sh 26 | # run 27 | bash ./tests/test_tipc/recognition/benchmark_common/run_benchmark.sh ${model_item} ${fp_item} ${bs_item} ${run_mode} ${device_num} ${yaml_path} \ 28 | ${max_iter} ${sample_ratio} ${model_parallel} 2>&1; 29 | -------------------------------------------------------------------------------- /tests/test_tipc/recognition/N1C8/FaceViT_tiny_patch9_112_pfc02_bs256_fp16_DP8-MP8.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | model_item=FaceViT_tiny_patch9_112_pfc02 16 | fp_item=fp16 17 | bs_item=256 18 | run_mode=DP8-MP8 19 | device_num=N1C8 20 | yaml_path=./task/recognition/face/configs/FaceViT_tiny_patch9_112_WebFace42M_CosFace_pfc02_droppath005_mask0_1n8c_dp_mp_fp16o1.yaml 21 | max_iter=7589 # epoch=3 22 | sample_ratio=0.2 23 | model_parallel=True 24 | 25 | bash ./tests/test_tipc/recognition/benchmark_common/prepare.sh 26 | # run 27 | bash ./tests/test_tipc/recognition/benchmark_common/run_benchmark.sh ${model_item} ${fp_item} ${bs_item} ${run_mode} ${device_num} ${yaml_path} \ 28 | ${max_iter} ${sample_ratio} ${model_parallel} 2>&1; 29 | -------------------------------------------------------------------------------- /tests/test_tipc/recognition/N1C8/FaceViT_tiny_patch9_112_pfc10_bs256_fp16_DP8-MP8.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | model_item=FaceViT_tiny_patch9_112_pfc10 16 | fp_item=fp16 17 | bs_item=256 18 | run_mode=DP8-MP8 19 | device_num=N1C8 20 | yaml_path=./task/recognition/face/configs/FaceViT_tiny_patch9_112_WebFace42M_CosFace_pfc10_droppath005_mask0_1n8c_dp_mp_fp16o1.yaml 21 | max_iter=7589 # epoch=3 22 | sample_ratio=1.0 23 | model_parallel=True 24 | 25 | bash ./tests/test_tipc/recognition/benchmark_common/prepare.sh 26 | # run 27 | bash ./tests/test_tipc/recognition/benchmark_common/run_benchmark.sh ${model_item} ${fp_item} ${bs_item} ${run_mode} ${device_num} ${yaml_path} \ 28 | ${max_iter} ${sample_ratio} ${model_parallel} 2>&1; 29 | -------------------------------------------------------------------------------- /tests/test_tipc/recognition/N1C8/IResNet100_pfc02_bs128_fp16_DP8-MP8.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | model_item=IResNet100_pfc02 16 | fp_item=fp16 17 | bs_item=128 18 | run_mode=DP8-MP8 19 | device_num=N1C8 20 | yaml_path=./task/recognition/face/configs/IResNet100_WebFace42M_CosFace_pfc02_1n8c_dp_mp_fp16o1.yaml 21 | max_iter=15176 # epoch=3 22 | sample_ratio=0.2 23 | model_parallel=True 24 | 25 | bash ./tests/test_tipc/recognition/benchmark_common/prepare.sh 26 | # run 27 | bash ./tests/test_tipc/recognition/benchmark_common/run_benchmark.sh ${model_item} ${fp_item} ${bs_item} ${run_mode} ${device_num} ${yaml_path} \ 28 | ${max_iter} ${sample_ratio} ${model_parallel} 2>&1; 29 | -------------------------------------------------------------------------------- /tests/test_tipc/recognition/N1C8/IResNet50_pfc01_bs128_fp16_DP8-MP1.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | model_item=IResNet50_pfc01 16 | fp_item=fp16 17 | bs_item=128 18 | run_mode=DP8-MP1 19 | device_num=N1C8 20 | yaml_path=./task/recognition/face/configs/IResNet50_MS1MV3_ArcFace_pfc10_1n8c_dp_mp_fp16o1.yaml 21 | max_iter=15176 # epoch=3 22 | sample_ratio=0.1 23 | model_parallel=False 24 | 25 | bash ./tests/test_tipc/recognition/benchmark_common/prepare.sh 26 | # run 27 | bash ./tests/test_tipc/recognition/benchmark_common/run_benchmark.sh ${model_item} ${fp_item} ${bs_item} ${run_mode} ${device_num} ${yaml_path} \ 28 | ${max_iter} ${sample_ratio} ${model_parallel} 2>&1; 29 | -------------------------------------------------------------------------------- /tests/test_tipc/recognition/N1C8/IResNet50_pfc10_bs128_fp16_DP8-MP8.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | model_item=IResNet50_pfc10 16 | fp_item=fp16 17 | bs_item=128 18 | run_mode=DP8-MP8 19 | device_num=N1C8 20 | yaml_path=./task/recognition/face/configs/IResNet50_MS1MV3_ArcFace_pfc10_1n8c_dp_mp_fp16o1.yaml 21 | max_iter=15176 # epoch=3 22 | sample_ratio=1.0 23 | model_parallel=True 24 | 25 | bash ./tests/test_tipc/recognition/benchmark_common/prepare.sh 26 | # run 27 | bash ./tests/test_tipc/recognition/benchmark_common/run_benchmark.sh ${model_item} ${fp_item} ${bs_item} ${run_mode} ${device_num} ${yaml_path} \ 28 | ${max_iter} ${sample_ratio} ${model_parallel} 2>&1; 29 | -------------------------------------------------------------------------------- /tests/test_tipc/recognition/benchmark_common/prepare.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | unset http_proxy https_proxy 16 | python -m pip install -r requirements.txt --force-reinstall 17 | python -m pip uninstall protobuf -y 18 | python -m pip install protobuf==3.20.3 --force-reinstall 19 | python setup.py develop 20 | 21 | # dataset 22 | mkdir dataset && cd dataset 23 | cp -r ${BENCHMARK_ROOT}/models_data_cfs/Paddle_distributed/MS1M_v3.tgz ./ 24 | tar -zxf MS1M_v3.tgz 25 | rm -rf MS1M_v3.tgz 26 | cd - 27 | -------------------------------------------------------------------------------- /tests/test_tipc/ssl/N1C8/convmae_convvit_base_patch16_ft_bs32_fp16o1_DP8.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | model_item=convmae_convvit_base_patch16_ft 16 | fp_item=fp16o1 17 | bs_item=32 18 | run_mode=DP8 19 | device_num=N1C8 20 | mode=ft 21 | model=convvit_base_patch16 22 | max_iter=10007 #epoch=2 23 | PRETRAIN_CHKPT='pretrained/convmae/convmae_convvit_base_pretrained_1599ep.pd' 24 | 25 | bash ./tests/test_tipc/ssl/benchmark_common/prepare.sh 26 | # run 27 | bash ./tests/test_tipc/ssl/benchmark_common/run_benchmark.sh ${model_item} ${fp_item} ${bs_item} ${run_mode} ${device_num} \ 28 | ${mode} ${model} ${max_iter} ${PRETRAIN_CHKPT} 2>&1; 29 | -------------------------------------------------------------------------------- /tests/test_tipc/ssl/N1C8/convmae_convvit_base_patch16_lp_bs128_fp16o1_DP8.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | model_item=convmae_convvit_base_patch16_lp 16 | fp_item=fp16o1 17 | bs_item=128 18 | run_mode=DP8 19 | device_num=N1C8 20 | mode=lp 21 | model=convvit_base_patch16 22 | max_iter=6254 # epoch=5 23 | PRETRAIN_CHKPT='pretrained/convmae/convmae_convvit_base_pretrained_1599ep.pd' 24 | 25 | bash ./tests/test_tipc/ssl/benchmark_common/prepare.sh 26 | # run 27 | bash ./tests/test_tipc/ssl/benchmark_common/run_benchmark.sh ${model_item} ${fp_item} ${bs_item} ${run_mode} ${device_num} \ 28 | ${mode} ${model} ${max_iter} ${PRETRAIN_CHKPT} 2>&1; 29 | -------------------------------------------------------------------------------- /tests/test_tipc/ssl/N1C8/convmae_convvit_base_patch16_pt_bs64_fp16o1_DP8.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | model_item=convmae_convvit_base_patch16_pt 16 | fp_item=fp16o1 17 | bs_item=64 18 | run_mode=DP8 19 | device_num=N1C8 20 | mode=pt 21 | model=convmae_convvit_base_patch16 22 | max_iter=5004 # epoch=2 23 | PRETRAIN_CHKPT="" 24 | accum_iter=2 25 | 26 | bash ./tests/test_tipc/ssl/benchmark_common/prepare.sh 27 | # run 28 | bash ./tests/test_tipc/ssl/benchmark_common/run_benchmark.sh ${model_item} ${fp_item} ${bs_item} ${run_mode} ${device_num} \ 29 | ${mode} ${model} ${max_iter} ${accum_iter} 2>&1; 30 | -------------------------------------------------------------------------------- /tests/test_tipc/ssl/N1C8/mae_vit_base_patch16_ft_bs32_fp16o1_DP8.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | model_item=mae_vit_base_patch16_ft 16 | fp_item=fp16o1 17 | bs_item=32 18 | run_mode=DP8 19 | device_num=N1C8 20 | mode=ft 21 | model=maevit_base_patch16 22 | max_iter=5004 # epoch=1 23 | PRETRAIN_CHKPT='pretrained/mae/mae_pretrain_vit_base_1599ep.pd' 24 | 25 | bash ./tests/test_tipc/ssl/benchmark_common/prepare.sh 26 | # run 27 | bash ./tests/test_tipc/ssl/benchmark_common/run_benchmark.sh ${model_item} ${fp_item} ${bs_item} ${run_mode} ${device_num} \ 28 | ${mode} ${model} ${max_iter} ${PRETRAIN_CHKPT} 2>&1; 29 | -------------------------------------------------------------------------------- /tests/test_tipc/ssl/N1C8/mae_vit_base_patch16_lp_bs512_fp16o1_DP8.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | model_item=mae_vit_base_patch16_lp 16 | fp_item=fp16o1 17 | bs_item=512 18 | run_mode=DP8 19 | device_num=N1C8 20 | mode=lp 21 | model=maevit_base_patch16 22 | max_iter=1559 # epoch=5 23 | PRETRAIN_CHKPT='pretrained/mae/mae_pretrain_vit_base_1599ep.pd' 24 | 25 | bash ./tests/test_tipc/ssl/benchmark_common/prepare.sh 26 | # run 27 | bash ./tests/test_tipc/ssl/benchmark_common/run_benchmark.sh ${model_item} ${fp_item} ${bs_item} ${run_mode} ${device_num} \ 28 | ${mode} ${model} ${max_iter} ${PRETRAIN_CHKPT} 2>&1; 29 | -------------------------------------------------------------------------------- /tests/test_tipc/ssl/N1C8/mae_vit_base_patch16_pt_bs128_fp16o1_DP8.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | model_item=mae_vit_base_patch16_pt 16 | fp_item=fp16o1 17 | bs_item=128 18 | run_mode=DP8 19 | device_num=N1C8 20 | mode=pt 21 | model=mae_vit_base_patch16 22 | max_iter=6254 # epoch=5 23 | 24 | bash ./tests/test_tipc/ssl/benchmark_common/prepare.sh 25 | # run 26 | bash ./tests/test_tipc/ssl/benchmark_common/run_benchmark.sh ${model_item} ${fp_item} ${bs_item} ${run_mode} ${device_num} \ 27 | ${mode} ${model} ${max_iter} 2>&1; 28 | -------------------------------------------------------------------------------- /tests/test_tipc/ssl/benchmark_common/prepare.sh: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | unset http_proxy https_proxy 16 | python -m pip install -r requirements.txt --force-reinstall 17 | python setup.py develop 18 | 19 | # dataset 20 | mkdir dataset && cd dataset 21 | cp -r ${BENCHMARK_ROOT}/models_data_cfs/Paddle_distributed/ILSVRC2012.tgz ./ 22 | tar -zxf ILSVRC2012.tgz 23 | rm -rf ILSVRC2012.tgz 24 | cd - 25 | 26 | # pretrained 27 | mkdir -p pretrained/convmae && cd pretrained/convmae 28 | wget https://plsc.bj.bcebos.com/models/convmae/v2.5/convmae_convvit_base_pretrained_1599ep.pd 29 | cd - 30 | mkdir -p pretrained/mae && cd pretrained/mae 31 | wget https://plsc.bj.bcebos.com/models/mae/v2.4/mae_pretrain_vit_base_1599ep.pd 32 | cd - 33 | -------------------------------------------------------------------------------- /tools/eval.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | from __future__ import absolute_import 16 | from __future__ import division 17 | from __future__ import print_function 18 | 19 | import paddle 20 | paddle.disable_static() 21 | 22 | from plsc.utils import config as cfg_util 23 | from plsc.engine.engine import Engine 24 | 25 | 26 | def main(): 27 | args = cfg_util.parse_args() 28 | config = cfg_util.get_config( 29 | args.config, overrides=args.override, show=False) 30 | config.profiler_options = args.profiler_options 31 | engine = Engine(config, mode="eval") 32 | ret = engine.eval() 33 | print(ret) 34 | 35 | 36 | if __name__ == "__main__": 37 | main() 38 | -------------------------------------------------------------------------------- /tools/export.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | from __future__ import absolute_import 16 | from __future__ import division 17 | from __future__ import print_function 18 | 19 | import paddle 20 | paddle.disable_static() 21 | 22 | from plsc.utils import config as cfg_util 23 | from plsc.engine.engine import Engine 24 | 25 | 26 | def main(): 27 | args = cfg_util.parse_args() 28 | config = cfg_util.get_config( 29 | args.config, overrides=args.override, show=False) 30 | config.profiler_options = args.profiler_options 31 | engine = Engine(config, mode="export") 32 | engine.export() 33 | 34 | 35 | if __name__ == "__main__": 36 | main() 37 | -------------------------------------------------------------------------------- /tools/train.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | from __future__ import absolute_import 16 | from __future__ import division 17 | from __future__ import print_function 18 | 19 | import paddle 20 | paddle.disable_static() 21 | 22 | from plsc.utils import config as cfg_util 23 | from plsc.engine.engine import Engine 24 | 25 | 26 | def main(): 27 | args = cfg_util.parse_args() 28 | config = cfg_util.get_config( 29 | args.config, overrides=args.override, show=False) 30 | config.profiler_options = args.profiler_options 31 | engine = Engine(config, mode="train") 32 | engine.train() 33 | 34 | 35 | if __name__ == "__main__": 36 | main() 37 | -------------------------------------------------------------------------------- /tutorials/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PaddlePaddle/PLSC/bd0c824649f820d18711bc268381321545683256/tutorials/README.md -------------------------------------------------------------------------------- /tutorials/advanced/custom_dataset.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PaddlePaddle/PLSC/bd0c824649f820d18711bc268381321545683256/tutorials/advanced/custom_dataset.md -------------------------------------------------------------------------------- /tutorials/get_started/dataset.md: -------------------------------------------------------------------------------- 1 | ## Recognition 2 | 3 | ### Download Dataset 4 | 5 | Download the dataset from [insightface datasets](https://github.com/deepinsight/insightface/tree/master/recognition/_datasets_). 
6 | 7 | * MS1M_v2: MS1M-ArcFace 8 | * MS1M_v3: MS1M-RetinaFace 9 | 10 | ### Extract MXNet Dataset to Images 11 | 12 | ```shell 13 | # install mxnet first 14 | pip install mxnet-cu112 15 | python plsc/data/dataset/tools/mx_recordio_2_images.py --root_dir ms1m-retinaface-t1/ --output_dir dataset/MS1M_v3/ 16 | ``` 17 | 18 | After unzipping the dataset, the folder structure is as follows. 19 | 20 | ``` 21 | MS1M_v3 22 | |_ images 23 | | |_ 00000001.jpg 24 | | |_ ... 25 | | |_ 05179510.jpg 26 | |_ label.txt 27 | |_ agedb_30.bin 28 | |_ cfp_ff.bin 29 | |_ cfp_fp.bin 30 | |_ lfw.bin 31 | ``` 32 | 33 | Label file format is as follows. 34 | 35 | ``` 36 | # delimiter: "\t" 37 | # the following is the content of label.txt 38 | images/00000001.jpg 0 39 | ... 40 | ``` 41 | 42 | If you want to use a custom dataset, you can arrange your data according to the above format. 43 | 44 | ### Convert Test Dataset bin File to images and label.txt 45 | ```shell 46 | python plsc/data/dataset/tools/lfw_style_bin_dataset_converter.py --bin_path ./dataset/MS1M_v3/agedb_30.bin --out_dir ./dataset/MS1M_v3/agedb_30/ --flip_test 47 | ``` 48 | -------------------------------------------------------------------------------- /tutorials/get_started/installation.md: -------------------------------------------------------------------------------- 1 | ## Install PaddlePaddle from whl Package 2 | ``` 3 | # [optional] modify cuda version, e.g. post112 to post116 4 | # require python==3.7 5 | python -m pip install paddlepaddle-gpu==0.0.0.post116 -f https://www.paddlepaddle.org.cn/whl/linux/gpu/develop.html 6 | ``` 7 | 8 | ## Install PaddlePaddle from Source Code 9 | 10 | For more install information, refer to [PaddlePaddle](https://www.paddlepaddle.org.cn/) 11 | 12 | ```shell 13 | 14 | git clone https://github.com/PaddlePaddle/Paddle.git 15 | 16 | cd /path/to/Paddle/ 17 | 18 | mkdir build && cd build 19 | 20 | cmake ..
-DWITH_TESTING=OFF -DWITH_GPU=ON -DWITH_GOLANG=OFF -DWITH_STYLE_CHECK=ON -DCMAKE_INSTALL_PREFIX=$PWD/output -DWITH_DISTRIBUTE=ON -DCMAKE_BUILD_TYPE=Release -DPY_VERSION=3.7 21 | 22 | make -j20 && make install -j20 23 | 24 | pip install output/opt/paddle/share/wheels/paddlepaddle_gpu-0.0.0-cp37-cp37m-linux_x86_64.whl 25 | 26 | ``` 27 | 28 | ## Install PLSC 29 | 30 | ```shell 31 | git clone https://github.com/PaddlePaddle/PLSC.git 32 | 33 | cd /path/to/PLSC/ 34 | # [optional] pip install -r requirements.txt 35 | python setup.py develop 36 | ``` 37 | -------------------------------------------------------------------------------- /tutorials/get_started/quick_run_recognition.md: -------------------------------------------------------------------------------- 1 | ### 1. Install PaddlePaddle and Download PLSC 2 | See [installation](./installation.md) 3 | 4 | ### 2. Data Preparation 5 | #### 2.1 Download Dummy Dataset and Unzip 6 | ``` bash 7 | cd task/recognition/face 8 | # download dummy dataset 9 | wget https://plsc.bj.bcebos.com/dataset/MS1M_v3_One_Sample.tgz 10 | # unzip 11 | mkdir -p ./dataset/ 12 | tar -xzf MS1M_v3_One_Sample.tgz -C ./dataset/ 13 | ``` 14 | #### 2.2 Extract LFW Style bin Dataset to Images 15 | ```bash 16 | python -m plsc.data.dataset.tools.lfw_style_bin_dataset_converter --bin_path ./dataset/MS1M_v3_One_Sample/agedb_30.bin \ 17 | --out_dir ./dataset/MS1M_v3_One_Sample/agedb_30/ --flip_test 18 | ``` 19 | 20 | ### 3. Run Train Scripts 21 | 22 | #### 3.1 Single Node with 1 GPU 23 | 24 | Run the script from command line. 25 | ``` bash 26 | # Here, for simplicity, we just reuse the single node 8 gpus yaml configuration file. 
27 | export CUDA_VISIBLE_DEVICES=0 28 | plsc-train \ 29 | -c configs/IResNet50_MS1MV3_ArcFace_pfc10_1n8c_dp_mp_fp16o1.yaml \ 30 | -o DataLoader.Train.dataset.image_root=./dataset/MS1M_v3_One_Sample \ 31 | -o DataLoader.Train.dataset.cls_label_path=./dataset/MS1M_v3_One_Sample/label.txt \ 32 | -o DataLoader.Eval.dataset.image_root=./dataset/MS1M_v3_One_Sample/agedb_30 \ 33 | -o DataLoader.Eval.dataset.cls_label_path=./dataset/MS1M_v3_One_Sample/agedb_30/label.txt 34 | ``` 35 | 36 | #### 3.2 Single Node with 8 GPUs 37 | 38 | Run the script from command line. 39 | ``` bash 40 | export PADDLE_NNODES=1 41 | export PADDLE_MASTER="127.0.0.1:12538" 42 | export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 43 | python -m paddle.distributed.launch \ 44 | --nnodes=$PADDLE_NNODES \ 45 | --master=$PADDLE_MASTER \ 46 | --devices=$CUDA_VISIBLE_DEVICES \ 47 | plsc-train \ 48 | -c configs/IResNet50_MS1MV3_ArcFace_pfc10_1n8c_dp_mp_fp16o1.yaml \ 49 | -o DataLoader.Train.dataset.image_root=./dataset/MS1M_v3_One_Sample \ 50 | -o DataLoader.Train.dataset.cls_label_path=./dataset/MS1M_v3_One_Sample/label.txt \ 51 | -o DataLoader.Eval.dataset.image_root=./dataset/MS1M_v3_One_Sample/agedb_30 \ 52 | -o DataLoader.Eval.dataset.cls_label_path=./dataset/MS1M_v3_One_Sample/agedb_30/label.txt 53 | ``` 54 | 55 | ### 4.
Export Inference Model 56 | 57 | #### 4.1 Single Node, 1 GPU: 58 | ``` bash 59 | export CUDA_VISIBLE_DEVICES=0 60 | plsc-export \ 61 | -c configs/IResNet50_MS1MV3_ArcFace_pfc10_1n8c_dp_mp_fp16o1.yaml \ 62 | -o Global.pretrained_model=output/IResNet50/latest \ 63 | -o Model.data_format=NCHW 64 | ``` 65 | 66 | #### 4.2 Single Node, 8 GPUs: 67 | 68 | ``` bash 69 | export PADDLE_NNODES=1 70 | export PADDLE_MASTER="127.0.0.1:12538" 71 | export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 72 | 73 | python -m paddle.distributed.launch \ 74 | --nnodes=$PADDLE_NNODES \ 75 | --master=$PADDLE_MASTER \ 76 | --devices=$CUDA_VISIBLE_DEVICES \ 77 | plsc-export \ 78 | -c configs/IResNet50_MS1MV3_ArcFace_pfc10_1n8c_dp_mp_fp16o1.yaml \ 79 | -o Global.pretrained_model=output/IResNet50/latest \ 80 | -o Model.data_format=NCHW 81 | ``` 82 | -------------------------------------------------------------------------------- /version.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | __version__ = "2.5.0" 16 | --------------------------------------------------------------------------------