├── op └── README.md ├── distributed ├── README.md └── dist_fleet │ ├── thirdparty │ ├── data │ │ └── dist_data │ │ │ ├── README.md │ │ │ └── ImageNet │ │ │ └── train │ │ │ ├── n01440764_107.jpeg │ │ │ ├── n01443537_163.jpeg │ │ │ ├── n01484850_1849.jpeg │ │ │ └── n01491361_144.jpeg │ ├── __init__.py │ ├── image_classfication │ │ ├── __init__.py │ │ ├── utils │ │ │ └── __init__.py │ │ └── models │ │ │ └── __init__.py │ └── pslib │ │ └── dataset_generator.py │ ├── run.sh │ └── __init__.py ├── framework_api ├── README.md └── cts_tools.py ├── tools └── codestyle │ ├── .gitignore │ ├── clang_format.hook │ ├── pylint_pre_commit.hook │ └── cpplint_pre_commit.hook ├── inference ├── inference_benchmark │ ├── README.md │ ├── cc │ │ ├── Paddle │ │ │ ├── requirements.txt │ │ │ ├── intel_bash.sh │ │ │ ├── compile.sh │ │ │ └── bin │ │ │ │ └── run_debug_benchmark.sh │ │ ├── PyTorch │ │ │ ├── compile.sh │ │ │ └── src │ │ │ │ ├── external-cmake │ │ │ │ ├── glog.cmake │ │ │ │ └── gflag.cmake │ │ │ │ └── clas_benchmark.cc │ │ └── TensorFlow │ │ │ ├── compile.sh │ │ │ └── src │ │ │ ├── external-cmake │ │ │ ├── glog.cmake │ │ │ └── gflag.cmake │ │ │ └── CMakeLists.txt │ └── python │ │ ├── TensorFlow │ │ ├── requirements.txt │ │ └── README.md │ │ ├── Pytorch │ │ └── README.md │ │ └── Paddle │ │ ├── __init__.py │ │ └── memory.py ├── inference_api_test │ ├── python_api_test │ │ ├── tests │ │ │ ├── gpu │ │ │ │ ├── pytest.ini │ │ │ │ ├── test_yolov3_gpu.py │ │ │ │ ├── test_mask_rcnn_gpu.py │ │ │ │ ├── test_blazeface_gpu.py │ │ │ │ ├── test_faster_rcnn_gpu.py │ │ │ │ ├── test_resnet50_gpu.py │ │ │ │ ├── test_det_mv3_db_gpu.py │ │ │ │ ├── test_xception41_gpu.py │ │ │ │ ├── test_det_mv3_east_gpu.py │ │ │ │ ├── test_mobilenetv1_gpu.py │ │ │ │ ├── test_deeplabv3_gpu.py │ │ │ │ ├── test_seresnext50_gpu.py │ │ │ │ ├── test_hub_ernie_gpu.py │ │ │ │ ├── test_bert_emb_v1_gpu.py │ │ │ │ ├── test_rec_chinese_common_train_gpu.py │ │ │ │ ├── test_rec_r34_vd_tps_bilstm_attn_gpu.py │ │ │ │ ├── test_dete_dist_yolov3_v1_gpu.py │ │ │ │ ├── test_dete_prune_mask_rcnn_r50_1x_gpu.py │ │ │ │ └── test_dete_prune_yolov3_darknet_voc_gpu.py │ │ │ ├── cpu │ │ │ │ ├── test_yolov3_cpu.py │ │ │ │ ├── test_mask_rcnn_cpu.py │ │ │ │ ├── test_blazeface_cpu.py │ │ │ │ ├── test_resnet50_cpu.py │ │ │ │ ├── test_faster_rcnn_cpu.py │ │ │ │ ├── test_det_mv3_db_cpu.py │ │ │ │ ├── test_xception41_cpu.py │ │ │ │ ├── test_mobilenetv1_cpu.py │ │ │ │ ├── test_deeplabv3_cpu.py │ │ │ │ ├── test_det_mv3_east_cpu.py │ │ │ │ ├── test_seresnext50_cpu.py │ │ │ │ ├── test_rec_chinese_common_train_cpu.py │ │ │ │ └── test_rec_r34_vd_tps_bilstm_attn_cpu.py │ │ │ ├── mkldnn │ │ │ │ ├── test_yolov3_mkldnn.py │ │ │ │ ├── test_blazeface_mkldnn.py │ │ │ │ ├── test_mask_rcnn_mkldnn.py │ │ │ │ ├── test_faster_rcnn_mkldnn.py │ │ │ │ ├── test_resnet50_mkldnn.py │ │ │ │ ├── test_det_mv3_db_mkldnn.py │ │ │ │ ├── test_det_mv3_east_mkldnn.py │ │ │ │ ├── test_mobilenetv1_mkldnn.py │ │ │ │ ├── test_xception41_mkldnn.py │ │ │ │ ├── test_deeplabv3_mkldnn.py │ │ │ │ ├── test_seresnext50_mkldnn.py │ │ │ │ ├── test_hub_ernie_mkldnn.py │ │ │ │ ├── test_rec_chinese_common_train_mkldnn.py │ │ │ │ └── test_rec_r34_vd_tps_bilstm_attn_mkldnn.py │ │ │ └── trt_fp32 │ │ │ │ ├── test_yolov3_trt_fp32.py │ │ │ │ ├── test_blazeface_trt_fp32.py │ │ │ │ ├── test_resnet50_trt_fp32.py │ │ │ │ ├── test_det_mv3_db_trt_fp32.py │ │ │ │ ├── test_xception41_trt_fp32.py │ │ │ │ ├── test_deeplabv3_trt_fp32.py │ │ │ │ ├── test_mobilenetv1_trt_fp32.py │ │ │ │ ├── test_seresnext50_trt_fp32.py │ │ │ │ ├── 
test_mask_rcnn_trt_fp32.py │ │ │ │ └── test_faster_rcnn_trt_fp32.py │ │ ├── requirements.txt │ │ ├── src │ │ │ └── __init__.py │ │ └── go.sh │ ├── cpp_api_test │ │ ├── run_cpp.sh │ │ ├── bin │ │ │ ├── resnet.sh │ │ │ ├── bert.sh │ │ │ ├── run-case.sh │ │ │ ├── ocr.sh │ │ │ ├── text_classification.sh │ │ │ ├── py_sed.py │ │ │ ├── run-new-api-case-mini.sh │ │ │ ├── run-new-api-case-mini-trt.sh │ │ │ ├── run-new-api-case-jetson.sh │ │ │ ├── run-new-api-case-mini-native.sh │ │ │ ├── run-new-api-case-jetson-trt.sh │ │ │ ├── run-new-api-case-jetson-native.sh │ │ │ └── run-new-api-case.sh │ │ ├── jetson-build.sh │ │ ├── build.sh │ │ └── src │ │ │ └── external-cmake │ │ │ └── gtest-cpp.cmake │ ├── run_inference_case.sh │ └── init_env.sh ├── inference_test_utils │ └── README.md └── README.md ├── models └── args │ ├── requirements.txt │ ├── log │ └── README │ ├── PaddleNLP │ ├── pretrain_language_models │ │ ├── ELMo │ │ │ ├── run_prepare.sh │ │ │ └── test_args.py │ │ ├── XLNet │ │ │ ├── run_prepare.sh │ │ │ └── test_args.py │ │ └── BERT │ │ │ └── run_prepare.sh │ ├── seq2seq │ │ ├── seq2seq │ │ │ ├── run_prepare.sh │ │ │ └── test_args.py │ │ └── variational_seq2seq │ │ │ ├── run_prepare.sh │ │ │ └── test_args.py │ ├── language_model │ │ ├── test_args.py │ │ └── run_prepare.sh │ ├── dialogue_system │ │ ├── dialogue_general_understanding │ │ │ └── run_prepare.sh │ │ └── auto_dialogue_evaluation │ │ │ └── run_prepare.sh │ ├── dialogue_domain_classification │ │ ├── test_args.py │ │ └── run_prepare.sh │ ├── emotion_detection │ │ └── run_prepare.sh │ ├── machine_translation │ │ └── transformer │ │ │ └── run_prepare.sh │ ├── lexical_analysis │ │ └── run_prepare.sh │ ├── sentiment_classification │ │ └── run_prepare.sh │ └── similarity_net │ │ └── run_prepare.sh │ ├── conf │ ├── README │ └── args_batch.conf │ ├── PaddleNLP- │ └── dialogue_system │ │ └── auto_dialogue_evaluation │ │ └── run_prepare.sh │ ├── run.sh │ ├── commit.sh │ ├── commit.py │ ├── models.py │ └── run_test ├── test └── tools │ ├── tool-test-op-correctness │ └── cases │ │ ├── pytest.ini │ │ ├── readme.md │ │ ├── test_cos.py │ │ ├── test_atan.py │ │ ├── test_ceil.py │ │ ├── test_asin.py │ │ ├── test_cosh.py │ │ ├── test_acos.py │ │ ├── test_bernoulli.py │ │ ├── test_conj.py │ │ ├── test_Sigmoid.py │ │ └── test_relu6.py │ ├── tool-test-train-performance │ ├── data │ │ └── README.md │ ├── service.py │ ├── run_MobileNetV1.sh │ └── run_ResNet50_vd.sh │ ├── tool-test-cpu-train │ ├── vgg11.sh │ ├── vgg13.sh │ ├── vgg16.sh │ ├── vgg19.sh │ ├── resnet18.sh │ ├── resnet34.sh │ ├── resnet50.sh │ ├── resnet101.sh │ ├── mobilenetv1.sh │ └── mobilenetv2.sh │ ├── tool-test-dl-net │ ├── vgg11.sh │ ├── vgg13.sh │ ├── vgg16.sh │ ├── vgg19.sh │ ├── resnet101.sh │ ├── resnet18.sh │ ├── resnet34.sh │ ├── resnet50.sh │ ├── mobilenetv1.sh │ ├── mobilenetv2.sh │ └── tool.py │ ├── tool-test-gpu-train │ ├── vgg11.sh │ ├── vgg13.sh │ ├── vgg16.sh │ ├── vgg19.sh │ ├── resnet18.sh │ ├── resnet34.sh │ ├── resnet50.sh │ ├── resnet101.sh │ ├── mobilenetv1.sh │ └── mobilenetv2.sh │ ├── tool-test-inference │ └── test_case │ │ ├── __init__.py │ │ └── text_preprocess.py │ ├── tool-test-dl-algorithm-correctness │ └── test_case │ │ ├── __init__.py │ │ └── text_preprocess.py │ ├── start.sh │ ├── tool-check-availability-of-installation │ ├── tool.sh │ └── service.py │ └── tool-test-train-resource │ ├── service.py │ ├── run_MobileNetV1.sh │ └── run_ResNet50_vd.sh ├── .style.yapf ├── .gitignore ├── README.md ├── tipc ├── run.sh ├── check_loss.sh ├── tipc_run_cpp.sh └── prepare.sh 
├── .travis.yml ├── test.py └── .pre-commit-config.yaml /op/README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /distributed/README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /framework_api/README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tools/codestyle/.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | -------------------------------------------------------------------------------- /inference/inference_benchmark/README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /models/args/requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | pandas 3 | -------------------------------------------------------------------------------- /test/tools/tool-test-op-correctness/cases/pytest.ini: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /models/args/log/README: -------------------------------------------------------------------------------- 1 | model args routine test logs 2 | -------------------------------------------------------------------------------- /.style.yapf: -------------------------------------------------------------------------------- 1 | [style] 2 | based_on_style = pep8 3 | column_limit = 80 4 | -------------------------------------------------------------------------------- /test/tools/tool-test-train-performance/data/README.md: -------------------------------------------------------------------------------- 1 | put data here 2 | -------------------------------------------------------------------------------- /test/tools/tool-test-cpu-train/vgg11.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name vgg11 2 | -------------------------------------------------------------------------------- /test/tools/tool-test-cpu-train/vgg13.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name vgg13 2 | -------------------------------------------------------------------------------- /test/tools/tool-test-cpu-train/vgg16.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name vgg16 2 | -------------------------------------------------------------------------------- /test/tools/tool-test-cpu-train/vgg19.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name vgg19 2 | -------------------------------------------------------------------------------- /test/tools/tool-test-dl-net/vgg11.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name vgg11 2 | -------------------------------------------------------------------------------- /test/tools/tool-test-dl-net/vgg13.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name 
vgg13 2 | -------------------------------------------------------------------------------- /test/tools/tool-test-dl-net/vgg16.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name vgg16 2 | -------------------------------------------------------------------------------- /test/tools/tool-test-dl-net/vgg19.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name vgg19 2 | -------------------------------------------------------------------------------- /test/tools/tool-test-gpu-train/vgg11.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name vgg11 2 | -------------------------------------------------------------------------------- /test/tools/tool-test-gpu-train/vgg13.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name vgg13 2 | -------------------------------------------------------------------------------- /test/tools/tool-test-gpu-train/vgg16.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name vgg16 2 | -------------------------------------------------------------------------------- /test/tools/tool-test-gpu-train/vgg19.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name vgg19 2 | -------------------------------------------------------------------------------- /test/tools/tool-test-cpu-train/resnet18.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name resnet18 2 | -------------------------------------------------------------------------------- /test/tools/tool-test-cpu-train/resnet34.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name resnet34 2 | -------------------------------------------------------------------------------- /test/tools/tool-test-cpu-train/resnet50.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name resnet50 2 | -------------------------------------------------------------------------------- /test/tools/tool-test-dl-net/resnet101.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name resnet101 2 | -------------------------------------------------------------------------------- /test/tools/tool-test-dl-net/resnet18.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name resnet18 2 | -------------------------------------------------------------------------------- /test/tools/tool-test-dl-net/resnet34.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name resnet34 2 | -------------------------------------------------------------------------------- /test/tools/tool-test-dl-net/resnet50.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name resnet50 2 | -------------------------------------------------------------------------------- /test/tools/tool-test-gpu-train/resnet18.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name resnet18 2 | 
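The tool-test-cpu-train, tool-test-dl-net and tool-test-gpu-train wrappers in this section are all one-line scripts that call a per-directory tool.py with a single --model_name flag. The actual tool.py (listed in the tree above but not reproduced in this dump) may differ; a minimal sketch of the entry point these wrappers assume:

```python
# Hypothetical sketch of the tool.py interface that the one-line wrapper
# scripts assume. Only the --model_name flag is visible from the wrappers
# themselves; the test body below is an illustrative placeholder.
import argparse


def parse_args():
    parser = argparse.ArgumentParser(
        description="Run one train/net test for a named network.")
    parser.add_argument(
        "--model_name",
        type=str,
        required=True,
        help="network to test, e.g. vgg11, resnet50, mobilenetv1")
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    print("running test for model: %s" % args.model_name)
```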
-------------------------------------------------------------------------------- /test/tools/tool-test-gpu-train/resnet34.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name resnet34 2 | -------------------------------------------------------------------------------- /test/tools/tool-test-gpu-train/resnet50.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name resnet50 2 | -------------------------------------------------------------------------------- /test/tools/tool-test-cpu-train/resnet101.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name resnet101 2 | -------------------------------------------------------------------------------- /test/tools/tool-test-dl-net/mobilenetv1.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name mobilenetv1 2 | -------------------------------------------------------------------------------- /test/tools/tool-test-dl-net/mobilenetv2.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name mobilenetv2 2 | -------------------------------------------------------------------------------- /test/tools/tool-test-gpu-train/resnet101.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name resnet101 2 | -------------------------------------------------------------------------------- /test/tools/tool-test-op-correctness/cases/readme.md: -------------------------------------------------------------------------------- 1 | # Usage 2 | python run.py --help 3 | -------------------------------------------------------------------------------- /test/tools/tool-test-cpu-train/mobilenetv1.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name mobilenetv1 2 | -------------------------------------------------------------------------------- /test/tools/tool-test-cpu-train/mobilenetv2.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name mobilenetv2 2 | -------------------------------------------------------------------------------- /test/tools/tool-test-gpu-train/mobilenetv1.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name mobilenetv1 2 | -------------------------------------------------------------------------------- /test/tools/tool-test-gpu-train/mobilenetv2.sh: -------------------------------------------------------------------------------- 1 | python tool.py --model_name mobilenetv2 2 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/gpu/pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | addopts = -p no:warnings -------------------------------------------------------------------------------- /models/args/PaddleNLP/pretrain_language_models/ELMo/run_prepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #prepare data 4 | -------------------------------------------------------------------------------- /inference/inference_benchmark/cc/Paddle/requirements.txt: 
-------------------------------------------------------------------------------- 1 | cup 2 | py3nvml 3 | pytz 4 | pandas 5 | openpyxl 6 | -------------------------------------------------------------------------------- /inference/inference_benchmark/python/TensorFlow/requirements.txt: -------------------------------------------------------------------------------- 1 | cup 2 | pytz 3 | py3nvml 4 | tensorflow_gpu 5 | -------------------------------------------------------------------------------- /distributed/dist_fleet/thirdparty/data/dist_data/README.md: -------------------------------------------------------------------------------- 1 | ## Minimal dataset for distributed CTS testing 2 | - ctr_data 3 | - word2vec 4 | - ImageNet 5 | -------------------------------------------------------------------------------- /inference/inference_api_test/cpp_api_test/run_cpp.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo ${PADDLE_ROOT} 4 | 5 | bash bin/run-case.sh 6 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/requirements.txt: -------------------------------------------------------------------------------- 1 | opencv-python==4.2.0.32 2 | cup==3.2.24 3 | py3nvml==0.2.6 4 | pytest==5.4.3 5 | -------------------------------------------------------------------------------- /test/tools/tool-test-inference/test_case/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python 3 | """ 4 | init 5 | """ 6 | from .infer_test import InferenceTest 7 | -------------------------------------------------------------------------------- /inference/inference_test_utils/README.md: -------------------------------------------------------------------------------- 1 | # Inference test utils 2 | 3 | ```shell 4 | python benchmark_analysis.py --log_path=./tools/output \ 5 | --output_name=benchmark.xlsx; 6 | ``` 7 | -------------------------------------------------------------------------------- /test/tools/tool-test-dl-algorithm-correctness/test_case/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python 3 | """ 4 | init 5 | """ 6 | from .infer_test import InferenceTest 7 | -------------------------------------------------------------------------------- /distributed/dist_fleet/thirdparty/data/dist_data/ImageNet/train/n01440764_107.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PaddlePaddle/continuous_integration/HEAD/distributed/dist_fleet/thirdparty/data/dist_data/ImageNet/train/n01440764_107.jpeg -------------------------------------------------------------------------------- /distributed/dist_fleet/thirdparty/data/dist_data/ImageNet/train/n01443537_163.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PaddlePaddle/continuous_integration/HEAD/distributed/dist_fleet/thirdparty/data/dist_data/ImageNet/train/n01443537_163.jpeg -------------------------------------------------------------------------------- /distributed/dist_fleet/thirdparty/data/dist_data/ImageNet/train/n01484850_1849.jpeg: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/continuous_integration/HEAD/distributed/dist_fleet/thirdparty/data/dist_data/ImageNet/train/n01484850_1849.jpeg -------------------------------------------------------------------------------- /distributed/dist_fleet/thirdparty/data/dist_data/ImageNet/train/n01491361_144.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PaddlePaddle/continuous_integration/HEAD/distributed/dist_fleet/thirdparty/data/dist_data/ImageNet/train/n01491361_144.jpeg -------------------------------------------------------------------------------- /inference/inference_benchmark/cc/Paddle/intel_bash.sh: -------------------------------------------------------------------------------- 1 | rm -rf build 2 | bash compile.sh all ON OFF OFF ${LIB_DIR} 3 | # bash bin/run_models_benchmark.sh "static" "cpu" "1" "1" 4 | DNNL_VERBOSE=1 bash bin/run_models_benchmark.sh "static" "cpu" "1" "1" 5 | 6 | -------------------------------------------------------------------------------- /test/tools/start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source /paddle/python37/bin/activate 4 | 5 | nohup python3 tool-api.py --callback_addr $1 >> tool-api.log 2>&1 & 6 | 7 | for n in `seq 11`; do 8 | cd tool-$n 9 | nohup python3 service.py >> tool-$n.log 2>&1 & 10 | cd - 11 | done 12 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.DS_Store 2 | *.vs 3 | build/ 4 | build_doc/ 5 | *.user 6 | 7 | .vscode 8 | .idea 9 | .project 10 | .cproject 11 | .pydevproject 12 | .settings/ 13 | CMakeSettings.json 14 | Makefile 15 | .test_env/ 16 | third_party/ 17 | 18 | *~ 19 | bazel-* 20 | 21 | build_* 22 | cmake-build-* 23 | __pycache__ 24 | -------------------------------------------------------------------------------- /models/args/PaddleNLP/seq2seq/seq2seq/run_prepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | 4 | ROOT_PATH=$1 5 | 6 | if [ -e data ] 7 | then 8 | mv data data.bak 9 | fi 10 | if [ ! -e data.tgz ] 11 | then 12 | wget https://sys-p0.bj.bcebos.com/models/PaddleNLP/seq2seq/seq2seq/data.tgz --no-check-certificate 13 | fi 14 | tar -zxf data.tgz 15 | -------------------------------------------------------------------------------- /models/args/PaddleNLP/seq2seq/variational_seq2seq/run_prepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | 4 | ROOT_PATH=$1 5 | 6 | if [ -e data ] 7 | then 8 | mv data data.bak 9 | fi 10 | if [ ! -e data.tgz ] 11 | then 12 | wget https://sys-p0.bj.bcebos.com/models/PaddleNLP/seq2seq/variational_seq2seq/data.tgz --no-check-certificate 13 | fi 14 | tar -zxf data.tgz 15 | -------------------------------------------------------------------------------- /test/tools/tool-check-availability-of-installation/tool.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | apt-get install python3-venv 4 | cur_env=$VIRTUAL_ENV 5 | new_env=`date +%Y-%m-%d-%H-%M-%S` 6 | echo $new_env 7 | 8 | python -m venv $new_env 9 | . 
./${new_env}/bin/activate 10 | python -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple 11 | 12 | python tool.py --check_item $1 13 | 14 | 15 | rm -rf $new_env 16 | -------------------------------------------------------------------------------- /inference/inference_api_test/run_inference_case.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sh build_py37_paddel.sh 4 | source init_env.sh 5 | 6 | ROOT=`dirname "$0"` 7 | ROOT=`cd "$ROOT/.."; pwd` 8 | CPP_ROOT=${ROOT}/inference_api_test/cpp_api_test 9 | PY_ROOT=${ROOT}/inference_api_test/python_api_test 10 | 11 | export PADDLE_ROOT=${ROOT}/inference_api_test/Paddle 12 | 13 | cd ${CPP_ROOT} 14 | sh run_cpp.sh 15 | 16 | cd ${PY_ROOT} 17 | sh run_case.sh 18 | -------------------------------------------------------------------------------- /models/args/PaddleNLP/pretrain_language_models/ELMo/test_args.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | this is elmo args 4 | """ 5 | 6 | train = { 7 | "train_path": ["data/train/sentence_file_*"], 8 | "test_path": ["data/dev/sentence_file_*"], 9 | "vocab_path": ["data/vocabulary_min5k.txt"], 10 | "learning_rate": [0.2], 11 | "use_gpu": [True], 12 | "all_train_tokens": ["35479"], 13 | "local": ["True $@"], 14 | } 15 | -------------------------------------------------------------------------------- /tools/codestyle/clang_format.hook: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | readonly VERSION="3.8" 5 | 6 | version=$(clang-format -version) 7 | 8 | if ! [[ $version == *"$VERSION"* ]]; then 9 | echo "clang-format version check failed." 10 | echo "a version containing '$VERSION' is needed, but got '$version'" 11 | echo "you can install the right version, and make a soft-link to '\$PATH' env" 12 | exit -1 13 | fi 14 | 15 | clang-format $@ 16 | -------------------------------------------------------------------------------- /inference/inference_benchmark/cc/PyTorch/compile.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | 4 | mkdir -p build 5 | cd build 6 | 7 | # same as the class_benchmark_demo.cc 8 | 9 | # DEMO_NAME=clas_benchmark 10 | DEMO_NAME=all 11 | if [ $# -ge 1 ]; then 12 | DEMO_NAME=$1 13 | fi 14 | 15 | LIB_DIR="/workspace/libtorch" 16 | if [ $# -ge 2 ]; then 17 | LIB_DIR=$2 18 | fi 19 | 20 | cmake ../src -DDEMO_NAME=${DEMO_NAME} -DTORCH_LIB=${LIB_DIR} 21 | 22 | make -j 23 | -------------------------------------------------------------------------------- /inference/inference_api_test/init_env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export LD_LIBRARY_PATH=/opt/_internal/cpython-3.7.0/lib/:${LD_LIBRARY_PATH} 3 | export PATH=/opt/_internal/cpython-3.7.0/bin/:${PATH} 4 | export PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/_internal/cpython-3.7.0/bin/python3.7 -DPYTHON_INCLUDE_DIR:PATH=/opt/_internal/cpython-3.7.0/include/python3.7m -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-3.7.0/lib/libpython3.7m.so" 5 | export PYTHON_ABI="cp37-cp37m" 6 | export PY_VERSION=3.7 7 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Paddle-Inference test code 2 | 3 | ## Directory overview 4 | 5 | ```shell 6 | .
7 | |-- inference_api_test # functional tests via the Paddle-Inference API 8 | | |-- cpp_api_test # tests via the cpp API 9 | | `-- python_api_test # tests via the python API 10 | |-- inference_benchmark # performance tests via the Paddle-Inference API 11 | | |-- cc # tests via the cpp API 12 | | |-- python # tests via the python API 13 | | `-- README.md 14 | |-- inference_test_utils # utilities needed by some tests 15 | | `-- README.md 16 | `-- README.md 17 | ``` 18 | -------------------------------------------------------------------------------- /models/args/conf/README: -------------------------------------------------------------------------------- 1 | I. args_batch.conf 2 | 3 | Holds the parameters required to run each model (a parsing sketch appears further below). 4 | 5 | 1. File format: 6 | 7 | path\texecutable xx.py\tfunction under test\tmode 8 | 9 | path: the model's path under https://github.com/PaddlePaddle/models/ 10 | 11 | executable: the .py file that runs the model 12 | 13 | function under test: the feature to test (train, evaluate, infer, etc.), matching a dict name in that model's test_args.py 14 | 15 | mode: reserved parameter, set to 1 by default 16 | 17 | 18 | 2. For example: 19 | 20 | PaddleNLP/emotion_detection run_classifier.py train_textcnn 1 21 | 22 | means the model under test is PaddleNLP/emotion_detection, the test runs run_classifier.py, and the function under test is train_textcnn 23 | -------------------------------------------------------------------------------- /inference/README.md: -------------------------------------------------------------------------------- 1 | # Paddle-Inference test code 2 | 3 | ## Directory overview 4 | 5 | ```shell 6 | . 7 | |-- inference_api_test # functional tests via the Paddle-Inference API 8 | | |-- cpp_api_test # tests via the cpp API 9 | | `-- python_api_test # tests via the python API 10 | |-- inference_benchmark # performance tests via the Paddle-Inference API 11 | | |-- cc # tests via the cpp API 12 | | |-- python # tests via the python API 13 | | `-- README.md 14 | |-- inference_test_utils # utilities needed by some tests 15 | | `-- README.md 16 | `-- README.md 17 | ``` 18 | 19 | 20 | -------------------------------------------------------------------------------- /inference/inference_benchmark/cc/TensorFlow/compile.sh: -------------------------------------------------------------------------------- 1 | set -ex 2 | 3 | mkdir -p build 4 | cd build 5 | 6 | cmake ../src 7 | make -j4 8 | cd .. 9 | 10 | export CUDA_VISIBLE_DEVICES=3 11 | export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/lib64 12 | 13 | # XLA env 14 | # export TF_XLA_FLAGS="--tf_xla_cpu_global_jit" 15 | # export TF_XLA_FLAGS="--tf_xla_auto_jit=2" # GPU 16 | # export TF_XLA_FLAGS="--tf_xla_auto_jit=2 --tf_xla_cpu_global_jit" # CPU 17 | 18 | ./build/clas_benchmark --model_path=./mobilenet_v2_1.0_224_frozen.pb --repeats=1
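Referring back to /models/args/conf/README above: a minimal parsing sketch for the args_batch.conf format it describes. Illustrative only — the repo's real consumer of this file (models.py) is not reproduced here, and load_args_batch is a hypothetical helper name.

```python
# Illustrative parser for models/args/conf/args_batch.conf, following only
# the format described in models/args/conf/README:
#   <model path> <entry script> <function under test> <mode>
# e.g.: PaddleNLP/emotion_detection run_classifier.py train_textcnn 1
def load_args_batch(conf_path):
    entries = []
    with open(conf_path) as fin:
        for line in fin:
            fields = line.split()
            if len(fields) < 4:
                continue  # skip blank or malformed lines
            path, script, function, mode = fields[:4]
            entries.append({
                "path": path,          # model path under PaddlePaddle/models
                "script": script,      # the .py file that runs the model
                "function": function,  # dict name in the model's test_args.py
                "mode": mode,          # reserved parameter, normally "1"
            })
    return entries
```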
-------------------------------------------------------------------------------- /models/args/PaddleNLP-/dialogue_system/auto_dialogue_evaluation/run_prepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ROOT_PATH=$1 4 | 5 | #prepare data 6 | if [ -e args_test_data ] 7 | then 8 | echo "args_test_data has already existed" 9 | rm args_test_data 10 | fi 11 | ln -s ${ROOT_PATH}/data/PaddleNLP/dialogue_system/auto_dialogue_evaluation/data args_test_data 12 | 13 | #pwd 14 | #ls -l 15 | 16 | rm -rf args_test_inference_model 17 | mkdir args_test_inference_model 18 | 19 | rm -rf args_test_finetuned 20 | mkdir args_test_finetuned 21 | -------------------------------------------------------------------------------- /models/args/run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eo pipefail 3 | 4 | 5 | # clone models repo from github 6 | if [ -e models ] 7 | then 8 | sudo /bin/rm -rf models 9 | fi 10 | #git clone https://github.com/PaddlePaddle/models.git 11 | git clone -b zytest https://github.com/zhengya01/models.git 12 | 13 | # clone Paddle repo from github 14 | if [ -e Paddle ] 15 | then 16 | sudo /bin/rm -rf Paddle 17 | fi 18 | git clone https://github.com/PaddlePaddle/Paddle.git 19 | 20 | # get the models that changed 21 | sh commit.sh `pwd` 22 | 23 | # run models in docker 24 | ./run_test `pwd` 25 | -------------------------------------------------------------------------------- /inference/inference_benchmark/python/Pytorch/README.md: -------------------------------------------------------------------------------- 1 | # Torch Python performance tests 2 | 3 | ## Test dependencies 4 | ```python 5 | import torch 6 | import torchvision 7 | import cv2 8 | import trtorch 9 | ``` 10 | The Torch tests enable the MKL-DNN optimization by default. 11 | 12 | ## Quick test 13 | Run a single test quickly: 14 | 15 | ```shell 16 | python torch_benchmark.py --model_name="resnet101" \ 17 | --input_shape="3,224,224" \ 18 | --batch_size=1 \ 19 | --repeat_times=1000 20 | ``` 21 | 22 | Set the number of MKL threads to 1: 23 | ```shell 24 | export OMP_NUM_THREADS=1 25 | export MKL_NUM_THREADS=1 26 | ``` 27 | -------------------------------------------------------------------------------- /models/args/PaddleNLP/pretrain_language_models/XLNet/run_prepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #prepare data 4 | if [ -e args_test_data ] 5 | then 6 | echo "args_test_data has already existed" 7 | else 8 | ln -s /ssd3/models_test/models_args/PaddleNLP/PaddleLARK/XLNet/data args_test_data 9 | fi 10 | 11 | 12 | #prepare pre_model 13 | if [ -e xlnet_cased_L-12_H-768_A-12 ] 14 | then 15 | echo "xlnet_cased_L-12_H-768_A-12 has already existed" 16 | else 17 | ln -s /ssd3/models_test/models_args/PaddleNLP/PaddleLARK/XLNet/xlnet_cased_L-12_H-768_A-12 xlnet_cased_L-12_H-768_A-12 18 | fi 19 | -------------------------------------------------------------------------------- /models/args/PaddleNLP/language_model/test_args.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | this is language_model
args 4 | """ 5 | 6 | train = { 7 | "use_gpu": [True, False], 8 | "parallel": [True, False], 9 | "max_epoch": [1, 2], 10 | "batch_size": [32, 16], 11 | "model_type": ["test", "small"], 12 | "rnn_model": ["static", "padding"], 13 | "data_path": [ 14 | "args_test_data_1/simple-examples/data/", 15 | "args_test_data_2/simple-examples/data/" 16 | ], 17 | "save_model_dir": ["models_1", "models_2"], 18 | "log_path": ["tmp_log_file", None], 19 | } 20 | -------------------------------------------------------------------------------- /models/args/PaddleNLP/pretrain_language_models/BERT/run_prepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #prepare data 4 | if [ -e args_test_data ] 5 | then 6 | echo "args_test_data has already existed" 7 | else 8 | ln -s /ssd3/models_test/models_args/PaddleNLP/PaddleLARK/BERT/data args_test_data 9 | fi 10 | 11 | 12 | #prepare pre_model 13 | if [ -e args_test_model ] 14 | then 15 | echo "args_test_model has already existed" 16 | else 17 | ln -s /ssd3/models_test/models_args/PaddleNLP/PaddleLARK/BERT/pretrain_model args_test_model 18 | fi 19 | 20 | 21 | rm -rf args_test_output 22 | mkdir args_test_output 23 | -------------------------------------------------------------------------------- /tipc/run.sh: -------------------------------------------------------------------------------- 1 | #REPO=$1 2 | #AGILE_PULL_ID=$2 3 | #AGILE_REVISION=$3 4 | ROOT_PATH=${ROOT_PATH:-/mnt/xly/work} 5 | 6 | work_dir=${ROOT_PATH}/${REPO} 7 | mkdir -p ${work_dir} 8 | cd ${work_dir} && rm -rf * 9 | 10 | # download latest tag paddle-wheel 11 | wget -q --no-proxy https://xly-devops.bj.bcebos.com/PR/Paddle/fullchain_ce_test/${AGILE_PULL_ID}/${AGILE_REVISION}/${REPO}.tar.gz 12 | 13 | tar -xpf ${REPO}.tar.gz 14 | cd Paddle 15 | 16 | 17 | #python -m pip install paddleseg 18 | cp continuous_integration/tipc/tipc.sh . 19 | #export TIPC_MODE="whole_train_whole_infer" 20 | export CHECK_LOSS=True 21 | sh tipc.sh ${REPO} 22 | -------------------------------------------------------------------------------- /distributed/dist_fleet/run.sh: -------------------------------------------------------------------------------- 1 | python dist_fleet_ctr.py --update_method pserver --role pserver --endpoints 127.0.0.1:9121,127.0.0.1:9122 --current_id 0 --trainers 2 > ./ps0.log 2>&1 & 2 | python dist_fleet_ctr.py --update_method pserver --role pserver --endpoints 127.0.0.1:9121,127.0.0.1:9122 --current_id 1 --trainers 2 > ./ps1.log 2>&1 & 3 | python dist_fleet_ctr.py --update_method pserver --role trainer --endpoints 127.0.0.1:9121,127.0.0.1:9122 --current_id 0 --trainers 2 > ./tr0.log 2>&1 & 4 | python dist_fleet_ctr.py --update_method pserver --role trainer --endpoints 127.0.0.1:9121,127.0.0.1:9122 --current_id 1 --trainers 2 > ./tr1.log 2>&1 & 5 | -------------------------------------------------------------------------------- /tipc/check_loss.sh: -------------------------------------------------------------------------------- 1 | #! 
/bin/bash 2 | 3 | push_file=/home/work/bce-client/BosClient.py 4 | 5 | logfiles=`find test_tipc -name "*train.log"` 6 | for log in $logfiles; do 7 | verify_file=$(echo $log | tr '/' '^') 8 | verify_file=$(echo $verify_file | xargs) 9 | verify_file=${verify_file}".loss" 10 | if wget -q https://paddle-qa.bj.bcebos.com/fullchain_ce_loss/$verify_file; then 11 | grep "\[Train\].*\[Avg\]" $log > losslog 12 | python check_loss.py $verify_file losslog $log | tee -a loss.result 13 | else 14 | grep "\[Train\].*\[Avg\]" $log > $verify_file 15 | python2 ${push_file} $verify_file paddle-qa/fullchain_ce_loss 16 | fi 17 | done 18 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: cpp 2 | sudo: required 3 | dist: trusty 4 | services: 5 | - docker 6 | os: 7 | - linux 8 | env: 9 | - JOB=check_style 10 | addons: 11 | ssh_known_hosts: 13.229.163.131 12 | before_install: 13 | # For pylint dockstring checker 14 | - sudo pip install pre-commit pylint pytest astroid isort 15 | - | 16 | function timeout() { perl -e 'alarm shift; exec @ARGV' "$@"; } 17 | script: 18 | - "travis_wait 30 sleep 1800 &" 19 | - | 20 | # 43min timeout 21 | pre-commit install 22 | pre-commit run -a 23 | if [ $? -eq 0 ]; then true; else exit 1; fi; 24 | notifications: 25 | email: 26 | on_success: change 27 | on_failure: always 28 | -------------------------------------------------------------------------------- /distributed/dist_fleet/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /distributed/dist_fleet/thirdparty/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | -------------------------------------------------------------------------------- /models/args/commit.sh: -------------------------------------------------------------------------------- 1 | cp conf/args_batch.conf models/ 2 | cp models.py models/ 3 | cd models 4 | git rev-parse HEAD >commit_id 5 | git rev-parse HEAD^^^ >commit_id_head 6 | git diff `cat commit_id_head` | grep "diff --git" | awk -F ' b/' '{print $2}' >change_info 7 | #git rev-parse HEAD 8 | #git rev-parse HEAD^^^ 9 | #git diff `git rev-parse HEAD^^^` | grep "diff --git" | awk -F ' b/' '{print $2}' 10 | #git diff `cat commit_id_head` | grep "diff --git" | awk -F ' b/' '{print $2}' 11 | 12 | #python commit.py commit_info >change_info 13 | python models.py change_info args_batch.conf >models_info 14 | 15 | cat models_info 16 | 17 | cp models_info ../conf/changed_models.conf 18 | ls -l ../conf/ 19 | cd - 20 | -------------------------------------------------------------------------------- /inference/inference_benchmark/python/Paddle/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/src/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /distributed/dist_fleet/thirdparty/image_classfication/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /distributed/dist_fleet/thirdparty/image_classfication/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /models/args/commit.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys 4 | 5 | 6 | def check_models(commit_file): 7 | """Collect the set of models named in the commit-diff listing commit_file.""" 8 | 9 | models = set() 10 | commit = [] 11 | with open(commit_file, 'r') as fin: 12 | commit = fin.readlines() 13 | commit.reverse() 14 | for item in commit[1:]: 15 | line = item.strip() 16 | if line == "": 17 | break 18 | get_models(line.split()[0], models) 19 | return models 20 | 21 | 22 | def get_models(line, models): 23 | """Add the model identified by line to the models set.""" 24 | 25 | # check line 26 | model = line 27 | models.add(model) 28 | 29 | 30 | if __name__ == "__main__": 31 | commit_file = sys.argv[1] 32 | check_models(commit_file) 33 | -------------------------------------------------------------------------------- /models/args/PaddleNLP/dialogue_system/dialogue_general_understanding/run_prepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ROOT_PATH=$1 4 | 5 | if [ -e data ] 6 | then 7 | mv data data.bak 8 | fi 9 | if [ ! 
-e data.tgz ] 10 | then 11 | wget https://sys-p0.bj.bcebos.com/models/PaddleNLP/dialogue_system/dialogue_general_understanding/data.tgz --no-check-certificate 12 | fi 13 | tar -zxf data.tgz 14 | 15 | #prepare data 16 | if [ -e args_test_data ] 17 | then 18 | echo "args_test_data has already existed" 19 | else 20 | ln -s data args_test_data 21 | fi 22 | 23 | rm -rf args_test_save_model 24 | mkdir args_test_save_model 25 | 26 | rm -rf args_test_inference_model 27 | mkdir args_test_inference_model 28 | 29 | #prepare pre_model 30 | 31 | -------------------------------------------------------------------------------- /models/args/PaddleNLP/dialogue_domain_classification/test_args.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | this is dialogue_domain_classification args 4 | """ 5 | 6 | train = { 7 | "use_cuda": [True, False], 8 | "do_train": [True], 9 | "do_eval": [False, True], 10 | "do_test": [False, True], 11 | "build_dict": [False], 12 | "data_dir": ['./data/input/'], 13 | "save_dir": ['./data/output/'], 14 | "config_path": ['./data/input/model.conf'], 15 | "batch_size": [64, 32], 16 | "max_seq_len": [50, 60], 17 | "checkpoints": ['checkpoints'], 18 | "init_checkpoint": [None], 19 | "skip_steps": [10, 100], 20 | "cpu_num": [3], 21 | "validation_steps": [100, 200], 22 | "learning_rate": [0.1, 0.05], 23 | } 24 | -------------------------------------------------------------------------------- /models/args/PaddleNLP/seq2seq/variational_seq2seq/test_args.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | this is variational_seq2seq args 4 | """ 5 | 6 | train = { 7 | "vocab_size": [10003], 8 | "batch_size": [32, 16], 9 | "init_scale": [0.1, 0.2], 10 | "max_grad_norm": [5.0, 4.9], 11 | "dataset_prefix": ["data/ptb/ptb", "data/swda/swda"], 12 | "model_path": ["ptb_model", "swda_model"], 13 | "use_gpu": [True], 14 | "max_epoch": [1, 2], 15 | } 16 | 17 | infer = { 18 | "vocab_size": [10003], 19 | "batch_size": [32, 16], 20 | "init_scale": [0.1, 0.2], 21 | "max_grad_norm": [5.0, 4.9], 22 | "dataset_prefix": ["data/ptb/ptb", "data/swda/swda"], 23 | "use_gpu": [True], 24 | "reload_model": ["ptb_model/epoch_0", "swda_model/epoch_0"], 25 | } 26 | -------------------------------------------------------------------------------- /tools/codestyle/pylint_pre_commit.hook: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TOTAL_ERRORS=0 4 | 5 | 6 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 7 | export PYTHONPATH=$DIR:$PYTHONPATH 8 | 9 | # The trick to remove deleted files: https://stackoverflow.com/a/2413151 10 | for file in $(git diff --name-status | grep .py | grep -v thirdparty | grep -v framework_api | awk '$1 != "D" {print $2}'); do 11 | echo $file 12 | pylint --disable=all --load-plugins=docstring_checker \ 13 | --enable=doc-string-one-line,doc-string-end-with,doc-string-with-all-args,doc-string-triple-quotes,doc-string-missing,doc-string-indent-error,doc-string-with-returns,doc-string-with-raises $file; 14 | TOTAL_ERRORS=$(expr $TOTAL_ERRORS + $?); 15 | done 16 | 17 | exit $TOTAL_ERRORS 18 | #For now, just warning: 19 | #exit 0 20 | 21 | -------------------------------------------------------------------------------- /models/args/PaddleNLP/dialogue_system/auto_dialogue_evaluation/run_prepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 
3 | ROOT_PATH=$1 4 | 5 | if [ -e data ] 6 | then 7 | mv data data.bak 8 | fi 9 | if [ ! -e data.tgz ] 10 | then 11 | wget https://sys-p0.bj.bcebos.com/models/PaddleNLP/dialogue_system/auto_dialogue_evaluation/data.tgz --no-check-certificate 12 | fi 13 | tar -zxf data.tgz 14 | 15 | #prepare data 16 | if [ -e args_test_data ] 17 | then 18 | echo "args_test_data has already existed" 19 | rm args_test_data 20 | fi 21 | #ln -s ${ROOT_PATH}/data/PaddleNLP/dialogue_system/auto_dialogue_evaluation/data args_test_data 22 | ln -s data args_test_data 23 | 24 | #pwd 25 | #ls -l 26 | 27 | rm -rf args_test_inference_model 28 | mkdir args_test_inference_model 29 | 30 | rm -rf args_test_finetuned 31 | mkdir args_test_finetuned 32 | -------------------------------------------------------------------------------- /models/args/PaddleNLP/emotion_detection/run_prepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | 4 | #prepare data 5 | if [ -e args_test_data ] 6 | then 7 | echo "args_test_data has already existed" 8 | else 9 | ln -s /ssd3/models_test/models_args/PaddleNLP/emotion_detection/data args_test_data 10 | fi 11 | 12 | #prepare pre_model 13 | if [ -e args_test_models ] 14 | then 15 | echo "models has already existed" 16 | else 17 | ln -s /ssd3/models_test/models_args/PaddleNLP/emotion_detection/pretrain_models args_test_models 18 | fi 19 | 20 | # 21 | if [ -e args_test_output_1 ] 22 | then 23 | echo "args_test_output_1 has already existed" 24 | else 25 | mkdir args_test_output_1 26 | fi 27 | 28 | if [ -e args_test_output_2 ] 29 | then 30 | echo "args_test_output_2 has already existed" 31 | else 32 | mkdir args_test_output_2 33 | fi 34 | -------------------------------------------------------------------------------- /distributed/dist_fleet/thirdparty/image_classfication/models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | from .vgg import VGG11, VGG13, VGG16, VGG19 16 | from .resnet import ResNet18, ResNet34, ResNet50, ResNet101, ResNet152 17 | -------------------------------------------------------------------------------- /models/args/PaddleNLP/language_model/run_prepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ROOT_PATH=$1 4 | 5 | #prepare data 6 | 7 | if [ -e data ] 8 | then 9 | mv data data.bak 10 | fi 11 | if [ ! 
-e data.tgz ] 12 | then 13 | wget https://sys-p0.bj.bcebos.com/models/PaddleNLP/language_model/data.tgz --no-check-certificate 14 | fi 15 | tar -zxf data.tgz 16 | 17 | 18 | if [ -e args_test_data_1 ] 19 | then 20 | echo "args_test_data_1 has already existed" 21 | else 22 | ln -s data args_test_data_1 23 | fi 24 | if [ -e args_test_data_2 ] 25 | then 26 | echo "args_test_data_2 has already existed" 27 | else 28 | ln -s data args_test_data_2 29 | fi 30 | 31 | if [ -e models_1 ] 32 | then 33 | echo "models_1 has already existed" 34 | else 35 | mkdir models_1 36 | cd models_1 37 | mkdir 0 1 2 3 4 5 38 | fi 39 | 40 | #prepare pre_model 41 | 42 | -------------------------------------------------------------------------------- /models/args/PaddleNLP/machine_translation/transformer/run_prepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ROOT_PATH=$1 4 | 5 | if [ -e data ] 6 | then 7 | mv data data.bak 8 | fi 9 | if [ ! -e data.tgz ] 10 | then 11 | wget https://sys-p0.bj.bcebos.com/models/PaddleNLP/machine_translation/transformer/data.tgz --no-check-certificate 12 | fi 13 | tar -zxf data.tgz 14 | 15 | #prepare data 16 | if [ -e args_test_data ] 17 | then 18 | echo "args_test_data has already existed" 19 | else 20 | ln -s data args_test_data 21 | fi 22 | 23 | rm -rf args_test_inference_model 24 | mkdir args_test_inference_model 25 | 26 | rm -rf args_test_finetuned 27 | mkdir args_test_finetuned 28 | 29 | #prepare pre_model 30 | 31 | if [ -e args_test_model ] 32 | then 33 | echo "args_test_model has already existed" 34 | else 35 | ln -s data/args_test_model args_test_model 36 | fi 37 | -------------------------------------------------------------------------------- /test/tools/tool-test-op-correctness/cases/test_cos.py: -------------------------------------------------------------------------------- 1 | #!/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python 4 | """ 5 | test cos 6 | """ 7 | from apibase import APIBase 8 | import paddle 9 | import pytest 10 | import numpy as np 11 | 12 | 13 | class TestCos(APIBase): 14 | """ 15 | test 16 | """ 17 | 18 | def hook(self): 19 | """ 20 | implement 21 | """ 22 | self.types = [np.float32, np.float64] 23 | # self.debug = True 24 | # self.static = True 25 | # enable check grad 26 | # self.enable_backward = True 27 | 28 | 29 | obj = TestCos(paddle.cos) 30 | 31 | 32 | @pytest.mark.api_base_cos_vartype 33 | def test_cos_base(): 34 | """ 35 | base 36 | """ 37 | x = -1 + 2 * np.random.random(size=[3, 3, 3]) 38 | res = np.cos(x) 39 | obj.base(res=res, x=x) 40 | -------------------------------------------------------------------------------- /test/tools/tool-test-op-correctness/cases/test_atan.py: -------------------------------------------------------------------------------- 1 | #!/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python 4 | """ 5 | test atan 6 | """ 7 | from apibase import APIBase 8 | import paddle 9 | import pytest 10 | import numpy as np 11 | 12 | 13 | class TestAtan(APIBase): 14 | """ 15 | test 16 | """ 17 | 18 | def hook(self): 19 | """ 20 | implement 21 | """ 22 | self.types = [np.float32, np.float64] 23 | # self.debug = True 24 | # self.static = True 25 | # enable check grad 26 | # self.enable_backward = True 27 | 28 | 29 | obj = TestAtan(paddle.atan) 30 | 31 | 32 | @pytest.mark.api_base_atan_vartype 33 | def test_atan_base(): 34 | """ 35 | base 36 | """ 37 | x = -1 + 2 * 
np.random.random(size=[3, 3, 3]) 38 | res = np.arctan(x) 39 | obj.base(res=res, x=x) 40 | -------------------------------------------------------------------------------- /test/tools/tool-test-op-correctness/cases/test_ceil.py: -------------------------------------------------------------------------------- 1 | #!/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python 4 | """ 5 | test ceil 6 | """ 7 | from apibase import APIBase 8 | import paddle 9 | import pytest 10 | import numpy as np 11 | 12 | 13 | class TestCeil(APIBase): 14 | """ 15 | test 16 | """ 17 | 18 | def hook(self): 19 | """ 20 | implement 21 | """ 22 | self.types = [np.float32, np.float64] 23 | # self.debug = True 24 | # self.static = True 25 | # enable check grad 26 | # self.enable_backward = True 27 | 28 | 29 | obj = TestCeil(paddle.ceil) 30 | 31 | 32 | @pytest.mark.api_base_ceil_vartype 33 | def test_ceil_base(): 34 | """ 35 | base 36 | """ 37 | x = -1 + 2 * np.random.random(size=[3, 3, 3]) 38 | res = np.ceil(x) 39 | obj.base(res=res, x=x) 40 | -------------------------------------------------------------------------------- /test/tools/tool-test-op-correctness/cases/test_asin.py: -------------------------------------------------------------------------------- 1 | #!/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python 4 | """ 5 | test asin 6 | """ 7 | from apibase import APIBase 8 | import paddle 9 | import pytest 10 | import numpy as np 11 | 12 | 13 | class TestAsin(APIBase): 14 | """ 15 | test asin 16 | """ 17 | 18 | def hook(self): 19 | """ 20 | implement 21 | """ 22 | self.types = [np.float32, np.float64] 23 | # self.debug = True 24 | # self.static = True 25 | # enable check grad 26 | # self.enable_backward = True 27 | 28 | 29 | obj = TestAsin(paddle.asin) 30 | 31 | 32 | @pytest.mark.api_base_asin_vartype 33 | def test_asin_base(): 34 | """ 35 | base 36 | """ 37 | x = -1 + 2 * np.random.random(size=[3, 3, 3]) 38 | res = np.arcsin(x) 39 | obj.base(res=res, x=x) 40 | -------------------------------------------------------------------------------- /test/tools/tool-test-op-correctness/cases/test_cosh.py: -------------------------------------------------------------------------------- 1 | #!/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python 4 | """ 5 | test cosh 6 | """ 7 | from apibase import APIBase 8 | import paddle 9 | import pytest 10 | import numpy as np 11 | 12 | 13 | class TestCosh(APIBase): 14 | """ 15 | test 16 | """ 17 | 18 | def hook(self): 19 | """ 20 | implement 21 | """ 22 | self.types = [np.float32, np.float64] 23 | # self.debug = True 24 | # self.static = True 25 | # enable check grad 26 | # self.enable_backward = True 27 | 28 | 29 | obj = TestCosh(paddle.cosh) 30 | 31 | 32 | @pytest.mark.api_base_cosh_vartype 33 | def test_cosh_base(): 34 | """ 35 | base 36 | """ 37 | x = -1 + 2 * np.random.random(size=[3, 3, 3]) 38 | res = (np.exp(x) + np.exp(-x)) / 2 39 | obj.base(res=res, x=x) 40 | -------------------------------------------------------------------------------- /inference/inference_api_test/cpp_api_test/bin/resnet.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eo pipefail 3 | 4 | echo "Test starting... 
resnet50 thread 4 batch_size 4 GPU" 5 | $OUTPUT_BIN/test_resnet50 --infer_model=$DATA_ROOT/c++/resnet50/model \ 6 | --infer_data=$DATA_ROOT/c++/resnet50/data/data.bin \ 7 | --batch_size=4 --num_threads=4 \ 8 | --repeat=3 --use_gpu=true \ 9 | --gtest_output=xml:test_resnet50_fluid_gpu.xml 10 | 11 | echo "Test starting... resnet50 thread 4 batch_size 4 CPU" 12 | $OUTPUT_BIN/test_resnet50 --infer_model=$DATA_ROOT/c++/resnet50/model \ 13 | --infer_data=$DATA_ROOT/c++/resnet50/data/data.bin \ 14 | --batch_size=4 --num_threads=4 \ 15 | --repeat=3 --use_gpu=false \ 16 | --gtest_output=xml:test_resnet50_fluid_cpu.xml 17 | -------------------------------------------------------------------------------- /inference/inference_benchmark/cc/PyTorch/src/external-cmake/glog.cmake: -------------------------------------------------------------------------------- 1 | include(ExternalProject) 2 | 3 | set(GLOG_ROOT ${CMAKE_BINARY_DIR}/3rdparty/glog) 4 | set(GLOG_LIB_DIR ${GLOG_ROOT}/lib) 5 | set(GLOG_INCLUDE_DIR ${GLOG_ROOT}/include) 6 | 7 | set(GLOG_URL https://github.com/google/glog/archive/d516278b1cd33cd148e8989aec488b6049a4ca0b.zip) 8 | set(GLOG_CONFIGURE cd ${GLOG_ROOT}/src/glog && cmake -DCMAKE_INSTALL_PREFIX=${GLOG_ROOT} -DBUILD_SHARED_LIBS=ON) 9 | set(GLOG_MAKE cd ${GLOG_ROOT}/src/glog && make) 10 | set(GLOG_INSTALL cd ${GLOG_ROOT}/src/glog && make install) 11 | 12 | ExternalProject_Add(glog 13 | URL ${GLOG_URL} 14 | DOWNLOAD_NAME glog-dev 15 | PREFIX ${GLOG_ROOT} 16 | CONFIGURE_COMMAND ${GLOG_CONFIGURE} 17 | BUILD_COMMAND ${GLOG_MAKE} 18 | INSTALL_COMMAND ${GLOG_INSTALL} 19 | ) 20 | -------------------------------------------------------------------------------- /inference/inference_api_test/cpp_api_test/bin/bert.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eo pipefail 3 | 4 | echo "Test starting... bert thread 4 batch_size 4 use gpu fluid" 5 | $OUTPUT_BIN/test_bert --infer_model=$DATA_ROOT/cpp-model-infer/bert_emb128/model \ 6 | --infer_data=$DATA_ROOT/cpp-model-infer/bert_emb128/data/data.txt \ 7 | --repeat=3 --num_threads=4 \ 8 | --use_gpu=true \ 9 | --batch_size=4 --gtest_output=xml:test_bert_gpu.xml 10 | 11 | echo "Test starting... 
bert thread 4 batch_size 4 use cpu fluid" 12 | $OUTPUT_BIN/test_bert --infer_model=$DATA_ROOT/cpp-model-infer/bert_emb128/model \ 13 | --infer_data=$DATA_ROOT/cpp-model-infer/bert_emb128/data/data.txt \ 14 | --repeat=3 --num_threads=4 \ 15 | --use_gpu=false \ 16 | --batch_size=4 --gtest_output=xml:test_bert_cpu.xml 17 | -------------------------------------------------------------------------------- /inference/inference_benchmark/cc/TensorFlow/src/external-cmake/glog.cmake: -------------------------------------------------------------------------------- 1 | include(ExternalProject) 2 | 3 | set(GLOG_ROOT ${CMAKE_BINARY_DIR}/3rdparty/glog) 4 | set(GLOG_LIB_DIR ${GLOG_ROOT}/lib) 5 | set(GLOG_INCLUDE_DIR ${GLOG_ROOT}/include) 6 | 7 | set(GLOG_URL https://github.com/google/glog/archive/d516278b1cd33cd148e8989aec488b6049a4ca0b.zip) 8 | set(GLOG_CONFIGURE cd ${GLOG_ROOT}/src/glog && cmake -DCMAKE_INSTALL_PREFIX=${GLOG_ROOT} -DBUILD_SHARED_LIBS=ON) 9 | set(GLOG_MAKE cd ${GLOG_ROOT}/src/glog && make) 10 | set(GLOG_INSTALL cd ${GLOG_ROOT}/src/glog && make install) 11 | 12 | ExternalProject_Add(glog 13 | URL ${GLOG_URL} 14 | DOWNLOAD_NAME glog-dev 15 | PREFIX ${GLOG_ROOT} 16 | CONFIGURE_COMMAND ${GLOG_CONFIGURE} 17 | BUILD_COMMAND ${GLOG_MAKE} 18 | INSTALL_COMMAND ${GLOG_INSTALL} 19 | ) 20 | -------------------------------------------------------------------------------- /tools/codestyle/cpplint_pre_commit.hook: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TOTAL_ERRORS=0 4 | if [[ ! $TRAVIS_BRANCH ]]; then 5 | # install cpplint on local machine. 6 | if [[ ! $(which cpplint) ]]; then 7 | pip install cpplint 8 | fi 9 | # diff files on local machine. 10 | files=$(git diff --cached --name-status | awk '$1 != "D" {print $2}') 11 | else 12 | # diff files between PR and latest commit on Travis CI. 13 | branch_ref=$(git rev-parse "$TRAVIS_BRANCH") 14 | head_ref=$(git rev-parse HEAD) 15 | files=$(git diff --name-status $branch_ref $head_ref | awk '$1 != "D" {print $2}') 16 | fi 17 | # The trick to remove deleted files: https://stackoverflow.com/a/2413151 18 | for file in $files; do 19 | if [[ $file =~ ^(patches/.*) ]]; then 20 | continue; 21 | else 22 | cpplint --filter=-readability/fn_size $file; 23 | TOTAL_ERRORS=$(expr $TOTAL_ERRORS + $?); 24 | fi 25 | done 26 | 27 | exit $TOTAL_ERRORS 28 | -------------------------------------------------------------------------------- /test.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | def flip(im): 17 | """ 18 | Return the flipped image. 19 | Flip an image along the horizontal direction. 
20 | im: input image, (K x H x W) ndarrays 21 | """ 22 | if len(im.shape) == 3: 23 | return im[:, :, ::-1] 24 | else: 25 | return im[:, ::-1] 26 | -------------------------------------------------------------------------------- /test/tools/tool-test-op-correctness/cases/test_acos.py: -------------------------------------------------------------------------------- 1 | #!/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python 4 | """ 5 | test acos 6 | """ 7 | from apibase import APIBase 8 | from apibase import randtool 9 | import paddle 10 | import pytest 11 | import numpy as np 12 | 13 | 14 | class TestAcos(APIBase): 15 | """ 16 | test acos 17 | """ 18 | 19 | def hook(self): 20 | """ 21 | implement 22 | """ 23 | self.types = [np.float32, np.float64] 24 | # self.rtol = 0.1 25 | # self.debug = True 26 | # self.static = True 27 | # enable check grad 28 | # self.enable_backward = True 29 | 30 | 31 | obj = TestAcos(paddle.acos) 32 | 33 | 34 | @pytest.mark.api_base_acos_vartype 35 | def test_acos_base(): 36 | """ 37 | base 38 | """ 39 | x = randtool("float", -1, 1, (3, 3, 3)) 40 | res = np.arccos(x) 41 | obj.base(res=res, x=x) 42 | -------------------------------------------------------------------------------- /distributed/dist_fleet/thirdparty/pslib/dataset_generator.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding:utf-8 -*- 3 | # @Time : 2019-12-25 20:08 4 | # @Author : liyang109 5 | import paddle.fluid.incubate.data_generator as dg 6 | 7 | 8 | class MyDataset(dg.MultiSlotDataGenerator): 9 | """ 10 | user defined dataset 11 | """ 12 | 13 | # process each single line 14 | def generate_sample(self, line): 15 | """ 16 | input: a single line 17 | output: each parsed instance 18 | """ 19 | 20 | def data_iter(): 21 | """ 22 | the "real" parse function 23 | """ 24 | tokens = line.split(',') 25 | output = [("click", [int(tokens[1])]), ("feature", [])] 26 | for token in tokens[2:]: 27 | output[1][1].append(int(token)) 28 | yield output 29 | 30 | return data_iter 31 | 32 | 33 | d = MyDataset() 34 | d.run_from_stdin() 35 | -------------------------------------------------------------------------------- /inference/inference_benchmark/cc/PyTorch/src/external-cmake/gflag.cmake: -------------------------------------------------------------------------------- 1 | include(ExternalProject) 2 | 3 | set(GFLAG_ROOT ${CMAKE_BINARY_DIR}/3rdparty/gflag-2.2.2) 4 | set(GFLAG_LIB_DIR ${GFLAG_ROOT}/lib) 5 | set(GFLAG_INCLUDE_DIR ${GFLAG_ROOT}/include) 6 | 7 | set(GFLAG_URL https://github.com/gflags/gflags/archive/v2.2.2.zip) 8 | set(GFLAG_CONFIGURE cd ${GFLAG_ROOT}/src/gflag-2.2.2 && cmake -DCMAKE_INSTALL_PREFIX=${GFLAG_ROOT} -DBUILD_SHARED_LIBS=ON) 9 | set(GFLAG_MAKE cd ${GFLAG_ROOT}/src/gflag-2.2.2 && make) 10 | set(GFLAG_INSTALL cd ${GFLAG_ROOT}/src/gflag-2.2.2 && make install) 11 | 12 | ExternalProject_Add(gflag-2.2.2 13 | URL ${GFLAG_URL} 14 | DOWNLOAD_NAME gflag-2.2.2.zip 15 | PREFIX ${GFLAG_ROOT} 16 | CONFIGURE_COMMAND ${GFLAG_CONFIGURE} 17 | BUILD_COMMAND ${GFLAG_MAKE} 18 | INSTALL_COMMAND ${GFLAG_INSTALL} 19 | ) 20 | -------------------------------------------------------------------------------- /inference/inference_benchmark/cc/TensorFlow/src/external-cmake/gflag.cmake: -------------------------------------------------------------------------------- 1 | include(ExternalProject) 2 | 3 | set(GFLAG_ROOT ${CMAKE_BINARY_DIR}/3rdparty/gflag-2.2.2) 4 | set(GFLAG_LIB_DIR ${GFLAG_ROOT}/lib) 5 | 
set(GFLAG_INCLUDE_DIR ${GFLAG_ROOT}/include) 6 | 7 | set(GFLAG_URL https://github.com/gflags/gflags/archive/v2.2.2.zip) 8 | set(GFLAG_CONFIGURE cd ${GFLAG_ROOT}/src/gflag-2.2.2 && cmake -DCMAKE_INSTALL_PREFIX=${GFLAG_ROOT} -DBUILD_SHARED_LIBS=ON) 9 | set(GFLAG_MAKE cd ${GFLAG_ROOT}/src/gflag-2.2.2 && make) 10 | set(GFLAG_INSTALL cd ${GFLAG_ROOT}/src/gflag-2.2.2 && make install) 11 | 12 | ExternalProject_Add(gflag-2.2.2 13 | URL ${GFLAG_URL} 14 | DOWNLOAD_NAME gflag-2.2.2.zip 15 | PREFIX ${GFLAG_ROOT} 16 | CONFIGURE_COMMAND ${GFLAG_CONFIGURE} 17 | BUILD_COMMAND ${GFLAG_MAKE} 18 | INSTALL_COMMAND ${GFLAG_INSTALL} 19 | ) 20 | -------------------------------------------------------------------------------- /test/tools/tool-test-inference/test_case/text_preprocess.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python 3 | """ 4 | text preprocess 5 | """ 6 | 7 | 8 | def ernie_data(data_path): 9 | """ 10 | Convert ernie text data to a list of arrays. 11 | Args: 12 | data_path(str): data path 13 | Returns: 14 | examples(list): list of (input_ids, token_type_ids) pairs 15 | """ 16 | import paddlenlp as ppnlp # imported lazily; only NLP cases trigger this load 17 | max_seq_length = 128 18 | examples = [] 19 | with open(data_path, "r", encoding="utf-8") as f: 20 | data = f.readlines() 21 | tokenizer = ppnlp.transformers.ErnieTinyTokenizer.from_pretrained("ernie-tiny") 22 | for text in data: 23 | encoded_inputs = tokenizer(text=text, max_seq_len=max_seq_length) 24 | input_ids = encoded_inputs["input_ids"] 25 | token_type_ids = encoded_inputs["token_type_ids"] 26 | examples.append((input_ids, token_type_ids)) 27 | f.close() 28 | return examples 29 | -------------------------------------------------------------------------------- /test/tools/tool-test-dl-algorithm-correctness/test_case/text_preprocess.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python 3 | """ 4 | text preprocess 5 | """ 6 | 7 | 8 | def ernie_data(data_path): 9 | """ 10 | Convert ernie text data to a list of arrays. 
11 | Args: 12 | data_path(str): data path 13 | Returns: 14 | examples(list): list of (input_ids, token_type_ids) pairs 15 | """ 16 | import paddlenlp as ppnlp # imported lazily; only NLP cases trigger this load 17 | max_seq_length = 128 18 | examples = [] 19 | with open(data_path, "r", encoding="utf-8") as f: 20 | data = f.readlines() 21 | tokenizer = ppnlp.transformers.ErnieTinyTokenizer.from_pretrained("ernie-tiny") 22 | for text in data: 23 | encoded_inputs = tokenizer(text=text, max_seq_len=max_seq_length) 24 | input_ids = encoded_inputs["input_ids"] 25 | token_type_ids = encoded_inputs["token_type_ids"] 26 | examples.append((input_ids, token_type_ids)) 27 | f.close() 28 | return examples 29 | -------------------------------------------------------------------------------- /models/args/PaddleNLP/lexical_analysis/run_prepare.sh: -------------------------------------------------------------------------------- 1 | 2 | #!/bin/sh 3 | 4 | #prepare data 5 | if [ -e args_test_data ] 6 | then 7 | echo "args_test_data has already existed" 8 | else 9 | ln -s /ssd3/models_test/models_args/PaddleNLP/lexical_analysis/data args_test_data 10 | fi 11 | 12 | #prepare pre_model 13 | if [ -e args_test_pretrained ] 14 | then 15 | echo "args_test_pretrained has already existed" 16 | else 17 | ln -s /ssd3/models_test/models_args/PaddleNLP/lexical_analysis/pretrained args_test_pretrained 18 | fi 19 | 20 | if [ -e args_test_model_baseline.pdckpt ] 21 | then 22 | echo "args_test_model_baseline has already existed" 23 | else 24 | ln -s /ssd3/models_test/models_args/PaddleNLP/lexical_analysis/model_baseline.pdckpt args_test_model_baseline.pdckpt 25 | fi 26 | 27 | if [ -e args_test_model_finetuned ] 28 | then 29 | echo "args_test_model_finetuned has already existed" 30 | else 31 | ln -s /ssd3/models_test/models_args/PaddleNLP/lexical_analysis/model_finetuned args_test_model_finetuned 32 | fi 33 | -------------------------------------------------------------------------------- /models/args/PaddleNLP/dialogue_domain_classification/run_prepare.sh: -------------------------------------------------------------------------------- 1 | 2 | #!/bin/sh 3 | 4 | ROOT_PATH=$1 5 | 6 | 7 | #prepare data 8 | if [ -e data/input ] 9 | then 10 | echo "args_test_data has already existed" 11 | else 12 | mkdir -p data/input 13 | if [ ! -e dialogue_domain_classification-dataset-1.0.0.tar.gz ] 14 | then 15 | echo "download" 16 | wget --no-check-certificate https://baidu-nlp.bj.bcebos.com/dialogue_domain_classification-dataset-1.0.0.tar.gz 17 | fi 18 | tar -zxf dialogue_domain_classification-dataset-1.0.0.tar.gz -C ./data/input 19 | fi 20 | 21 | #prepare pre_model 22 | if [ -e model ] 23 | then 24 | echo "model has already existed" 25 | else 26 | mkdir -p model 27 | if [ ! 
-e dialogue_domain_classification-model-1.0.0.tar.gz ] 28 | then 29 | echo "download" 30 | wget --no-check-certificate https://baidu-nlp.bj.bcebos.com/dialogue_domain_classification-model-1.0.0.tar.gz 31 | fi 32 | tar -zxf dialogue_domain_classification-model-1.0.0.tar.gz -C ./model 33 | fi 34 | -------------------------------------------------------------------------------- /test/tools/tool-test-dl-net/tool.py: -------------------------------------------------------------------------------- 1 | #!/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python 4 | """ 5 | paddle tool.py 6 | """ 7 | import argparse 8 | import paddle 9 | import paddle.vision.models as models 10 | 11 | 12 | parser = argparse.ArgumentParser(__doc__) 13 | parser.add_argument( 14 | "--model_name", type=str, default=None, help="model name." 15 | ) 16 | args = parser.parse_args() 17 | 18 | models_zoo = {'mobilenetv1': models.MobileNetV1, 19 | 'mobilenetv2': models.MobileNetV2, 20 | 'resnet18': models.resnet18, 21 | 'resnet34': models.resnet34, 22 | 'resnet50': models.resnet50, 23 | 'resnet101': models.resnet101, 24 | 'vgg11': models.vgg11, 25 | 'vgg13': models.vgg13, 26 | 'vgg16': models.vgg16, 27 | 'vgg19': models.vgg19} 28 | 29 | if __name__ == "__main__": 30 | model = models_zoo[args.model_name] 31 | model = model() 32 | paddle.summary(model, (-1, 3, 224, 224)) 33 | -------------------------------------------------------------------------------- /inference/inference_api_test/cpp_api_test/bin/run-case.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eo pipefail 4 | 5 | ROOT=`dirname "$0"` 6 | ROOT=`cd "$ROOT/.."; pwd` 7 | export OUTPUT=$ROOT/output 8 | export OUTPUT_BIN=$ROOT/build 9 | export DATA_ROOT=$ROOT/data 10 | export TOOLS_ROOT=$ROOT/tools 11 | export CASE_ROOT=$ROOT/bin 12 | 13 | mkdir -p $DATA_ROOT 14 | cd $DATA_ROOT 15 | if [ ! -f c++/resnet50/model/__model__ ]; then 16 | echo "==== Download data and models ====" 17 | wget --no-proxy -q https://sys-p0.bj.bcebos.com/inference/c++-infer.tgz --no-check-certificate 18 | tar -zxf c++-infer.tgz 19 | fi 20 | 21 | if [ ! 
-f cpp-model-infer/bert_emb128/model/__model__ ]; then 22 | echo "==== Download bert, ocr, text data and models ====" 23 | wget --no-proxy -q https://sys-p0.bj.bcebos.com/inference/cpp-model-infer.tgz --no-check-certificate 24 | tar -zxf cpp-model-infer.tgz 25 | fi 26 | 27 | cd - 28 | 29 | bash $CASE_ROOT/resnet.sh 30 | 31 | bash $CASE_ROOT/ocr.sh 32 | bash $CASE_ROOT/text_classification.sh 33 | bash $CASE_ROOT/bert.sh 34 | -------------------------------------------------------------------------------- /models/args/conf/args_batch.conf: -------------------------------------------------------------------------------- 1 | PaddleNLP/dialogue_system/dialogue_general_understanding main.py train 1 2 | PaddleNLP/dialogue_system/dialogue_general_understanding main.py predict 1 3 | PaddleNLP/dialogue_system/dialogue_general_understanding main.py infer 1 4 | PaddleNLP/similarity_net run_classifier.py train 1 5 | PaddleNLP/similarity_net run_classifier.py evaluate 1 6 | PaddleNLP/similarity_net run_classifier.py infer 1 7 | PaddleNLP/dialogue_system/auto_dialogue_evaluation main.py pretrain 1 8 | PaddleNLP/dialogue_system/auto_dialogue_evaluation main.py finetune 1 9 | PaddleNLP/machine_translation/transformer main.py train 1 10 | PaddleNLP/machine_translation/transformer main.py predict 1 11 | PaddleNLP/pretrain_language_models/ELMo train.py train 1 12 | PaddleNLP/dialogue_domain_classification run_classifier.py train 1 13 | PaddleNLP/seq2seq/seq2seq train.py train 1 14 | PaddleNLP/seq2seq/seq2seq infer.py infer 1 15 | PaddleNLP/seq2seq/variational_seq2seq train.py train 1 16 | PaddleNLP/seq2seq/variational_seq2seq infer.py infer 1 17 | -------------------------------------------------------------------------------- /models/args/models.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys 4 | import os 5 | 6 | 7 | def check_models(models_file, register_file): 8 | """ 9 | """ 10 | models = set() 11 | register = [] 12 | with open(register_file, 'r') as fin: 13 | for line in fin: 14 | register.append(line.strip()) 15 | models.add(line.strip().split()[0]) 16 | 17 | #print(models) 18 | #print(register) 19 | commit = set() 20 | with open(models_file, 'r') as fin: 21 | for line in fin: 22 | tmp = line.strip() 23 | for item in models: 24 | if item in tmp: 25 | commit.add(item) 26 | #print(commit) 27 | test_case = [] 28 | for item in register: 29 | if item.split()[0] in commit: 30 | test_case.append(item) 31 | print(item) 32 | 33 | #print(test_case) 34 | return test_case 35 | 36 | 37 | if __name__ == "__main__": 38 | models_file = sys.argv[1] 39 | register_file = sys.argv[2] 40 | check_models(models_file, register_file) 41 | -------------------------------------------------------------------------------- /tipc/tipc_run_cpp.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | 4 | test_mode=${TIPC_MODE:-lite_train_lite_infer} 5 | test_mode=$(echo $test_mode | tr "," "\n") 6 | 7 | 8 | cd deploy/cpp_infer 9 | mkdir -p Paddle/build 10 | wget --no-proxy https://paddle-inference-lib.bj.bcebos.com/2.2.2/cxx_c/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda10.2_cudnn8.1.1_trt7.2.3.4/paddle_inference.tgz 11 | tar -zxf paddle_inference.tgz 12 | mv paddle_inference Paddle/build/paddle_inference_install_dir 13 | cd - 14 | 15 | 16 | for config_file in `find . 
-name "*_infer_cpp_*.txt"`; do 17 | for mode in $test_mode; do 18 | mode=$(echo $mode | xargs) 19 | echo "==START=="$config_file"_"$mode 20 | echo "CUDA_VISIBLE_DEVICES="$CUDA_VISIBLE_DEVICES 21 | 22 | export http_proxy=http://172.19.57.45:3128 23 | export https_proxy=http://172.19.57.45:3128 24 | export no_proxy=baidubce.com,bcebos.com 25 | bash -ex test_tipc/prepare.sh $config_file "cpp_infer" 26 | bash -ex test_tipc/test_inference_cpp.sh $config_file 27 | echo "==END=="$config_file"_"$mode 28 | done 29 | done 30 | -------------------------------------------------------------------------------- /models/args/PaddleNLP/sentiment_classification/run_prepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | pip install numpy 4 | 5 | #prepare data 6 | if [ -e args_test_senta_data ] 7 | then 8 | echo "args_test_senta_data has already existed" 9 | else 10 | ln -s /ssd3/models_test/models_args/PaddleNLP/sentiment_classification/senta_data args_test_senta_data 11 | fi 12 | if [ -e args_test_senta_data_1 ] 13 | then 14 | echo "args_test_senta_data_1 has already existed" 15 | else 16 | ln -s /ssd3/models_test/models_args/PaddleNLP/sentiment_classification/senta_data args_test_senta_data_1 17 | fi 18 | 19 | #prepare model 20 | if [ -e args_test_senta_model ] 21 | then 22 | echo "args_test_senta_model has already existed" 23 | else 24 | #wget https://baidu-nlp.bj.bcebos.com/sentiment_classification-1.0.0.tar.gz --no-check-certificate 25 | #tar -zxf sentiment_classification-1.0.0.tar.gz 26 | #/bin/rm sentiment_classification-1.0.0.tar.gz 27 | #mv senta_model args_test_senta_model 28 | ln -s /ssd3/models_test/models_args/PaddleNLP/sentiment_classification/senta_model args_test_senta_model 29 | fi 30 | -------------------------------------------------------------------------------- /inference/inference_benchmark/python/Paddle/memory.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | from jtop import jtop 16 | 17 | if __name__ == "__main__": 18 | 19 | print("Simple Tegrastats reader") 20 | 21 | with jtop() as jetson: 22 | if hasattr(jetson, 'fan'): 23 | print(jetson.fan) 24 | jetson.fan.speed = 30 25 | print(jetson.cpu) 26 | print(jetson.gpu) 27 | print('*** temperature ***') 28 | print(jetson.temperature) 29 | -------------------------------------------------------------------------------- /inference/inference_api_test/cpp_api_test/jetson-build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eo pipefail 3 | 4 | ROOT=`dirname "$0"` 5 | ROOT=`cd "$ROOT"; pwd` 6 | export CASE_ROOT=$ROOT 7 | export TOOLS_ROOT=$ROOT/tools 8 | 9 | if [[ $# -eq 0 ]] ; then 10 | PADDLE_LIB_PATH=$CASE_ROOT/lib/infer-lib 11 | else 12 | PADDLE_LIB_PATH=$1 13 | fi 14 | 15 | WITH_GPU=ON 16 | if [ $# -ge 2 ]; then 17 | WITH_GPU=$2 18 | fi 19 | 20 | WITH_MKL=OFF 21 | if [ $# -ge 3 ]; then 22 | WITH_MKL=$3 23 | fi 24 | 25 | USE_TENSORRT=ON 26 | if [ $# -ge 4 ]; then 27 | USE_TENSORRT=$4 28 | fi 29 | 30 | TENSORRT_ROOT="/usr/lib/aarch64-linux-gnu/" 31 | if [ $# -ge 5 ]; then 32 | TENSORRT_ROOT=$5 33 | fi 34 | 35 | export CUDA_LIB="/usr/local/cuda/lib64" 36 | 37 | BUILD=$CASE_ROOT/build 38 | mkdir -p $BUILD 39 | cd $BUILD 40 | 41 | cmake $CASE_ROOT/src \ 42 | -DPADDLE_LIB=${PADDLE_LIB_PATH} \ 43 | -DWITH_GPU=${WITH_GPU} \ 44 | -DCUDA_LIB=${CUDA_LIB} \ 45 | -DUSE_TENSORRT=${USE_TENSORRT} \ 46 | -DTENSORRT_INCLUDE_DIR="${TENSORRT_ROOT}/include" \ 47 | -DTENSORRT_LIB_DIR="${TENSORRT_ROOT}" 48 | 49 | make -j4 -------------------------------------------------------------------------------- /inference/inference_api_test/cpp_api_test/bin/ocr.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eo pipefail 3 | 4 | echo "Test starting... ocr thread 4 batch_size 4 use gpu fluid" 5 | $OUTPUT_BIN/test_ocr --batch_size=4 \ 6 | --use_gpu=true \ 7 | --infer_model=$DATA_ROOT/cpp-model-infer/ocr/model \ 8 | --infer_data=$DATA_ROOT/cpp-model-infer/ocr/data/data.txt \ 9 | --refer_result=$DATA_ROOT/cpp-model-infer/ocr/data/result.txt \ 10 | --repeat=3 --num_threads=4 \ 11 | --gtest_output=xml:test_ocr_gpu.xml 12 | 13 | echo "Test starting... 
ocr thread 4 batch_size 4 use cpu fluid" 14 | $OUTPUT_BIN/test_ocr --batch_size=4 \ 15 | --use_gpu=false \ 16 | --infer_model=$DATA_ROOT/cpp-model-infer/ocr/model \ 17 | --infer_data=$DATA_ROOT/cpp-model-infer/ocr/data/data.txt \ 18 | --refer_result=$DATA_ROOT/cpp-model-infer/ocr/data/result.txt \ 19 | --repeat=3 --num_threads=4 \ 20 | --gtest_output=xml:test_ocr_cpu.xml 21 | -------------------------------------------------------------------------------- /inference/inference_api_test/cpp_api_test/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eo pipefail 3 | 4 | ROOT=`dirname "$0"` 5 | ROOT=`cd "$ROOT"; pwd` 6 | export CASE_ROOT=$ROOT 7 | export TOOLS_ROOT=$ROOT/tools 8 | 9 | if [[ $# -eq 0 ]] ; then 10 | PADDLE_LIB_PATH=$CASE_ROOT/lib/infer-lib 11 | else 12 | PADDLE_LIB_PATH=$1 13 | fi 14 | 15 | WITH_GPU=ON 16 | if [ $# -ge 2 ]; then 17 | WITH_GPU=$2 18 | fi 19 | 20 | WITH_MKL=ON 21 | if [ $# -ge 3 ]; then 22 | WITH_MKL=$3 23 | fi 24 | 25 | USE_TENSORRT=ON 26 | if [ $# -ge 4 ]; then 27 | USE_TENSORRT=$4 28 | fi 29 | 30 | TENSORRT_ROOT="/usr/local/TensorRT6-cuda10.1-cudnn7" 31 | if [ $# -ge 5 ]; then 32 | TENSORRT_ROOT=$5 33 | fi 34 | 35 | export CUDA_LIB="/usr/local/cuda" 36 | 37 | BUILD=$CASE_ROOT/build 38 | mkdir -p $BUILD 39 | cd $BUILD 40 | 41 | cmake $CASE_ROOT/src \ 42 | -DPADDLE_LIB=${PADDLE_LIB_PATH} \ 43 | -DWITH_GPU=${WITH_GPU} \ 44 | -DWITH_MKL=${WITH_MKL} \ 45 | -DCUDA_LIB=${CUDA_LIB} \ 46 | -DUSE_TENSORRT=${USE_TENSORRT} \ 47 | -DTENSORRT_INCLUDE_DIR="${TENSORRT_ROOT}/include" \ 48 | -DTENSORRT_LIB_DIR="${TENSORRT_ROOT}/lib" 49 | 50 | make -j4 51 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/go.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | project_path=`pwd` 4 | export project_path 5 | echo -e "\033[33m project_path is : ${project_path} \033[0m" 6 | cd ${project_path} 7 | test_cases=`find ./tests -name "test_v1*py" | sort` 8 | cases="${test_cases}" 9 | ignore="" 10 | # download Data 11 | if [ -d "Data" ];then rm -rf Data 12 | fi 13 | # download data with numpy 14 | wget --no-proxy -q https://sys-p0.bj.bcebos.com/inference/Data.tgz --no-check-certificate 15 | tar -xvf Data.tgz 16 | if [ -d "report" ];then rm -rf report 17 | fi 18 | report_path="${project_path}/report" 19 | mkdir -p ${report_path} 20 | for file in ${cases} 21 | do 22 | test_case=`basename ${file}` 23 | test_case_path=`dirname ${file}` 24 | cd ${test_case_path} 25 | echo -e "\033[33m ====> ${test_case} case start \033[0m" 26 | if [[ ${ignore} =~ ${file##*/} ]]; then 27 | echo "skipped" 28 | else 29 | # python -m nose ${test_case} --with-allure --logdir=${report_path} 30 | python -m pytest -v --disable-warnings ${test_case} 31 | fi 32 | echo -e "\033[33m ====> ${test_case} case finish \033[0m" 33 | echo " " 34 | cd - 35 | done 36 | -------------------------------------------------------------------------------- /test/tools/tool-test-op-correctness/cases/test_bernoulli.py: -------------------------------------------------------------------------------- 1 | #!/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python 4 | """ 5 | test bernoulli 6 | """ 7 | from apibase import APIBase 8 | from apibase import randtool 9 | import paddle 10 | import pytest 11 | from paddle import fluid 12 | import numpy as np 13 | 14 | 15 | class TestBernoulli(APIBase): 16 | """ 17 | 
test 18 | """ 19 | 20 | def hook(self): 21 | """ 22 | implement 23 | """ 24 | self.types = [np.float32, np.float64] 25 | # self.debug = True 26 | # self.static = True 27 | # enable check grad 28 | self.places = [fluid.CPUPlace()] 29 | self.enable_backward = False 30 | 31 | 32 | obj = TestBernoulli(paddle.bernoulli) 33 | 34 | 35 | @pytest.mark.api_base_bernoulli_vartype 36 | def test_bernoulli_base(): 37 | """ 38 | test base 39 | Returns: 40 | 41 | """ 42 | x = randtool("float", 0, 1, shape=[6, 3]) 43 | res = np.array( 44 | [[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 1.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0], [1.0, 0.0, 1.0]] 45 | ) 46 | obj.run(res=res, x=x) 47 | -------------------------------------------------------------------------------- /test/tools/tool-test-op-correctness/cases/test_conj.py: -------------------------------------------------------------------------------- 1 | #!/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python 4 | """ 5 | test conj 6 | """ 7 | 8 | from apibase import APIBase 9 | import paddle 10 | import pytest 11 | import numpy as np 12 | 13 | 14 | class TestConj(APIBase): 15 | """ 16 | test 17 | """ 18 | 19 | def hook(self): 20 | """ 21 | implement 22 | """ 23 | self.types = [np.complex64, np.complex128] 24 | # self.debug = True 25 | # self.static = True 26 | # enable check grad 27 | self.enable_backward = False 28 | 29 | 30 | obj = TestConj(paddle.conj) 31 | 32 | 33 | @pytest.mark.api_base_conj_vartype 34 | def test_real_base(): 35 | """ 36 | base 37 | Returns: 38 | 39 | """ 40 | x = np.random.random((20, 10)) + 1j * np.random.random((20, 10)) 41 | res = np.conj(x) 42 | obj.base(res=res, x=x) 43 | 44 | 45 | @pytest.mark.api_base_conj_parameters 46 | def test_real(): 47 | """ 48 | base 49 | Returns: 50 | 51 | """ 52 | x = np.random.random((20, 10)) + 1j * np.random.random((20, 10)) 53 | res = np.conj(x) 54 | obj.run(res=res, x=x) 55 | -------------------------------------------------------------------------------- /inference/inference_benchmark/cc/Paddle/compile.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | 4 | mkdir -p build 5 | cd build 6 | 7 | # same with the class_benchmark_demo.cc 8 | 9 | # DEMO_NAME=clas_benchmark 10 | DEMO_NAME=all 11 | if [ $# -ge 1 ]; then 12 | DEMO_NAME=$1 13 | fi 14 | 15 | WITH_MKL=ON 16 | if [ $# -ge 2 ]; then 17 | WITH_MKL=$2 18 | fi 19 | 20 | WITH_GPU=ON 21 | if [ $# -ge 3 ]; then 22 | WITH_GPU=$3 23 | fi 24 | 25 | USE_TENSORRT=ON 26 | if [ $# -ge 4 ]; then 27 | USE_TENSORRT=$4 28 | fi 29 | 30 | LIB_DIR="/workspace/paddle_inference_install_dir" 31 | if [ $# -ge 5 ]; then 32 | LIB_DIR=$5 33 | fi 34 | 35 | CUDA_LIB="/usr/local/cuda-10.0/lib64" 36 | if [ $# -ge 6 ]; then 37 | CUDA_LIB=$6 38 | fi 39 | 40 | TENSORRT_ROOT="/usr/local/TensorRT6-cuda10.0-cudnn7" 41 | if [ $# -ge 7 ]; then 42 | TENSORRT_ROOT=$7 43 | fi 44 | 45 | cmake ../src -DPADDLE_LIB=${LIB_DIR} \ 46 | -DWITH_MKL=${WITH_MKL} \ 47 | -DDEMO_NAME=${DEMO_NAME} \ 48 | -DWITH_GPU=${WITH_GPU} \ 49 | -DWITH_STATIC_LIB=OFF \ 50 | -DUSE_TENSORRT=${USE_TENSORRT} \ 51 | -DCUDA_LIB=${CUDA_LIB} \ 52 | -DTENSORRT_ROOT=${TENSORRT_ROOT} 53 | 54 | make -j 55 | -------------------------------------------------------------------------------- /framework_api/cts_tools.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | """cts test tools""" 15 | 16 | import numpy as np 17 | from nose import tools 18 | 19 | 20 | def check_data(result, expect, delta=None): 21 | """ 22 | Compare actual results with expected results 23 | :param result: actual output (numpy) 24 | :param expect: expected output (array) 25 | :param delta: allowed precision error 26 | :return: 27 | """ 28 | if delta: 29 | for i in range(len(expect)): 30 | tools.assert_almost_equal( 31 | result[i], np.float32(expect[i]), delta=delta) 32 | else: 33 | for i in range(len(expect)): 34 | tools.assert_equal(result[i], np.float32(expect[i])) 35 | -------------------------------------------------------------------------------- /test/tools/tool-test-train-performance/service.py: -------------------------------------------------------------------------------- 1 | #!/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python 4 | 5 | import json 6 | from flask import Flask 7 | from flask_restful import Resource, Api 8 | from flask import request 9 | 10 | app = Flask(__name__) 11 | api = Api(app) 12 | 13 | 14 | def run_cmd(cmd): 15 | import subprocess 16 | process = subprocess.Popen( 17 | cmd, 18 | stdout=subprocess.PIPE, 19 | stderr=subprocess.STDOUT, 20 | shell=True, 21 | universal_newlines=True) 22 | out, err = process.communicate() 23 | return out, process.returncode 24 | 25 | 26 | @app.route("/tool-8") 27 | def run(): 28 | parameter_dict = request.get_json() 29 | cmd = "bash run_tools8.sh {} {}".format(parameter_dict['model_name'], 30 | parameter_dict['cards']) 31 | print(parameter_dict, cmd) 32 | 33 | out, ret = run_cmd(cmd) 34 | if ret != 0: 35 | result = {"status": 500, "msg": out, "result": "FAIL"} 36 | else: 37 | result = {"status": 200, "msg": out, "result": "PASS"} 38 | return json.dumps(result) 39 | 40 | 41 | if __name__ == '__main__': 42 | app.run(host="0.0.0.0", port=8708, debug=False) 43 | -------------------------------------------------------------------------------- /test/tools/tool-test-train-resource/service.py: -------------------------------------------------------------------------------- 1 | #!/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python 4 | 5 | import json 6 | from flask import Flask 7 | from flask_restful import Resource, Api 8 | from flask import request 9 | 10 | app = Flask(__name__) 11 | api = Api(app) 12 | 13 | 14 | def run_cmd(cmd): 15 | import subprocess 16 | process = subprocess.Popen( 17 | cmd, 18 | stdout=subprocess.PIPE, 19 | stderr=subprocess.STDOUT, 20 | shell=True, 21 | universal_newlines=True) 22 | out, err = process.communicate() 23 | return out, process.returncode 24 | 25 | 26 | @app.route("/tool-9") 27 | def run(): 28 | parameter_dict = request.get_json() 29 | cmd = "bash run_tools9.sh {} {}".format(parameter_dict['model_name'], 30 | parameter_dict['cards']) 31 | print(parameter_dict, cmd) 32 | 33 | out, ret = run_cmd(cmd) 34 | if ret != 0: 35 | result = {"status": 500, "msg": out, "result": "FAIL"} 36 | else: 37 | 
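# a zero exit code from the wrapped run_tools9.sh maps to HTTP 200 / PASS;
# a non-zero exit was already reported above as 500 / FAIL with the captured output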
result = {"status": 200, "msg": out, "result": "PASS"} 38 | 39 | return json.dumps(result) 40 | 41 | 42 | if __name__ == '__main__': 43 | app.run(host="0.0.0.0", port=8709, debug=False) 44 | -------------------------------------------------------------------------------- /tipc/prepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #REPO=${REPO} 3 | BRANCH=${BRANCH:-develop} 4 | #AGILE_PULL_ID=$3 5 | #AGILE_REVISION=$4 6 | ROOT_PATH=${ROOT_PATH:-/home/work/tipc/} 7 | 8 | work_dir=${ROOT_PATH}/${REPO} 9 | mkdir -p ${work_dir} 10 | rm -rf ${work_dir}/* 11 | cd ${work_dir} 12 | 13 | unset http_proxy 14 | unset https_proxy 15 | 16 | # Git clone 17 | if [ -d Paddle ]; then rm -rf Paddle; fi 18 | git clone --depth=200 https://github.com/PaddlePaddle/Paddle.git 19 | cd Paddle 20 | #git fetch origin pull/${AGILE_PULL_ID}/head 21 | #git checkout -b test FETCH_HEAD 22 | 23 | # download test model repo 24 | git clone --depth=100 https://github.com/LDOUBLEV/AutoLog; 25 | git clone --depth=100 https://github.com/PaddlePaddle/continuous_integration.git; 26 | git clone --depth=2 https://github.com/PaddlePaddle/${REPO}.git -b ${BRANCH}; 27 | 28 | cd ${work_dir} 29 | tar -zcf ${REPO}.tar.gz Paddle 30 | file_tgz=${REPO}.tar.gz 31 | 32 | # Push BOS 33 | # pip install pycrypto 34 | push_dir=/home 35 | push_file=${push_dir}/bce-python-sdk-0.8.27/BosClient.py 36 | if [ ! -f ${push_file} ];then 37 | set +x 38 | set -x 39 | tar xf /home/bce_whl.tar.gz -C ${push_dir} 40 | fi 41 | 42 | cd ${work_dir} 43 | python ${push_file} ${file_tgz} ${paddle_package}/fullchain_ce_test/${AGILE_PULL_ID}/${AGILE_REVISION} 44 | -------------------------------------------------------------------------------- /models/args/PaddleNLP/similarity_net/run_prepare.sh: -------------------------------------------------------------------------------- 1 | 2 | #!/bin/sh 3 | 4 | ROOT_PATH=$1 5 | 6 | if [ -e data ] 7 | then 8 | mv data data.bak 9 | fi 10 | if [ ! 
-e data.tgz ] 11 | then 12 | wget https://sys-p0.bj.bcebos.com/models/PaddleNLP/similarity_net/data.tgz --no-check-certificate 13 | fi 14 | tar -zxf data.tgz 15 | 16 | 17 | #prepare data 18 | if [ -e args_test_data ] 19 | then 20 | echo "args_test_data has already existed" 21 | else 22 | #ln -s /ssd3/models_test/models_args/PaddleNLP/similarity_net/data args_test_data 23 | #ln -s ${ROOT_PATH}/data/PaddleNLP/similarity_net/data args_test_data 24 | ln -s data args_test_data 25 | fi 26 | 27 | #prepare pre_model 28 | if [ -e args_test_model_files ] 29 | then 30 | echo "args_test_model_files has already existed" 31 | else 32 | #ln -s /ssd3/models_test/models_args/PaddleNLP/similarity_net/model_files args_test_model_files 33 | #ln -s ${ROOT_PATH}/data/PaddleNLP/similarity_net/model_files args_test_model_files 34 | ln -s data args_test_model_files 35 | fi 36 | 37 | # 38 | if [ -e args_test_output_1 ] 39 | then 40 | /bin/rm -rf args_test_output_1 41 | mkdir args_test_output_1 42 | fi 43 | if [ -e args_test_output_2 ] 44 | then 45 | /bin/rm -rf args_test_output_2 46 | mkdir args_test_output_2 47 | fi 48 | -------------------------------------------------------------------------------- /inference/inference_benchmark/python/TensorFlow/README.md: -------------------------------------------------------------------------------- 1 | # TensorFlow Inference Tests 2 | 3 | ## load from frozen graph and inference 4 | ```shell 5 | python clas_benchmark.py --model_name="resnet50" \ 6 | --model_path=./resnet_pb/resnet50.pb \ 7 | --input_node="import/input_tensor:0" \ 8 | --output_node="softmax_tensor:0" 9 | ``` 10 | 11 | ## convert frozen graph .pb to saved_model 12 | ```shell 13 | # Notice: input node and output node depends on model's graph 14 | # the follow codes are just an example 15 | python convert_pb2savemodel.py --model_path=./resnet_pb/resnet50.pb \ 16 | --output_path=./resnet50_model \ 17 | --input_node="input_tensor:0" \ 18 | --output_node="softmax_tensor:0" 19 | ``` 20 | 21 | ## convert TF fp32 saved_model to TF trt saved_model 22 | ```shell 23 | python convert_savemodel2trtgraph.py --model_path=./resnet50_model \ 24 | --output_path=./resnet50_model_trt_fp32 \ 25 | --trt_precision="fp32" 26 | ``` 27 | 28 | ## load from saved model and inference 29 | ```shell 30 | python clas_savemodel_benchmark.py --model_path=./resnet50_model_trt_fp32 --use_gpu 31 | ``` 32 | -------------------------------------------------------------------------------- /inference/inference_api_test/cpp_api_test/bin/text_classification.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eo pipefail 3 | echo "Test starting... text_classification thread 4 batch_size 4 use gpu" 4 | $OUTPUT_BIN/test_text_classification --use_gpu=true \ 5 | --infer_model=$DATA_ROOT/cpp-model-infer/text_classification/model \ 6 | --infer_data=$DATA_ROOT/cpp-model-infer/text_classification/data/data.txt \ 7 | --batch_size=4 \ 8 | --num_threads=4 \ 9 | --repeat=3 \ 10 | --gtest_output=xml:test_text_classification_gpu.xml 11 | 12 | echo "Test starting... 
text_classification thread 4 batch_size 4 use cpu" 13 | $OUTPUT_BIN/test_text_classification --use_gpu=false \ 14 | --infer_model=$DATA_ROOT/cpp-model-infer/text_classification/model \ 15 | --infer_data=$DATA_ROOT/cpp-model-infer/text_classification/data/data.txt \ 16 | --batch_size=4 \ 17 | --num_threads=4 \ 18 | --repeat=3 \ 19 | --gtest_output=xml:test_text_classification_cpu.xml 20 | -------------------------------------------------------------------------------- /test/tools/tool-test-train-resource/run_MobileNetV1.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | run_log_path=${TRAIN_LOG_DIR:-$(pwd)} 4 | max_epoch=3 5 | batch_size=128 6 | model_name=dynamic_MobileNetV1_bs128 7 | num_gpu_devices=${1:-"1"} 8 | 9 | train_cmd="-c ./ppcls/configs/ImageNet/MobileNetV1/MobileNetV1.yaml 10 | -o Global.epochs=${max_epoch} 11 | -o Global.eval_during_train=False 12 | -o Global.save_interval=2 13 | -o DataLoader.Train.sampler.batch_size=${batch_size} 14 | -o DataLoader.Train.loader.num_workers=8" 15 | if [ ${num_gpu_devices} = "1" ]; then 16 | run_mode=sp 17 | train_cmd="python -u tools/train.py "${train_cmd} 18 | else 19 | run_mode=mp 20 | rm -rf ./mylog_${model_name} 21 | train_cmd="python -m paddle.distributed.launch --gpus=$CUDA_VISIBLE_DEVICES --log_dir ./mylog_${model_name} tools/train.py "${train_cmd} 22 | log_parse_file="mylog_${model_name}/workerlog.0" 23 | fi 24 | log_file=${run_log_path}/dynamic_MobileNetV1_bs128_${num_gpu_devices}_${run_mode} 25 | # run the model 26 | timeout 15m ${train_cmd} > ${log_file} 2>&1 27 | 28 | if [ ${run_mode} != "sp" -a -d mylog_${model_name} ]; then 29 | rm ${log_file} 30 | cp mylog_${model_name}/`ls -l mylog_${model_name}/ | awk '/^[^d]/ {print $5,$9}' | sort -nr | head -1 | awk '{print $2}'` ${log_file} 31 | fi 32 | -------------------------------------------------------------------------------- /test/tools/tool-test-train-performance/run_MobileNetV1.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | run_log_path=${TRAIN_LOG_DIR:-$(pwd)} 4 | max_epoch=3 5 | batch_size=128 6 | model_name=dynamic_MobileNetV1_bs128 7 | num_gpu_devices=${1:-"1"} 8 | 9 | train_cmd="-c ./ppcls/configs/ImageNet/MobileNetV1/MobileNetV1.yaml 10 | -o Global.epochs=${max_epoch} 11 | -o Global.eval_during_train=False 12 | -o Global.save_interval=2 13 | -o DataLoader.Train.sampler.batch_size=${batch_size} 14 | -o DataLoader.Train.loader.num_workers=8" 15 | if [ ${num_gpu_devices} = "1" ]; then 16 | run_mode=sp 17 | train_cmd="python -u tools/train.py "${train_cmd} 18 | else 19 | run_mode=mp 20 | rm -rf ./mylog_${model_name} 21 | train_cmd="python -m paddle.distributed.launch --gpus=$CUDA_VISIBLE_DEVICES --log_dir ./mylog_${model_name} tools/train.py "${train_cmd} 22 | log_parse_file="mylog_${model_name}/workerlog.0" 23 | fi 24 | log_file=${run_log_path}/dynamic_MobileNetV1_bs128_${num_gpu_devices}_${run_mode} 25 | # run the model 26 | timeout 15m ${train_cmd} > ${log_file} 2>&1 27 | 28 | if [ ${run_mode} != "sp" -a -d mylog_${model_name} ]; then 29 | rm ${log_file} 30 | cp mylog_${model_name}/`ls -l mylog_${model_name}/ | awk '/^[^d]/ {print $5,$9}' | sort -nr | head -1 | awk '{print $2}'` ${log_file} 31 | fi 32 | -------------------------------------------------------------------------------- /test/tools/tool-check-availability-of-installation/service.py: -------------------------------------------------------------------------------- 1 | #!/bin/env python 2 | # -*- 
coding: utf-8 -*- 3 | # encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python 4 | 5 | import os 6 | import time 7 | import argparse 8 | 9 | import requests 10 | import json 11 | import time 12 | import datetime 13 | from flask import Flask 14 | from flask_restful import Resource, Api 15 | from flask import request 16 | 17 | app = Flask(__name__) 18 | api = Api(app) 19 | 20 | 21 | def run_cmd(cmd): 22 | import subprocess 23 | process = subprocess.Popen( 24 | cmd, 25 | stdout=subprocess.PIPE, 26 | stderr=subprocess.STDOUT, 27 | shell=True, 28 | universal_newlines=True) 29 | out, err = process.communicate() 30 | return out, process.returncode 31 | 32 | 33 | def get_cmd(parameter_dict): 34 | p = parameter_dict 35 | return 'sh tool.sh {}'.format(p['check_item']) 36 | 37 | 38 | @app.route("/tool-1") 39 | def run(): 40 | 41 | parameter_dict = request.get_json() 42 | cmd = get_cmd(parameter_dict) 43 | print(parameter_dict, cmd) 44 | 45 | out, ret = run_cmd(cmd) 46 | if ret != 0: 47 | result = {"status": 500, "msg": out, "result": "FAIL"} 48 | else: 49 | result = {"status": 200, "msg": out, "result": "PASS"} 50 | return json.dumps(result) 51 | 52 | 53 | if __name__ == '__main__': 54 | app.run(host="0.0.0.0", port=8701, debug=False) 55 | -------------------------------------------------------------------------------- /inference/inference_benchmark/cc/TensorFlow/src/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.3 FATAL_ERROR) 2 | find_package(TensorflowCC REQUIRED) 3 | 4 | #include(FindProtobuf) 5 | #find_package(Protobuf REQUIRED) 6 | #include_directories(${PROTOBUF_INCLUDE_DIR}) 7 | 8 | include(external-cmake/gflag.cmake) 9 | include(external-cmake/glog.cmake) 10 | 11 | include_directories(${CMAKE_BINARY_DIR}/3rdparty/glog/include) 12 | include_directories(${CMAKE_BINARY_DIR}/3rdparty/gflag-2.2.2/include) 13 | 14 | link_directories(${CMAKE_BINARY_DIR}/3rdparty/glog/lib) 15 | link_directories(${CMAKE_BINARY_DIR}/3rdparty/gflag-2.2.2/lib) 16 | 17 | # set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_GLIBCXX_USE_CXX11_ABI=0") 18 | 19 | #set(EXTERNAL_LIB "-lrt -ldl -lprotobuf") 20 | #set(EXTERNAL_LIB "-lrt -ldl -lpthread") 21 | #set(EXTERNAL_LIB protobuf ${EXTERNAL_LIB}) 22 | 23 | add_executable(clas_benchmark clas_benchmark.cc) 24 | add_dependencies(clas_benchmark glog gflag-2.2.2) 25 | target_link_libraries(clas_benchmark TensorflowCC::TensorflowCC ${EXTERNAL_LIB}) 26 | 27 | 28 | # link cuda if it is available 29 | find_package(CUDA) 30 | if(CUDA_FOUND) 31 | target_link_libraries(clas_benchmark ${CUDA_LIBRARIES}) 32 | endif() 33 | 34 | # link gflags 35 | target_link_libraries(clas_benchmark gflags) 36 | target_link_libraries(clas_benchmark libglog.so) 37 | 38 | # find_package (glog 0.4.0 REQUIRED) 39 | # target_link_libraries(clas_benchmark glog::glog) 40 | -------------------------------------------------------------------------------- /inference/inference_benchmark/cc/Paddle/bin/run_debug_benchmark.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eo pipefail 3 | 4 | ROOT=`dirname "$0"` 5 | ROOT=`cd "$ROOT/.."; pwd` 6 | export OUTPUT=$ROOT/output 7 | export OUTPUT_BIN=$ROOT/build 8 | export DATA_ROOT=$ROOT/Data 9 | export CASE_ROOT=$ROOT/bin 10 | export LOG_ROOT=$ROOT/log 11 | export UTILS_ROOT=$ROOT/utils 12 | export gpu_type=`nvidia-smi -q | grep "Product Name" | head -n 1 | awk '{print $NF}'` 13 | source $ROOT/bin/run_clas_mkl_benchmark.sh 14 | #source 
$ROOT/bin/run_det_mkl_benchmark.sh 15 | # test model type 16 | model_type="static" 17 | if [ $# -ge 1 ]; then 18 | model_type=$1 19 | fi 20 | export MODEL_TYPE=${model_type} 21 | 22 | # test run-time device 23 | device_type="gpu" 24 | if [ $# -ge 2 ]; then 25 | device_type=$2 26 | fi 27 | 28 | mkdir -p $DATA_ROOT 29 | cd $DATA_ROOT 30 | if [ ! -f PaddleClas/infer_static/AlexNet/__model__ ]; then 31 | echo "==== Download PaddleClas data and models ====" 32 | wget --no-proxy -q https://sys-p0.bj.bcebos.com/Paddle-UnitTest-Model/PaddleClas.tgz --no-check-certificate 33 | tar -zxf PaddleClas.tgz 34 | fi 35 | 36 | cd - 37 | 38 | mkdir -p $LOG_ROOT 39 | 40 | echo "==== run ${MODEL_TYPE} model benchmark ====" 41 | 42 | if [ "${MODEL_TYPE}" == "static" ]; then 43 | if [ "${device_type}" == "gpu" ]; then 44 | bash $CASE_ROOT/run_debug_clas_gpu_trt_benchmark.sh "${DATA_ROOT}/PaddleClas/infer_static" 45 | fi 46 | fi 47 | -------------------------------------------------------------------------------- /test/tools/tool-test-train-resource/run_ResNet50_vd.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | #cd PaddleClas/ 3 | # Usage: sh run_ResNet50_vd.sh {num_gpu_devices} 4 | 5 | run_log_path=${TRAIN_LOG_DIR:-$(pwd)} 6 | max_epoch=3 7 | batch_size=64 8 | model_name=dynamic_ResNet50_vd 9 | num_gpu_devices=${1:-"1"} 10 | 11 | train_cmd="-c ./ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml 12 | -o Global.epochs=${max_epoch} 13 | -o Global.eval_during_train=False 14 | -o Global.save_interval=2 15 | -o DataLoader.Train.sampler.batch_size=${batch_size} 16 | -o DataLoader.Train.loader.num_workers=8" 17 | 18 | if [ ${num_gpu_devices} = "1" ]; then 19 | run_mode=sp 20 | train_cmd="python -m paddle.distributed.launch --gpus=$CUDA_VISIBLE_DEVICES tools/train.py "${train_cmd} 21 | else 22 | run_mode=mp 23 | rm -rf ./mylog_${model_name} 24 | train_cmd="python -m paddle.distributed.launch --gpus=$CUDA_VISIBLE_DEVICES --log_dir ./mylog_${model_name} tools/train.py "${train_cmd} 25 | log_parse_file="mylog_${model_name}/workerlog.0" 26 | 27 | fi 28 | log_file=${run_log_path}/dynamic_ResNet50_vd_bs64_${num_gpu_devices}_${run_mode} 29 | 30 | # run the model 31 | timeout 15m ${train_cmd} > ${log_file} 2>&1 32 | if [ ${run_mode} != "sp" -a -d mylog_${model_name} ]; then 33 | rm ${log_file} 34 | cp mylog_${model_name}/`ls -l mylog_${model_name}/ | awk '/^[^d]/ {print $5,$9}' | sort -nr | head -1 | awk '{print $2}'` ${log_file} 35 | fi 36 | -------------------------------------------------------------------------------- /test/tools/tool-test-train-performance/run_ResNet50_vd.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | #cd PaddleClas/ 3 | # Usage: sh run_ResNet50_vd.sh {num_gpu_devices} 4 | 5 | run_log_path=${TRAIN_LOG_DIR:-$(pwd)} 6 | max_epoch=3 7 | batch_size=64 8 | model_name=dynamic_ResNet50_vd 9 | num_gpu_devices=${1:-"1"} 10 | 11 | train_cmd="-c ./ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml 12 | -o Global.epochs=${max_epoch} 13 | -o Global.eval_during_train=False 14 | -o Global.save_interval=2 15 | -o DataLoader.Train.sampler.batch_size=${batch_size} 16 | -o DataLoader.Train.loader.num_workers=8" 17 | 18 | if [ ${num_gpu_devices} = "1" ]; then 19 | run_mode=sp 20 | train_cmd="python -m paddle.distributed.launch --gpus=$CUDA_VISIBLE_DEVICES tools/train.py "${train_cmd} 21 | else 22 | run_mode=mp 23 | rm -rf ./mylog_${model_name} 24 | train_cmd="python -m paddle.distributed.launch 
--gpus=$CUDA_VISIBLE_DEVICES --log_dir ./mylog_${model_name} tools/train.py "${train_cmd} 25 | log_parse_file="mylog_${model_name}/workerlog.0" 26 | 27 | fi 28 | log_file=${run_log_path}/dynamic_ResNet50_vd_bs64_${num_gpu_devices}_${run_mode} 29 | 30 | # run the model 31 | timeout 15m ${train_cmd} > ${log_file} 2>&1 32 | if [ ${run_mode} != "sp" -a -d mylog_${model_name} ]; then 33 | rm ${log_file} 34 | cp mylog_${model_name}/`ls -l mylog_${model_name}/ | awk '/^[^d]/ {print $5,$9}' | sort -nr | head -1 | awk '{print $2}'` ${log_file} 35 | fi 36 | -------------------------------------------------------------------------------- /inference/inference_api_test/cpp_api_test/bin/py_sed.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import argparse 4 | 5 | def parse_args(): 6 | """ parse input args """ 7 | parser = argparse.ArgumentParser() 8 | parser.add_argument("--input_file", type=str, help="xml report whose testsuite name needs to be changed") 9 | parser.add_argument("--testsuite_old_name", type=str, 10 | help="old testsuite name to be replaced") 11 | return parser.parse_args() 12 | 13 | class Sed(object): 14 | """ Sed """ 15 | def __init__(self, oldstr, newstr): 16 | """ __init__ """ 17 | self.old_str = oldstr 18 | self.new_str = newstr 19 | 20 | def sedfile(self, old_file, tmp_file="tmp.xml"): 21 | """ sed file """ 22 | with open(old_file, 'r') as self.f, open(tmp_file, "a+") as self.f1: 23 | for self.i in self.f: 24 | if self.old_str in self.i: 25 | self.i = self.i.replace(self.old_str, self.new_str) 26 | self.f1.write(self.i) 27 | self.f1.flush() 28 | os.remove(old_file) # delete old file 29 | os.rename(tmp_file, old_file) # move tmp to origin 30 | 31 | if __name__ == '__main__': 32 | args = parse_args() 33 | 34 | # e.g. test_AlexNet_gpu_1e-5_bz1.xml 35 | input_test_case_name = args.input_file.split('.xml')[0] 36 | 37 | # e.g. test_pdclas_model 38 | old_suite_name = args.testsuite_old_name 39 | 40 | sed = Sed(old_suite_name, input_test_case_name) 41 | sed.sedfile(args.input_file) 42 | -------------------------------------------------------------------------------- /test/tools/tool-test-op-correctness/cases/test_Sigmoid.py: -------------------------------------------------------------------------------- 1 | #!/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python 4 | """ 5 | /*************************************************************************** 6 | * 7 | * Copyright (c) 2020 Baidu.com, Inc. 
All Rights Reserved 8 | * @file test_Sigmoid.py 9 | * @author jiaxiao01 10 | * @date 2020/7/22 16:00 11 | * @brief test paddle.nn.Sigmoid 12 | * 13 | **************************************************************************/ 14 | """ 15 | 16 | from apibase import APIBase 17 | import paddle 18 | import pytest 19 | import numpy as np 20 | 21 | 22 | class TestSigmoid(APIBase): 23 | """ 24 | test Sigmoid 25 | """ 26 | def hook(self): 27 | """ 28 | implement 29 | """ 30 | self.types = [np.float32, np.float64] 31 | #self.debug = True 32 | #self.static = True 33 | # enable check grad 34 | self.enable_backward = True 35 | 36 | 37 | obj = TestSigmoid(paddle.nn.Sigmoid) 38 | 39 | @pytest.mark.p0 40 | def test_Sigmoid_base(): 41 | """ 42 | Sigmoid_base 43 | """ 44 | x_data = np.array([1.0, 2.0, 3.0, 4.0]).astype('float32') 45 | res = np.array([0.7310586, 0.880797, 0.95257413, 0.98201376]) 46 | obj.base(res=res, data=x_data) 47 | 48 | 49 | def test_Sigmoid_input0(): 50 | """ 51 | input=[1.0, 2.0, 3.0, 4.0] 52 | """ 53 | x_data = np.array([1.0, 2.0, 3.0, 4.0]).astype('float32') 54 | res = np.array([0.7310586, 0.880797, 0.95257413, 0.98201376]) 55 | obj.run(res=res, data=x_data) 56 | -------------------------------------------------------------------------------- /models/args/PaddleNLP/seq2seq/seq2seq/test_args.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | this is seq2seq args 4 | """ 5 | 6 | train = { 7 | "tar_lang": ["vi"], 8 | "src_lang": ["en"], 9 | "attention": [True, False], 10 | "num_layers": [2], 11 | "hidden_size": [512, 256], 12 | "src_vocab_size": [17191], 13 | "tar_vocab_size": [7709], 14 | "batch_size": [128, 64], 15 | "dropout": [0.2, 0.1], 16 | "init_scale": [0.1, 0.2], 17 | "max_grad_norm": [5.0, 4.9], 18 | "train_data_prefix": ["data/en-vi/train"], 19 | "eval_data_prefix": ["data/en-vi/tst2012"], 20 | "test_data_prefix": ["data/en-vi/tst2013"], 21 | "vocab_prefix": ["data/en-vi/vocab"], 22 | "use_gpu": [True], 23 | "model_path": ["./attention_models", "./models"], 24 | "max_epoch": [1, 2], 25 | "optimizer": ["adam"], 26 | "learning_rate": ["0.001"], 27 | } 28 | 29 | infer = { 30 | "tar_lang": ["vi"], 31 | "src_lang": ["en"], 32 | "attention": [True], 33 | "num_layers": [2], 34 | "hidden_size": [512], 35 | "src_vocab_size": [17191], 36 | "tar_vocab_size": [7709], 37 | "batch_size": [128, 64], 38 | "dropout": [0.2, 0.1], 39 | "init_scale": [0.1, 0.2], 40 | "max_grad_norm": [5.0, 4.9], 41 | "vocab_prefix": ["data/en-vi/vocab"], 42 | "infer_file": ["data/en-vi/tst2013.en", "data/en-vi/tst2012.en"], 43 | "reload_model": ["attention_models/epoch_0"], 44 | "use_gpu": [True], 45 | "infer_output_file": [ 46 | "attention_infer_output/infer_output.txt", 47 | "infer_output/infer_output.txt" 48 | ], 49 | "beam_size": [5, 10], 50 | } 51 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/cpu/test_yolov3_cpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_cpu_helper import TestModelInferenceCPU 26 | 27 | TestBase = TestModelInferenceCPU() 28 | 29 | @pytest.mark.p0 30 | def test_inference_yolov3_cpu(): 31 | """ 32 | Inference and check value 33 | yolov3 cpu model 34 | Args: 35 | None 36 | Return: 37 | None 38 | """ 39 | model_name = "yolov3_darknet" 40 | tmp_path = os.path.join(TestBase.model_root, "Detection") 41 | model_path = os.path.join(tmp_path, model_name, "model") 42 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 43 | delta = 0.001 44 | 45 | res, exp = TestBase.get_infer_results(model_path, data_path) 46 | 47 | for i in range(len(res)): 48 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 49 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/gpu/test_yolov3_gpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_gpu_helper import TestModelInferenceGPU 26 | 27 | TestBase = TestModelInferenceGPU() 28 | 29 | 30 | @pytest.mark.p0 31 | def test_inference_yolov3_gpu(): 32 | """ 33 | Inference and check value 34 | yolov3 gpu model 35 | Args: 36 | None 37 | Return: 38 | None 39 | """ 40 | model_name = "yolov3_darknet" 41 | tmp_path = os.path.join(TestBase.model_root, "Detection") 42 | model_path = os.path.join(tmp_path, model_name, "model") 43 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 44 | delta = 0.001 45 | 46 | res, exp = TestBase.get_infer_results(model_path, data_path) 47 | 48 | for i in range(len(res)): 49 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 50 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/cpu/test_mask_rcnn_cpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_cpu_helper import TestModelInferenceCPU 26 | 27 | TestBase = TestModelInferenceCPU() 28 | 29 | @pytest.mark.p0 30 | def test_inference_mask_rcnn_cpu(): 31 | """ 32 | Inference and check value 33 | mask_rcnn cpu model 34 | Args: 35 | None 36 | Returns: 37 | None 38 | """ 39 | model_name = "mask_rcnn_r50_1x" 40 | tmp_path = os.path.join(TestBase.model_root, "Detection") 41 | model_path = os.path.join(tmp_path, model_name, "model") 42 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 43 | delta = 0.0001 44 | 45 | res, exp = TestBase.get_infer_results(model_path, data_path) 46 | 47 | for i in range(len(res)): 48 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 49 | -------------------------------------------------------------------------------- /test/tools/tool-test-op-correctness/cases/test_relu6.py: -------------------------------------------------------------------------------- 1 | #!/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python 4 | """ 5 | /*************************************************************************** 6 | * 7 | * Copyright (c) 2020 Baidu.com, Inc. All Rights Reserved 8 | * @file test_relu6.py 9 | * @author zhengtianyu 10 | * @date 2020-08-25 17:30:01 11 | * @brief test_relu6 12 | * 13 | **************************************************************************/ 14 | """ 15 | from apibase import APIBase 16 | from apibase import randtool 17 | import paddle 18 | import pytest 19 | import numpy as np 20 | 21 | 22 | class TestRelu6(APIBase): 23 | """ 24 | test paddle.nn.ReLU6 25 | """ 26 | 27 | def hook(self): 28 | """ 29 | implement 30 | """ 31 | self.types = [np.float32, np.float64] 32 | # self.debug = True 33 | # self.static = True 34 | # enable check grad 35 | # self.enable_backward = True 36 | 37 | 38 | obj = TestRelu6(paddle.nn.ReLU6) 39 | 40 | 41 | @pytest.mark.p0 42 | def test_relu6_base(): 43 | """ 44 | base 45 | """ 46 | x = np.array([-1, 0.3, 6.5]) 47 | res = np.minimum(np.maximum(0, x), 6) 48 | obj.base(res=res, data=x) 49 | 50 | 51 | def test_relu6(): 52 | """ 53 | default 54 | """ 55 | x = randtool("float", -10, 10, [3, 3, 3]) 56 | res = np.minimum(np.maximum(0, x), 6) 57 | obj.run(res=res, data=x) 58 | 59 | 60 | def test_relu61(): 61 | """ 62 | x = np.array([6, 6, 6, 6]) 63 | """ 64 | x = np.array([6, 6, 6, 6]) 65 | res = np.minimum(np.maximum(0, x), 6) 66 | obj.run(res=res, data=x) 67 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/cpu/test_blazeface_cpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_cpu_helper import TestModelInferenceCPU 26 | 27 | TestBase = TestModelInferenceCPU() 28 | 29 | @pytest.mark.p0 30 | def test_inference_blazeface_cpu(): 31 | """ 32 | Inference and check value 33 | blazeface cpu model 34 | Args: 35 | None 36 | Returns: 37 | None 38 | """ 39 | model_name = "blazeface_nas_128" 40 | tmp_path = os.path.join(TestBase.model_root, "Detection") 41 | model_path = os.path.join(tmp_path, model_name, "model") 42 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 43 | delta = 0.0001 44 | 45 | res, exp = TestBase.get_infer_results(model_path, data_path) 46 | 47 | for i in range(len(res)): 48 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 49 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/cpu/test_resnet50_cpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
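# The CPU tests in this directory all follow one pattern: run a saved
# inference model on recorded inputs, then compare every flattened output
# tensor against a stored baseline within an absolute tolerance `delta`.
# A minimal sketch of that comparison step (an illustrative assumption --
# the real implementation lives in test_cpu_helper.py):
#
#     import numpy as np
#
#     def check_data(res, exp, delta):
#         """Assert element-wise |res - exp| <= delta."""
#         assert res.shape == exp.shape
#         assert np.allclose(res, exp, atol=delta, rtol=0)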
14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_cpu_helper import TestModelInferenceCPU 26 | 27 | TestBase = TestModelInferenceCPU() 28 | 29 | @pytest.mark.p0 30 | def test_inference_resnet50_cpu(): 31 | """ 32 | Inference and check value 33 | resnet50 cpu model 34 | Args: 35 | None 36 | Return: 37 | None 38 | """ 39 | model_name = "ResNet50_pretrained" 40 | tmp_path = os.path.join(TestBase.model_root, "classification") 41 | model_path = os.path.join(tmp_path, model_name, "model") 42 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 43 | delta = 0.0001 44 | 45 | res, exp = TestBase.get_infer_results(model_path, data_path) 46 | 47 | for i in range(len(res)): 48 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 49 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/gpu/test_mask_rcnn_gpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_gpu_helper import TestModelInferenceGPU 26 | 27 | TestBase = TestModelInferenceGPU() 28 | 29 | 30 | @pytest.mark.p1 31 | def test_inference_mask_rcnn_gpu(): 32 | """ 33 | Inference and check value 34 | mask_rcnn gpu model 35 | Args: 36 | None 37 | Return: 38 | None 39 | """ 40 | model_name = "mask_rcnn_r50_1x" 41 | tmp_path = os.path.join(TestBase.model_root, "Detection") 42 | model_path = os.path.join(tmp_path, model_name, "model") 43 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 44 | delta = 0.001 45 | 46 | res, exp = TestBase.get_infer_results(model_path, data_path) 47 | 48 | for i in range(len(res)): 49 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 50 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/cpu/test_faster_rcnn_cpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_cpu_helper import TestModelInferenceCPU 26 | 27 | TestBase = TestModelInferenceCPU() 28 | 29 | @pytest.mark.p0 30 | def test_inference_faster_rcnn_cpu(): 31 | """ 32 | Inference and check value 33 | faster_rcnn cpu model 34 | Args: 35 | None 36 | Return: 37 | None 38 | """ 39 | model_name = "faster_rcnn_r50_1x" 40 | tmp_path = os.path.join(TestBase.model_root, "Detection") 41 | model_path = os.path.join(tmp_path, model_name, "model") 42 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 43 | delta = 0.0001 44 | 45 | res, exp = TestBase.get_infer_results(model_path, data_path) 46 | 47 | for i in range(len(res)): 48 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 49 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/gpu/test_blazeface_gpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_gpu_helper import TestModelInferenceGPU 26 | 27 | TestBase = TestModelInferenceGPU() 28 | 29 | 30 | @pytest.mark.p0 31 | def test_inference_blazeface_gpu(): 32 | """ 33 | Inference and check value 34 | blazeface gpu model 35 | Args: 36 | None 37 | Return: 38 | None 39 | """ 40 | model_name = "blazeface_nas_128" 41 | tmp_path = os.path.join(TestBase.model_root, "Detection") 42 | model_path = os.path.join(tmp_path, model_name, "model") 43 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 44 | delta = 0.0001 45 | 46 | res, exp = TestBase.get_infer_results(model_path, data_path) 47 | 48 | for i in range(len(res)): 49 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 50 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/gpu/test_faster_rcnn_gpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_gpu_helper import TestModelInferenceGPU 26 | TestBase = TestModelInferenceGPU() 27 | 28 | 29 | @pytest.mark.p0 30 | def test_inference_faster_rcnn_gpu(): 31 | """ 32 | Inference and check value 33 | faster_rcnn gpu model 34 | Args: 35 | None 36 | Return: 37 | None 38 | """ 39 | model_name = "faster_rcnn_r50_1x" 40 | tmp_path = os.path.join(TestBase.model_root, "Detection") 41 | model_path = os.path.join(tmp_path, model_name, "model") 42 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 43 | delta = 0.001 44 | 45 | res, exp = TestBase.get_infer_results(model_path, data_path) 46 | 47 | for i in range(len(res)): 48 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 49 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/gpu/test_resnet50_gpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_gpu_helper import TestModelInferenceGPU 26 | 27 | TestBase = TestModelInferenceGPU() 28 | 29 | 30 | @pytest.mark.p0 31 | def test_inference_resnet50_gpu(): 32 | """ 33 | Inference and check value 34 | resnet50 gpu model 35 | Args: 36 | None 37 | Return: 38 | None 39 | """ 40 | model_name = "ResNet50_pretrained" 41 | tmp_path = os.path.join(TestBase.model_root, "classification") 42 | model_path = os.path.join(tmp_path, model_name, "model") 43 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 44 | delta = 0.001 45 | 46 | res, exp = TestBase.get_infer_results(model_path, data_path) 47 | 48 | for i in range(len(res)): 49 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 50 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/cpu/test_det_mv3_db_cpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_cpu_helper import TestModelInferenceCPU 26 | 27 | TestBase = TestModelInferenceCPU(data_path="Data") 28 | 29 | @pytest.mark.p0 30 | def test_inference_det_mv3_db_cpu(): 31 | """ 32 | Inference and check value 33 | det_mv3_db cpu model 34 | Args: 35 | None 36 | Returns: 37 | None 38 | """ 39 | model_name = "det_mv3_db" 40 | tmp_path = os.path.join(TestBase.model_root, "python-ocr-infer") 41 | model_path = os.path.join(tmp_path, model_name) 42 | data_path = os.path.join(tmp_path, "word_det_data", "data.json") 43 | delta = 0.0001 44 | 45 | res, exp = TestBase.get_infer_results(model_path, data_path) 46 | 47 | for i in range(len(res)): 48 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 49 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/cpu/test_xception41_cpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
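# A plausible shape for TestBase.get_infer_results, hedged as a sketch: the
# helper presumably loads recorded inputs and baselines from data.json,
# builds a predictor, and returns actual and expected outputs side by side.
# Written against the Paddle 2.x inference API; the "inputs"/"outputs" keys
# of data.json are assumptions for illustration, not confirmed by this repo:
#
#     import json
#     import numpy as np
#     from paddle.inference import Config, create_predictor
#
#     def get_infer_results(model_path, data_path):
#         with open(data_path) as f:
#             record = json.load(f)               # assumed schema
#         config = Config(model_path)             # dir with model + params
#         config.disable_gpu()                    # plain CPU run
#         predictor = create_predictor(config)
#         for name in predictor.get_input_names():
#             handle = predictor.get_input_handle(name)
#             handle.copy_from_cpu(np.array(record["inputs"][name]))
#         predictor.run()
#         res = [predictor.get_output_handle(n).copy_to_cpu()
#                for n in predictor.get_output_names()]
#         exp = [np.array(o) for o in record["outputs"]]
#         return res, exp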
14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_cpu_helper import TestModelInferenceCPU 26 | 27 | TestBase = TestModelInferenceCPU() 28 | 29 | @pytest.mark.p0 30 | def test_inference_xception41_cpu(): 31 | """ 32 | Inference and check value 33 | xception41 cpu model 34 | Args: 35 | None 36 | Return: 37 | None 38 | """ 39 | model_name = "Xception_41_pretrained" 40 | tmp_path = os.path.join(TestBase.model_root, "classification") 41 | model_path = os.path.join(tmp_path, model_name, "model") 42 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 43 | delta = 0.0001 44 | 45 | res, exp = TestBase.get_infer_results(model_path, data_path) 46 | 47 | for i in range(len(res)): 48 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 49 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/mkldnn/test_yolov3_mkldnn.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_mkldnn_helper import TestModelInferenceMKLDNN 26 | 27 | TestBase = TestModelInferenceMKLDNN() 28 | 29 | @pytest.mark.p0 30 | def test_inference_yolov3_mkldnn(): 31 | """ 32 | Inference and check value 33 | yolov3 mkldnn model 34 | Args: 35 | None 36 | Return: 37 | None 38 | """ 39 | model_name = "yolov3_darknet" 40 | tmp_path = os.path.join(TestBase.model_root, "Detection") 41 | model_path = os.path.join(tmp_path, model_name, "model") 42 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 43 | delta = 0.001 44 | 45 | res, exp = TestBase.get_infer_results(model_path, data_path) 46 | 47 | for i in range(len(res)): 48 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 49 | 50 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/cpu/test_mobilenetv1_cpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_cpu_helper import TestModelInferenceCPU 26 | 27 | TestBase = TestModelInferenceCPU() 28 | 29 | @pytest.mark.p0 30 | def test_inference_mobilenetv1_cpu(): 31 | """ 32 | Inference and check value 33 | mobilenetv1 cpu model 34 | Args: 35 | None 36 | Return: 37 | None 38 | """ 39 | model_name = "MobileNetV1_pretrained" 40 | tmp_path = os.path.join(TestBase.model_root, "classification") 41 | model_path = os.path.join(tmp_path, model_name, "model") 42 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 43 | delta = 0.0001 44 | 45 | res, exp = TestBase.get_infer_results(model_path, data_path) 46 | 47 | for i in range(len(res)): 48 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 49 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/gpu/test_det_mv3_db_gpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_gpu_helper import TestModelInferenceGPU 26 | 27 | TestBase = TestModelInferenceGPU(data_path="Data") 28 | 29 | 30 | @pytest.mark.p0 31 | def test_inference_det_mv3_db_gpu(): 32 | """ 33 | Inference and check value 34 | det_mv3_db gpu model 35 | Args: 36 | None 37 | Return: 38 | None 39 | """ 40 | model_name = "det_mv3_db" 41 | tmp_path = os.path.join(TestBase.model_root, "python-ocr-infer") 42 | model_path = os.path.join(tmp_path, model_name) 43 | data_path = os.path.join(tmp_path, "word_det_data", "data.json") 44 | delta = 0.0001 45 | 46 | res, exp = TestBase.get_infer_results(model_path, data_path) 47 | 48 | for i in range(len(res)): 49 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 50 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/gpu/test_xception41_gpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_gpu_helper import TestModelInferenceGPU 26 | 27 | TestBase = TestModelInferenceGPU() 28 | 29 | 30 | @pytest.mark.p0 31 | def test_inference_xception41_gpu(): 32 | """ 33 | Inference and check value 34 | xception41 gpu model 35 | Args: 36 | None 37 | Return: 38 | None 39 | """ 40 | model_name = "Xception_41_pretrained" 41 | tmp_path = os.path.join(TestBase.model_root, "classification") 42 | model_path = os.path.join(tmp_path, model_name, "model") 43 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 44 | delta = 0.0001 45 | 46 | res, exp = TestBase.get_infer_results(model_path, data_path) 47 | 48 | for i in range(len(res)): 49 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 50 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/cpu/test_deeplabv3_cpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_cpu_helper import TestModelInferenceCPU 26 | 27 | TestBase = TestModelInferenceCPU() 28 | 29 | @pytest.mark.p0 30 | def test_inference_deeplabv3_cpu(): 31 | """ 32 | Inference and check value 33 | deeplabv3_mobilenetv2 cpu model 34 | Args: 35 | None 36 | Returns: 37 | None 38 | """ 39 | model_name = "deeplabv3_mobilenetv2" 40 | tmp_path = os.path.join(TestBase.model_root, "segmentation") 41 | model_path = os.path.join(tmp_path, model_name, "model") 42 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 43 | delta = 0.0001 44 | 45 | res, exp = TestBase.get_infer_results(model_path, data_path) 46 | 47 | for i in range(len(res)): 48 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 49 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/cpu/test_det_mv3_east_cpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_cpu_helper import TestModelInferenceCPU 26 | 27 | TestBase = TestModelInferenceCPU(data_path="Data") 28 | 29 | @pytest.mark.p0 30 | def test_inference_det_mv3_east_cpu(): 31 | """ 32 | Inference and check value 33 | det_mv3_east cpu model 34 | Args: 35 | None 36 | Returns: 37 | None 38 | """ 39 | model_name = "det_mv3_east" 40 | tmp_path = os.path.join(TestBase.model_root, "python-ocr-infer") 41 | model_path = os.path.join(tmp_path, model_name) 42 | data_path = os.path.join(tmp_path, "word_det_data", "data.json") 43 | delta = 0.001 44 | 45 | res, exp = TestBase.get_infer_results(model_path, data_path) 46 | 47 | for i in range(len(res)): 48 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 49 | 50 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/cpu/test_seresnext50_cpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
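# Cases are tiered with pytest markers (p0 marks the highest-priority set).
# Selecting a tier from the command line is then a one-liner, e.g.:
#
#     python -m pytest -m p0 -v .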
14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_cpu_helper import TestModelInferenceCPU 26 | 27 | TestBase = TestModelInferenceCPU() 28 | 29 | @pytest.mark.p0 30 | def test_inference_seresnext50_cpu(): 31 | """ 32 | Inference and check value 33 | seresnext50 cpu model 34 | Args: 35 | None 36 | Return: 37 | None 38 | """ 39 | model_name = "SE_ResNeXt50_32x4d_pretrained" 40 | tmp_path = os.path.join(TestBase.model_root, "classification") 41 | model_path = os.path.join(tmp_path, model_name, "model") 42 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 43 | delta = 0.0001 44 | 45 | res, exp = TestBase.get_infer_results(model_path, data_path) 46 | 47 | for i in range(len(res)): 48 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 49 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/gpu/test_det_mv3_east_gpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_gpu_helper import TestModelInferenceGPU 26 | 27 | TestBase = TestModelInferenceGPU(data_path="Data") 28 | 29 | 30 | @pytest.mark.p0 31 | def test_inference_det_mv3_east_gpu(): 32 | """ 33 | Inference and check value 34 | det_mv3_east gpu model 35 | Args: 36 | None 37 | Return: 38 | None 39 | """ 40 | model_name = "det_mv3_east" 41 | tmp_path = os.path.join(TestBase.model_root, "python-ocr-infer") 42 | model_path = os.path.join(tmp_path, model_name) 43 | data_path = os.path.join(tmp_path, "word_det_data", "data.json") 44 | delta = 0.001 45 | 46 | res, exp = TestBase.get_infer_results(model_path, data_path) 47 | 48 | for i in range(len(res)): 49 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 50 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/gpu/test_mobilenetv1_gpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_gpu_helper import TestModelInferenceGPU 26 | 27 | TestBase = TestModelInferenceGPU() 28 | 29 | 30 | @pytest.mark.p0 31 | def test_inference_mobilenetv1_gpu(): 32 | """ 33 | Inference and check value 34 | mobilenetv1 gpu model 35 | Args: 36 | None 37 | Return: 38 | None 39 | """ 40 | model_name = "MobileNetV1_pretrained" 41 | tmp_path = os.path.join(TestBase.model_root, "classification") 42 | model_path = os.path.join(tmp_path, model_name, "model") 43 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 44 | delta = 0.0001 45 | 46 | res, exp = TestBase.get_infer_results(model_path, data_path) 47 | 48 | for i in range(len(res)): 49 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 50 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/mkldnn/test_blazeface_mkldnn.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
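# The MKLDNN variants differ from the plain CPU tests only in how the
# predictor is configured. With the Paddle inference API the switch is
# roughly the following (a hedged sketch -- the real setup lives in
# test_mkldnn_helper.py):
#
#     from paddle.inference import Config
#
#     config = Config(model_path)
#     config.disable_gpu()
#     config.enable_mkldnn()                      # oneDNN kernels on x86 CPUs
#     config.set_cpu_math_library_num_threads(4)  # intra-op CPU threads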
14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_mkldnn_helper import TestModelInferenceMKLDNN 26 | 27 | TestBase = TestModelInferenceMKLDNN() 28 | 29 | @pytest.mark.p0 30 | def test_inference_blazeface_mkldnn(): 31 | """ 32 | Inference and check value 33 | blazeface mkldnn model 34 | Args: 35 | None 36 | Return: 37 | None 38 | """ 39 | model_name = "blazeface_nas_128" 40 | tmp_path = os.path.join(TestBase.model_root, "Detection") 41 | model_path = os.path.join(tmp_path, model_name, "model") 42 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 43 | delta = 0.0001 44 | 45 | res, exp = TestBase.get_infer_results(model_path, data_path) 46 | 47 | for i in range(len(res)): 48 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 49 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/Lucas-C/pre-commit-hooks.git 3 | sha: v1.0.1 4 | hooks: 5 | - id: remove-crlf 6 | files: (?!.*third_party)^.*$ | (?!.*book)^.*$ 7 | - repo: https://github.com/PaddlePaddle/mirrors-yapf.git 8 | sha: 0d79c0c469bab64f7229c9aca2b1186ef47f0e37 9 | hooks: 10 | - id: yapf 11 | files: (.*\.(py|bzl)|BUILD|.*\.BUILD|WORKSPACE)$ 12 | - repo: https://github.com/pre-commit/pre-commit-hooks 13 | sha: 5bf6c09bfa1297d3692cadd621ef95f1284e33c0 14 | hooks: 15 | - id: check-added-large-files 16 | - id: check-merge-conflict 17 | - id: check-symlinks 18 | - id: detect-private-key 19 | files: (?!.*third_party)^.*$ | (?!.*book)^.*$ 20 | - id: end-of-file-fixer 21 | - repo: local 22 | hooks: 23 | - id: cpplint-cpp-source 24 | name: cpplint 25 | description: Check C++ code style using cpplint.py. 26 | entry: bash ./tools/codestyle/cpplint_pre_commit.hook 27 | language: system 28 | files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx)$ 29 | - repo: local 30 | hooks: 31 | - id: pylint-doc-string 32 | name: pylint 33 | description: Check python docstring style using docstring_checker. 34 | entry: bash ./tools/codestyle/pylint_pre_commit.hook 35 | language: system 36 | files: \.(py)$ 37 | - repo: local 38 | hooks: 39 | - id: copyright_checker 40 | name: copyright_checker 41 | entry: python ./tools/codestyle/copyright.hook 42 | language: system 43 | files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|proto|py)$ 44 | exclude: (?!.*third_party)^.*$ | (?!.*book)^.*$ 45 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/gpu/test_deeplabv3_gpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
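# The GPU variants instead point the predictor at a CUDA device. With the
# Paddle inference API that is roughly (a hedged sketch -- the real setup
# lives in test_gpu_helper.py):
#
#     from paddle.inference import Config
#
#     config = Config(model_path)
#     config.enable_use_gpu(1000, 0)  # initial GPU memory pool in MB, device id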
14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_gpu_helper import TestModelInferenceGPU 26 | 27 | TestBase = TestModelInferenceGPU() 28 | 29 | 30 | @pytest.mark.p0 31 | def test_inference_deeplabv3_gpu(): 32 | """ 33 | Inference and check value 34 | deeplabv3_mobilenetv2 gpu model 35 | Args: 36 | None 37 | Return: 38 | None 39 | """ 40 | model_name = "deeplabv3_mobilenetv2" 41 | tmp_path = os.path.join(TestBase.model_root, "segmentation") 42 | model_path = os.path.join(tmp_path, model_name, "model") 43 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 44 | delta = 0.0001 45 | 46 | res, exp = TestBase.get_infer_results(model_path, data_path) 47 | 48 | for i in range(len(res)): 49 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 50 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/gpu/test_seresnext50_gpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_gpu_helper import TestModelInferenceGPU 26 | 27 | TestBase = TestModelInferenceGPU() 28 | 29 | 30 | @pytest.mark.p0 31 | def test_inference_seresnext50_gpu(): 32 | """ 33 | Inference and check value 34 | seresnext50 gpu model 35 | Args: 36 | None 37 | Return: 38 | None 39 | """ 40 | model_name = "SE_ResNeXt50_32x4d_pretrained" 41 | tmp_path = os.path.join(TestBase.model_root, "classification") 42 | model_path = os.path.join(tmp_path, model_name, "model") 43 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 44 | delta = 0.0001 45 | 46 | res, exp = TestBase.get_infer_results(model_path, data_path) 47 | 48 | for i in range(len(res)): 49 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 50 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/mkldnn/test_mask_rcnn_mkldnn.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_mkldnn_helper import TestModelInferenceMKLDNN 26 | 27 | TestBase = TestModelInferenceMKLDNN() 28 | 29 | @pytest.mark.p0 30 | def test_inference_mask_rcnn_mkldnn(): 31 | """ 32 | Inference and check value 33 | mask_rcnn mkldnn model 34 | Args: 35 | None 36 | Return: 37 | None 38 | """ 39 | model_name = "mask_rcnn_r50_1x" 40 | tmp_path = os.path.join(TestBase.model_root, "Detection") 41 | model_path = os.path.join(tmp_path, model_name, "model") 42 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 43 | delta = 0.001 44 | 45 | res, exp = TestBase.get_infer_results(model_path, data_path) 46 | 47 | for i in range(len(res)): 48 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 49 | 50 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/trt_fp32/test_yolov3_trt_fp32.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_trt_fp32_helper import TestModelInferenceTrtFp32 26 | 27 | TestBase = TestModelInferenceTrtFp32() 28 | 29 | 30 | @pytest.mark.p0 31 | def test_inference_yolov3_trt_fp32(): 32 | """ 33 | Inference and check value 34 | yolov3 trt_fp32 model 35 | Args: 36 | None 37 | Return: 38 | None 39 | """ 40 | model_name = "yolov3_darknet" 41 | tmp_path = os.path.join(TestBase.model_root, "Detection") 42 | model_path = os.path.join(tmp_path, model_name, "model") 43 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 44 | delta = 0.001 45 | 46 | res, exp = TestBase.get_infer_results(model_path, data_path) 47 | 48 | for i in range(len(res)): 49 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 50 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/mkldnn/test_faster_rcnn_mkldnn.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_mkldnn_helper import TestModelInferenceMKLDNN 26 | 27 | TestBase = TestModelInferenceMKLDNN() 28 | 29 | @pytest.mark.p0 30 | def test_inference_faster_rcnn_mkldnn(): 31 | """ 32 | Inference and check value 33 | faster_rcnn mkldnn model 34 | Args: 35 | None 36 | Return: 37 | None 38 | """ 39 | model_name = "faster_rcnn_r50_1x" 40 | tmp_path = os.path.join(TestBase.model_root, "Detection") 41 | model_path = os.path.join(tmp_path, model_name, "model") 42 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 43 | delta = 0.001 44 | 45 | res, exp = TestBase.get_infer_results(model_path, data_path) 46 | 47 | for i in range(len(res)): 48 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 49 | 50 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/mkldnn/test_resnet50_mkldnn.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_mkldnn_helper import TestModelInferenceMKLDNN 26 | 27 | TestBase = TestModelInferenceMKLDNN() 28 | 29 | @pytest.mark.p0 30 | def test_inference_resnet50_mkldnn(): 31 | """ 32 | Inference and check value 33 | resnet50 mkldnn model 34 | Args: 35 | None 36 | Return: 37 | None 38 | """ 39 | model_name = "ResNet50_pretrained" 40 | tmp_path = os.path.join(TestBase.model_root, "classification") 41 | model_path = os.path.join(tmp_path, model_name, "model") 42 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 43 | delta = 0.0001 44 | 45 | res, exp = TestBase.get_infer_results(model_path, data_path) 46 | 47 | for i in range(len(res)): 48 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 49 | 50 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/gpu/test_hub_ernie_gpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import argparse 15 | import collections 16 | import os 17 | import sys 18 | import logging 19 | import struct 20 | import six 21 | 22 | import pytest 23 | import nose 24 | import numpy as np 25 | 26 | from test_gpu_helper import TestModelInferenceGPU 27 | 28 | TestBase = TestModelInferenceGPU(data_path="Data") 29 | 30 | 31 | @pytest.mark.p1 32 | def test_inference_hub_ernie_gpu(): 33 | """ 34 | Inference and check value 35 | hub_ernie gpu model 36 | Args: 37 | None 38 | Return: 39 | None 40 | """ 41 | model_name = "hub-ernie" 42 | tmp_path = os.path.join(TestBase.model_root, model_name) 43 | model_path = os.path.join(tmp_path, "hub-ernie-model") 44 | data_path = os.path.join(tmp_path, "hub-ernie-data", "data.json") 45 | delta = 0.0001 46 | 47 | res, exp = TestBase.get_infer_results(model_path, data_path) 48 | 49 | for i in range(len(res)): 50 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 51 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/mkldnn/test_det_mv3_db_mkldnn.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
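# The OCR cases use a different on-disk layout from the classification and
# detection suites: the helper is constructed with data_path="Data", the
# model directory is passed directly (no trailing "model" subfolder), and
# the det_mv3_* models share one recorded input set:
#
#     <model_root>/python-ocr-infer/
#         det_mv3_db/                  # model files
#         det_mv3_east/
#         word_det_data/data.json      # shared inputs and baselines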
14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_mkldnn_helper import TestModelInferenceMKLDNN 26 | 27 | TestBase = TestModelInferenceMKLDNN(data_path="Data") 28 | 29 | @pytest.mark.p0 30 | def test_inference_det_mv3_db_mkldnn(): 31 | """ 32 | Inference and check value 33 | det_mv3_db mkldnn model 34 | Args: 35 | None 36 | Return: 37 | None 38 | """ 39 | model_name = "det_mv3_db" 40 | tmp_path = os.path.join(TestBase.model_root, "python-ocr-infer") 41 | model_path = os.path.join(tmp_path, model_name) 42 | data_path = os.path.join(tmp_path, "word_det_data", "data.json") 43 | delta = 0.0001 44 | 45 | res, exp = TestBase.get_infer_results(model_path, data_path) 46 | 47 | for i in range(len(res)): 48 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 49 | 50 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/mkldnn/test_det_mv3_east_mkldnn.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_mkldnn_helper import TestModelInferenceMKLDNN 26 | 27 | TestBase = TestModelInferenceMKLDNN(data_path="Data") 28 | 29 | @pytest.mark.p0 30 | def test_inference_det_mv3_east_mkldnn(): 31 | """ 32 | Inference and check value 33 | det_mv3_east mkldnn model 34 | Args: 35 | None 36 | Return: 37 | None 38 | """ 39 | model_name = "det_mv3_east" 40 | tmp_path = os.path.join(TestBase.model_root, "python-ocr-infer") 41 | model_path = os.path.join(tmp_path, model_name) 42 | data_path = os.path.join(tmp_path, "word_det_data", "data.json") 43 | delta = 0.001 44 | 45 | res, exp = TestBase.get_infer_results(model_path, data_path) 46 | 47 | for i in range(len(res)): 48 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 49 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/trt_fp32/test_blazeface_trt_fp32.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_trt_fp32_helper import TestModelInferenceTrtFp32 26 | 27 | TestBase = TestModelInferenceTrtFp32() 28 | 29 | 30 | @pytest.mark.p0 31 | def test_inference_blazeface_trt_fp32(): 32 | """ 33 | Inference and check value 34 | blazeface trt_fp32 model 35 | Args: 36 | None 37 | Return: 38 | None 39 | """ 40 | model_name = "blazeface_nas_128" 41 | tmp_path = os.path.join(TestBase.model_root, "Detection") 42 | model_path = os.path.join(tmp_path, model_name, "model") 43 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 44 | delta = 0.0001 45 | 46 | res, exp = TestBase.get_infer_results(model_path, data_path) 47 | 48 | for i in range(len(res)): 49 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 50 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/gpu/test_bert_emb_v1_gpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import argparse 15 | import collections 16 | import os 17 | import sys 18 | import logging 19 | import struct 20 | import six 21 | 22 | import pytest 23 | import nose 24 | import numpy as np 25 | 26 | from test_gpu_helper import TestModelInferenceGPU 27 | 28 | TestBase = TestModelInferenceGPU(data_path="Data") 29 | 30 | 31 | @pytest.mark.p0 32 | def test_inference_bert_emb_v1_gpu(): 33 | """ 34 | Inference and check value 35 | bert_emb_v1 gpu model 36 | Args: 37 | None 38 | Return: 39 | None 40 | """ 41 | model_name = "bert_emb_v1-paddle" 42 | tmp_path = os.path.join(TestBase.model_root, "hub-ernie") 43 | model_path = os.path.join(tmp_path, model_name) 44 | data_path = os.path.join(tmp_path, "bert-data", "data.json") 45 | delta = 0.0001 46 | 47 | res, exp = TestBase.get_infer_results(model_path, data_path) 48 | 49 | for i in range(len(res)): 50 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 51 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/mkldnn/test_mobilenetv1_mkldnn.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_mkldnn_helper import TestModelInferenceMKLDNN 26 | 27 | TestBase = TestModelInferenceMKLDNN() 28 | 29 | @pytest.mark.p0 30 | def test_inference_mobilenetv1_mkldnn(): 31 | """ 32 | Inference and check value 33 | mobilenetv1 mkldnn model 34 | Args: 35 | None 36 | Return: 37 | None 38 | """ 39 | model_name = "MobileNetV1_pretrained" 40 | tmp_path = os.path.join(TestBase.model_root, "classification") 41 | model_path = os.path.join(tmp_path, model_name, "model") 42 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 43 | delta = 0.0001 44 | 45 | res, exp = TestBase.get_infer_results(model_path, data_path) 46 | 47 | for i in range(len(res)): 48 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 49 | 50 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/mkldnn/test_xception41_mkldnn.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
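# Note on the shared MKLDNN test template: each case builds its model and
# data paths under TestBase.model_root, calls get_infer_results(), and
# compares every flattened output tensor with the saved expectation within an
# absolute tolerance `delta`. Classification cases such as this one use the
# helper's default data root, while the OCR and ERNIE cases pass
# data_path="Data" explicitly, presumably because those models are downloaded
# into a separate Data directory.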
14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_mkldnn_helper import TestModelInferenceMKLDNN 26 | 27 | TestBase = TestModelInferenceMKLDNN() 28 | 29 | @pytest.mark.p0 30 | def test_inference_xception41_mkldnn(): 31 | """ 32 | Inference and check value 33 | xception41 mkldnn model 34 | Args: 35 | None 36 | Return: 37 | None 38 | """ 39 | model_name = "Xception_41_pretrained" 40 | tmp_path = os.path.join(TestBase.model_root, "classification") 41 | model_path = os.path.join(tmp_path, model_name, "model") 42 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 43 | delta = 0.0001 44 | 45 | res, exp = TestBase.get_infer_results(model_path, data_path) 46 | 47 | for i in range(len(res)): 48 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 49 | 50 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/trt_fp32/test_resnet50_trt_fp32.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_trt_fp32_helper import TestModelInferenceTrtFp32 26 | 27 | TestBase = TestModelInferenceTrtFp32() 28 | 29 | 30 | @pytest.mark.p0 31 | def test_inference_resnet50_trt_fp32(): 32 | """ 33 | Inference and check value 34 | resnet50 trt_fp32 model 35 | Args: 36 | None 37 | Return: 38 | None 39 | """ 40 | model_name = "ResNet50_pretrained" 41 | tmp_path = os.path.join(TestBase.model_root, "classification") 42 | model_path = os.path.join(tmp_path, model_name, "model") 43 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 44 | delta = 0.0001 45 | 46 | res, exp = TestBase.get_infer_results(model_path, data_path) 47 | 48 | for i in range(len(res)): 49 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 50 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/mkldnn/test_deeplabv3_mkldnn.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_mkldnn_helper import TestModelInferenceMKLDNN 26 | 27 | TestBase = TestModelInferenceMKLDNN() 28 | 29 | @pytest.mark.p0 30 | def test_inference_deeplabv3_mkldnn(): 31 | """ 32 | Inference and check value 33 | deeplabv3_mobilenetv2 mkldnn model 34 | Args: 35 | None 36 | Return: 37 | None 38 | """ 39 | model_name = "deeplabv3_mobilenetv2" 40 | tmp_path = os.path.join(TestBase.model_root, "segmentation") 41 | model_path = os.path.join(tmp_path, model_name, "model") 42 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 43 | delta = 0.0001 44 | 45 | res, exp = TestBase.get_infer_results(model_path, data_path) 46 | 47 | for i in range(len(res)): 48 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 49 | 50 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/trt_fp32/test_det_mv3_db_trt_fp32.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_trt_fp32_helper import TestModelInferenceTrtFp32 26 | 27 | TestBase = TestModelInferenceTrtFp32(data_path="Data") 28 | 29 | 30 | @pytest.mark.p1 31 | def test_inference_det_mv3_db_trt_fp32(): 32 | """ 33 | Inference and check value 34 | det_mv3_db trt_fp32 model 35 | Args: 36 | None 37 | Return: 38 | None 39 | """ 40 | model_name = "det_mv3_db" 41 | tmp_path = os.path.join(TestBase.model_root, "python-ocr-infer") 42 | model_path = os.path.join(tmp_path, model_name) 43 | data_path = os.path.join(tmp_path, "word_det_data", "data.json") 44 | delta = 0.0001 45 | 46 | res, exp = TestBase.get_infer_results(model_path, data_path) 47 | 48 | for i in range(len(res)): 49 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 50 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/mkldnn/test_seresnext50_mkldnn.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_mkldnn_helper import TestModelInferenceMKLDNN 26 | 27 | TestBase = TestModelInferenceMKLDNN() 28 | 29 | @pytest.mark.p0 30 | def test_inference_seresnext50_mkldnn(): 31 | """ 32 | Inference and check value 33 | seresnext50 mkldnn model 34 | Args: 35 | None 36 | Return: 37 | None 38 | """ 39 | model_name = "SE_ResNeXt50_32x4d_pretrained" 40 | tmp_path = os.path.join(TestBase.model_root, "classification") 41 | model_path = os.path.join(tmp_path, model_name, "model") 42 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 43 | delta = 0.0001 44 | 45 | res, exp = TestBase.get_infer_results(model_path, data_path) 46 | 47 | for i in range(len(res)): 48 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 49 | 50 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/trt_fp32/test_xception41_trt_fp32.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
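# TensorRT counterpart of the native tests: TestModelInferenceTrtFp32
# presumably builds the predictor with Paddle's TensorRT subgraph engine in
# FP32 mode, so results are still expected to match the saved baseline within
# the same tight tolerance (delta = 1e-4) as the plain CPU/GPU runs, since no
# reduced precision is involved.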
14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_trt_fp32_helper import TestModelInferenceTrtFp32 26 | 27 | TestBase = TestModelInferenceTrtFp32() 28 | 29 | 30 | @pytest.mark.p0 31 | def test_inference_xception41_trt_fp32(): 32 | """ 33 | Inference and check value 34 | xception41 trt_fp32 model 35 | Args: 36 | None 37 | Return: 38 | None 39 | """ 40 | model_name = "Xception_41_pretrained" 41 | tmp_path = os.path.join(TestBase.model_root, "classification") 42 | model_path = os.path.join(tmp_path, model_name, "model") 43 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 44 | delta = 0.0001 45 | 46 | res, exp = TestBase.get_infer_results(model_path, data_path) 47 | 48 | for i in range(len(res)): 49 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 50 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/mkldnn/test_hub_ernie_mkldnn.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import argparse 15 | import collections 16 | import os 17 | import sys 18 | import logging 19 | import struct 20 | import six 21 | 22 | import pytest 23 | import nose 24 | import numpy as np 25 | 26 | from test_mkldnn_helper import TestModelInferenceMKLDNN 27 | 28 | TestBase = TestModelInferenceMKLDNN(data_path="Data") 29 | 30 | @pytest.mark.p0 31 | def test_inference_hub_ernie_mkldnn(): 32 | """ 33 | Inference and check value 34 | hub_ernie mkldnn model 35 | Args: 36 | None 37 | Return: 38 | None 39 | """ 40 | model_name = "hub-ernie" 41 | tmp_path = os.path.join(TestBase.model_root, model_name) 42 | model_path = os.path.join(tmp_path, "hub-ernie-model") 43 | data_path = os.path.join(tmp_path, "hub-ernie-data", "data.json") 44 | delta = 0.0001 45 | 46 | res, exp = TestBase.get_infer_results(model_path, data_path) 47 | 48 | for i in range(len(res)): 49 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 50 | 51 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/trt_fp32/test_deeplabv3_trt_fp32.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_trt_fp32_helper import TestModelInferenceTrtFp32 26 | 27 | TestBase = TestModelInferenceTrtFp32() 28 | 29 | 30 | @pytest.mark.p0 31 | def test_inference_deeplabv3_trt_fp32(): 32 | """ 33 | Inference and check value 34 | deeplabv3_mobilenetv2 trt_fp32 model 35 | Args: 36 | None 37 | Return: 38 | None 39 | """ 40 | model_name = "deeplabv3_mobilenetv2" 41 | tmp_path = os.path.join(TestBase.model_root, "segmentation") 42 | model_path = os.path.join(tmp_path, model_name, "model") 43 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 44 | delta = 0.0001 45 | 46 | res, exp = TestBase.get_infer_results(model_path, data_path) 47 | 48 | for i in range(len(res)): 49 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 50 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/trt_fp32/test_mobilenetv1_trt_fp32.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | import os
15 | import sys
16 | import argparse
17 | import logging
18 | import struct
19 | import six
20 | 
21 | import pytest
22 | import nose
23 | import numpy as np
24 | 
25 | from test_trt_fp32_helper import TestModelInferenceTrtFp32
26 | 
27 | TestBase = TestModelInferenceTrtFp32()
28 | 
29 | 
30 | @pytest.mark.p0
31 | def test_inference_mobilenetv1_trt_fp32():
32 |     """
33 |     Inference and check value
34 |     mobilenetv1 trt_fp32 model
35 |     Args:
36 |         None
37 |     Returns:
38 |         None
39 |     """
40 |     model_name = "MobileNetV1_pretrained"
41 |     tmp_path = os.path.join(TestBase.model_root, "classification")
42 |     model_path = os.path.join(tmp_path, model_name, "model")
43 |     data_path = os.path.join(tmp_path, model_name, "data/data.json")
44 |     delta = 0.0001
45 | 
46 |     res, exp = TestBase.get_infer_results(model_path, data_path)
47 | 
48 |     for i in range(len(res)):
49 |         TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta)
50 | 
--------------------------------------------------------------------------------
/inference/inference_benchmark/cc/PyTorch/src/clas_benchmark.cc:
--------------------------------------------------------------------------------
1 | #include "./torch_helper.h"
2 | 
3 | namespace torch_infer {
4 | torch::Device LoadModel(torch::jit::script::Module *module) {
5 |     torch::Device device = torch::Device(FLAGS_use_gpu ? "cuda:0" : "cpu");
6 |     module->to(device);
7 |     return device;
8 | }
9 | 
10 | double Inference(torch::jit::script::Module *module,
11 |                  torch::Device device) {
12 |     std::cout << "create input tensor..." << std::endl;
13 |     // Create a vector of inputs.
14 |     std::vector<torch::jit::IValue> inputs;
15 |     inputs.push_back(torch::ones({FLAGS_batch_size, 3, 224, 224}).to(device));
16 | 
17 |     // warmup
18 |     std::cout << "warm up..." << std::endl;
19 |     for (int i = 0; i < FLAGS_warmup_times; ++i) {
20 |         at::Tensor output = module->forward(inputs).toTensor();
21 |     }
22 | 
23 |     // main process
24 |     std::cout << "start to inference..." << std::endl;
25 |     Timer pred_timer;    // init prediction timer
26 |     pred_timer.start();  // start timer
27 |     for (int i = 0; i < FLAGS_repeats; ++i) {
28 |         at::Tensor output = module->forward(inputs).toTensor();
29 |     }
30 |     pred_timer.stop();   // stop timer
31 |     return pred_timer.report();
32 | }
33 | 
34 | int RunDemo() {
35 |     torch::jit::script::Module module = torch::jit::load(FLAGS_model_path);
36 |     torch::Device device = LoadModel(&module);
37 |     auto total_time = Inference(&module, device);
38 |     SummaryConfig(total_time);
39 |     return 0;
40 | }
41 | 
42 | }  // namespace torch_infer
43 | 
44 | 
45 | int main(int argc, char **argv) {
46 |     gflags::ParseCommandLineFlags(&argc, &argv, true);
47 |     torch_infer::RunDemo();
48 |     return 0;
49 | }
50 | 
51 | 
52 | 
--------------------------------------------------------------------------------
/inference/inference_api_test/cpp_api_test/bin/run-new-api-case-mini.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | set -eo pipefail
4 | 
5 | ROOT=`dirname "$0"`
6 | ROOT=`cd "$ROOT/.."; pwd`
7 | export OUTPUT=$ROOT/output
8 | export OUTPUT_BIN=$ROOT/build
9 | export DATA_ROOT=$ROOT/data
10 | export TOOLS_ROOT=$ROOT/tools
11 | export CASE_ROOT=$ROOT/bin
12 | export gpu_type=`nvidia-smi -q | grep "Product Name" | head -n 1 | awk '{print $NF}'`
13 | 
14 | mkdir -p $DATA_ROOT
15 | cd $DATA_ROOT
16 | if [ !
-f PaddleClas/infer_static/AlexNet/__model__ ]; then 17 | echo "==== Download PaddleClas data and models ====" 18 | wget --no-proxy -q https://sys-p0.bj.bcebos.com/Paddle-UnitTest-Model/PaddleClas.tgz --no-check-certificate 19 | tar -zxf PaddleClas.tgz 20 | fi 21 | 22 | if [ ! -f PaddleDetection/infer_static/yolov3_darknet/__model__ ]; then 23 | echo "==== Download PaddleDetection data and models ====" 24 | wget --no-proxy -q https://sys-p0.bj.bcebos.com/Paddle-UnitTest-Model/PaddleDetection.tgz --no-check-certificate 25 | tar -zxf PaddleDetection.tgz 26 | fi 27 | 28 | if [ ! -f PaddleOCR/ch_ppocr_mobile_v1.1_cls_infer/model ]; then 29 | echo "==== Download PaddleOCR data and models ====" 30 | wget --no-proxy -q https://sys-p0.bj.bcebos.com/Paddle-UnitTest-Model/PaddleOCR.tgz --no-check-certificate 31 | tar -zxf PaddleOCR.tgz 32 | fi 33 | 34 | if [ ! -f PaddleSeg/infer_static/deeplabv3p/__model__ ]; then 35 | echo "==== Download PaddleSeg data and models ====" 36 | wget --no-proxy -q https://sys-p0.bj.bcebos.com/Paddle-UnitTest-Model/PaddleSeg.tgz --no-check-certificate 37 | tar -zxf PaddleSeg.tgz 38 | fi 39 | 40 | cd - 41 | 42 | bash $CASE_ROOT/pd-clas-test-mini.sh 43 | bash $CASE_ROOT/pd-rcnn-test-mini.sh 44 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/trt_fp32/test_seresnext50_trt_fp32.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_trt_fp32_helper import TestModelInferenceTrtFp32 26 | 27 | TestBase = TestModelInferenceTrtFp32() 28 | 29 | 30 | @pytest.mark.p0 31 | def test_inference_seresnext50_trt_fp32(): 32 | """ 33 | Inference and check value 34 | seresnext50 trt_fp32 model 35 | Args: 36 | None 37 | Return: 38 | None 39 | """ 40 | model_name = "SE_ResNeXt50_32x4d_pretrained" 41 | tmp_path = os.path.join(TestBase.model_root, "classification") 42 | model_path = os.path.join(tmp_path, model_name, "model") 43 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 44 | delta = 0.0001 45 | 46 | res, exp = TestBase.get_infer_results(model_path, data_path) 47 | 48 | for i in range(len(res)): 49 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 50 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/cpu/test_rec_chinese_common_train_cpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_cpu_helper import TestModelInferenceCPU 26 | 27 | TestBase = TestModelInferenceCPU(data_path="Data") 28 | 29 | @pytest.mark.p0 30 | def test_inference_rec_chinese_common_train_cpu(): 31 | """ 32 | Inference and check value 33 | rec_chinese_common_train cpu model 34 | Args: 35 | None 36 | Return: 37 | None 38 | """ 39 | model_name = "rec_chinese_common_train" 40 | tmp_path = os.path.join(TestBase.model_root, "python-ocr-infer") 41 | model_path = os.path.join(tmp_path, model_name) 42 | data_path = os.path.join(tmp_path, "word_rec_data", "data.json") 43 | delta = 0.0001 44 | 45 | res, exp = TestBase.get_infer_results(model_path, data_path) 46 | 47 | for i in range(len(res)): 48 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 49 | -------------------------------------------------------------------------------- /inference/inference_api_test/cpp_api_test/bin/run-new-api-case-mini-trt.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eo pipefail 4 | 5 | ROOT=`dirname "$0"` 6 | ROOT=`cd "$ROOT/.."; pwd` 7 | export OUTPUT=$ROOT/output 8 | export OUTPUT_BIN=$ROOT/build 9 | export DATA_ROOT=$ROOT/data 10 | export TOOLS_ROOT=$ROOT/tools 11 | export CASE_ROOT=$ROOT/bin 12 | export gpu_type=`nvidia-smi -q | grep "Product Name" | head -n 1 | awk '{print $NF}'` 13 | 14 | mkdir -p $DATA_ROOT 15 | cd $DATA_ROOT 16 | if [ ! -f PaddleClas/infer_static/AlexNet/__model__ ]; then 17 | echo "==== Download PaddleClas data and models ====" 18 | wget --no-proxy -q https://sys-p0.bj.bcebos.com/Paddle-UnitTest-Model/PaddleClas.tgz --no-check-certificate 19 | tar -zxf PaddleClas.tgz 20 | fi 21 | 22 | if [ ! -f PaddleDetection/infer_static/yolov3_darknet/__model__ ]; then 23 | echo "==== Download PaddleDetection data and models ====" 24 | wget --no-proxy -q https://sys-p0.bj.bcebos.com/Paddle-UnitTest-Model/PaddleDetection.tgz --no-check-certificate 25 | tar -zxf PaddleDetection.tgz 26 | fi 27 | 28 | if [ ! -f PaddleOCR/ch_ppocr_mobile_v1.1_cls_infer/model ]; then 29 | echo "==== Download PaddleOCR data and models ====" 30 | wget --no-proxy -q https://sys-p0.bj.bcebos.com/Paddle-UnitTest-Model/PaddleOCR.tgz --no-check-certificate 31 | tar -zxf PaddleOCR.tgz 32 | fi 33 | 34 | if [ ! 
-f PaddleSeg/infer_static/deeplabv3p/__model__ ]; then 35 | echo "==== Download PaddleSeg data and models ====" 36 | wget --no-proxy -q https://sys-p0.bj.bcebos.com/Paddle-UnitTest-Model/PaddleSeg.tgz --no-check-certificate 37 | tar -zxf PaddleSeg.tgz 38 | fi 39 | 40 | cd - 41 | 42 | bash $CASE_ROOT/pd-clas-test-mini-trt.sh 43 | bash $CASE_ROOT/pd-rcnn-test-mini-trt.sh 44 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/gpu/test_rec_chinese_common_train_gpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_gpu_helper import TestModelInferenceGPU 26 | 27 | TestBase = TestModelInferenceGPU(data_path="Data") 28 | 29 | 30 | @pytest.mark.p1 31 | def test_inference_rec_chinese_common_train_gpu(): 32 | """ 33 | Inference and check value 34 | rec_chinese_common_train gpu model 35 | Args: 36 | None 37 | Return: 38 | None 39 | """ 40 | model_name = "rec_chinese_common_train" 41 | tmp_path = os.path.join(TestBase.model_root, "python-ocr-infer") 42 | model_path = os.path.join(tmp_path, model_name) 43 | data_path = os.path.join(tmp_path, "word_rec_data", "data.json") 44 | delta = 0.0001 45 | 46 | res, exp = TestBase.get_infer_results(model_path, data_path) 47 | 48 | for i in range(len(res)): 49 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 50 | -------------------------------------------------------------------------------- /inference/inference_api_test/cpp_api_test/bin/run-new-api-case-jetson.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eo pipefail 4 | 5 | ROOT=`dirname "$0"` 6 | ROOT=`cd "$ROOT/.."; pwd` 7 | export OUTPUT=$ROOT/output 8 | export OUTPUT_BIN=$ROOT/build 9 | export DATA_ROOT=$ROOT/data 10 | export TOOLS_ROOT=$ROOT/tools 11 | export CASE_ROOT=$ROOT/bin 12 | export gpu_type=$1 13 | 14 | mkdir -p $DATA_ROOT 15 | cd $DATA_ROOT 16 | if [ ! -f PaddleClas/infer_static/AlexNet/__model__ ]; then 17 | echo "==== Download PaddleClas data and models ====" 18 | wget --no-proxy -q https://sys-p0.bj.bcebos.com/Paddle-UnitTest-Model/PaddleClas.tgz --no-check-certificate 19 | tar -zxf PaddleClas.tgz 20 | fi 21 | 22 | if [ ! -f PaddleDetection/infer_static/yolov3_darknet/__model__ ]; then 23 | echo "==== Download PaddleDetection data and models ====" 24 | wget --no-proxy -q https://sys-p0.bj.bcebos.com/Paddle-UnitTest-Model/PaddleDetection.tgz --no-check-certificate 25 | tar -zxf PaddleDetection.tgz 26 | fi 27 | 28 | #if [ ! 
-f PaddleOCR/ch_ppocr_mobile_v1.1_cls_infer/model ]; then
29 | #    echo "==== Download PaddleOCR data and models ===="
30 | #    wget --no-proxy -q https://sys-p0.bj.bcebos.com/Paddle-UnitTest-Model/PaddleOCR.tgz --no-check-certificate
31 | #    tar -zxf PaddleOCR.tgz
32 | #fi
33 | #
34 | #if [ ! -f PaddleSeg/infer_static/deeplabv3p/__model__ ]; then
35 | #    echo "==== Download PaddleSeg data and models ===="
36 | #    wget --no-proxy -q https://sys-p0.bj.bcebos.com/Paddle-UnitTest-Model/PaddleSeg.tgz --no-check-certificate
37 | #    tar -zxf PaddleSeg.tgz
38 | #fi
39 | 
40 | cd -
41 | 
42 | # bash $CASE_ROOT/pd-yolo-test-jetson.sh  # migrated to the paddletest repo
43 | bash $CASE_ROOT/pd-clas-test-jetson.sh
44 | bash $CASE_ROOT/pd-rcnn-test-jetson.sh
45 | 
--------------------------------------------------------------------------------
/inference/inference_api_test/cpp_api_test/bin/run-new-api-case-mini-native.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | set -eo pipefail
4 | 
5 | ROOT=`dirname "$0"`
6 | ROOT=`cd "$ROOT/.."; pwd`
7 | export OUTPUT=$ROOT/output
8 | export OUTPUT_BIN=$ROOT/build
9 | export DATA_ROOT=$ROOT/data
10 | export TOOLS_ROOT=$ROOT/tools
11 | export CASE_ROOT=$ROOT/bin
12 | export gpu_type=`nvidia-smi -q | grep "Product Name" | head -n 1 | awk '{print $NF}'`
13 | 
14 | mkdir -p $DATA_ROOT
15 | cd $DATA_ROOT
16 | if [ ! -f PaddleClas/infer_static/AlexNet/__model__ ]; then
17 |     echo "==== Download PaddleClas data and models ===="
18 |     wget --no-proxy -q https://sys-p0.bj.bcebos.com/Paddle-UnitTest-Model/PaddleClas.tgz --no-check-certificate
19 |     tar -zxf PaddleClas.tgz
20 | fi
21 | 
22 | if [ ! -f PaddleDetection/infer_static/yolov3_darknet/__model__ ]; then
23 |     echo "==== Download PaddleDetection data and models ===="
24 |     wget --no-proxy -q https://sys-p0.bj.bcebos.com/Paddle-UnitTest-Model/PaddleDetection.tgz --no-check-certificate
25 |     tar -zxf PaddleDetection.tgz
26 | fi
27 | 
28 | if [ ! -f PaddleOCR/ch_ppocr_mobile_v1.1_cls_infer/model ]; then
29 |     echo "==== Download PaddleOCR data and models ===="
30 |     wget --no-proxy -q https://sys-p0.bj.bcebos.com/Paddle-UnitTest-Model/PaddleOCR.tgz --no-check-certificate
31 |     tar -zxf PaddleOCR.tgz
32 | fi
33 | 
34 | if [ ! -f PaddleSeg/infer_static/deeplabv3p/__model__ ]; then
35 |     echo "==== Download PaddleSeg data and models ===="
36 |     wget --no-proxy -q https://sys-p0.bj.bcebos.com/Paddle-UnitTest-Model/PaddleSeg.tgz --no-check-certificate
37 |     tar -zxf PaddleSeg.tgz
38 | fi
39 | 
40 | cd -
41 | 
42 | bash $CASE_ROOT/pd-clas-test-mini-native.sh
43 | bash $CASE_ROOT/pd-rcnn-test-mini-native.sh
--------------------------------------------------------------------------------
/inference/inference_api_test/python_api_test/tests/cpu/test_rec_r34_vd_tps_bilstm_attn_cpu.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | #     http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_cpu_helper import TestModelInferenceCPU 26 | 27 | TestBase = TestModelInferenceCPU(data_path="Data") 28 | 29 | @pytest.mark.p0 30 | def test_inference_rec_r34_vd_tps_bilstm_attn_cpu(): 31 | """ 32 | Inference and check value 33 | rec_r34_vd_tps_bilstm_attn cpu model 34 | Args: 35 | None 36 | Return: 37 | None 38 | """ 39 | model_name = "rec_r34_vd_tps_bilstm_attn" 40 | tmp_path = os.path.join(TestBase.model_root, "python-ocr-infer") 41 | model_path = os.path.join(tmp_path, model_name) 42 | data_path = os.path.join(tmp_path, "word_rec_data_3_32_100", "data.json") 43 | delta = 0.0004 44 | 45 | res, exp = TestBase.get_infer_results(model_path, data_path) 46 | 47 | for i in range(len(res)): 48 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 49 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/gpu/test_rec_r34_vd_tps_bilstm_attn_gpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
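# The TPS + BiLSTM + attention recognizer is the most numerically sensitive
# model in this suite: the CPU variant above allows delta = 0.0004, while this
# GPU variant allows delta = 0.0009, presumably because the GPU recurrent and
# attention kernels accumulate sums in a different order than the CPU
# implementation.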
14 | import os
15 | import sys
16 | import argparse
17 | import logging
18 | import struct
19 | import six
20 | 
21 | import pytest
22 | import nose
23 | import numpy as np
24 | 
25 | from test_gpu_helper import TestModelInferenceGPU
26 | 
27 | TestBase = TestModelInferenceGPU(data_path="Data")
28 | 
29 | 
30 | @pytest.mark.p1
31 | def test_inference_rec_r34_vd_tps_bilstm_attn_gpu():
32 |     """
33 |     Inference and check value
34 |     rec_r34_vd_tps_bilstm_attn gpu model
35 |     Args:
36 |         None
37 |     Returns:
38 |         None
39 |     """
40 |     model_name = "rec_r34_vd_tps_bilstm_attn"
41 |     tmp_path = os.path.join(TestBase.model_root, "python-ocr-infer")
42 |     model_path = os.path.join(tmp_path, model_name)
43 |     data_path = os.path.join(tmp_path, "word_rec_data_3_32_100", "data.json")
44 |     delta = 0.0009
45 | 
46 |     res, exp = TestBase.get_infer_results(model_path, data_path)
47 | 
48 |     for i in range(len(res)):
49 |         TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta)
50 | 
--------------------------------------------------------------------------------
/inference/inference_api_test/cpp_api_test/bin/run-new-api-case-jetson-trt.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | set -eo pipefail
4 | 
5 | ROOT=`dirname "$0"`
6 | ROOT=`cd "$ROOT/.."; pwd`
7 | export OUTPUT=$ROOT/output
8 | export OUTPUT_BIN=$ROOT/build
9 | export DATA_ROOT=$ROOT/data
10 | export TOOLS_ROOT=$ROOT/tools
11 | export CASE_ROOT=$ROOT/bin
12 | export gpu_type=$1
13 | 
14 | mkdir -p $DATA_ROOT
15 | cd $DATA_ROOT
16 | if [ ! -f PaddleClas/infer_static/AlexNet/__model__ ]; then
17 |     echo "==== Download PaddleClas data and models ===="
18 |     wget --no-proxy -q https://sys-p0.bj.bcebos.com/Paddle-UnitTest-Model/PaddleClas.tgz --no-check-certificate
19 |     tar -zxf PaddleClas.tgz
20 | fi
21 | 
22 | if [ ! -f PaddleDetection/infer_static/yolov3_darknet/__model__ ]; then
23 |     echo "==== Download PaddleDetection data and models ===="
24 |     wget --no-proxy -q https://sys-p0.bj.bcebos.com/Paddle-UnitTest-Model/PaddleDetection.tgz --no-check-certificate
25 |     tar -zxf PaddleDetection.tgz
26 | fi
27 | 
28 | #if [ ! -f PaddleOCR/ch_ppocr_mobile_v1.1_cls_infer/model ]; then
29 | #    echo "==== Download PaddleOCR data and models ===="
30 | #    wget --no-proxy -q https://sys-p0.bj.bcebos.com/Paddle-UnitTest-Model/PaddleOCR.tgz --no-check-certificate
31 | #    tar -zxf PaddleOCR.tgz
32 | #fi
33 | #
34 | #if [ ! -f PaddleSeg/infer_static/deeplabv3p/__model__ ]; then
35 | #    echo "==== Download PaddleSeg data and models ===="
36 | #    wget --no-proxy -q https://sys-p0.bj.bcebos.com/Paddle-UnitTest-Model/PaddleSeg.tgz --no-check-certificate
37 | #    tar -zxf PaddleSeg.tgz
38 | #fi
39 | 
40 | cd -
41 | 
42 | # bash $CASE_ROOT/pd-yolo-test-jetson.sh  # migrated to the paddletest repo
43 | bash $CASE_ROOT/pd-clas-test-jetson-trt.sh
44 | bash $CASE_ROOT/pd-rcnn-test-jetson-trt.sh
45 | 
46 | 
--------------------------------------------------------------------------------
/inference/inference_api_test/python_api_test/tests/mkldnn/test_rec_chinese_common_train_mkldnn.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_mkldnn_helper import TestModelInferenceMKLDNN 26 | 27 | TestBase = TestModelInferenceMKLDNN(data_path="Data") 28 | 29 | @pytest.mark.p1 30 | def test_inference_rec_chinese_common_train_mkldnn(): 31 | """ 32 | Inference and check value 33 | rec_chinese_common_train mkldnn model 34 | Args: 35 | None 36 | Return: 37 | None 38 | """ 39 | model_name = "rec_chinese_common_train" 40 | tmp_path = os.path.join(TestBase.model_root, "python-ocr-infer") 41 | model_path = os.path.join(tmp_path, model_name) 42 | data_path = os.path.join(tmp_path, "word_rec_data", "data.json") 43 | delta = 0.0001 44 | 45 | res, exp = TestBase.get_infer_results(model_path, data_path) 46 | 47 | for i in range(len(res)): 48 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 49 | 50 | -------------------------------------------------------------------------------- /inference/inference_api_test/cpp_api_test/src/external-cmake/gtest-cpp.cmake: -------------------------------------------------------------------------------- 1 | find_package(Git REQUIRED) 2 | #include (ExternalProject) 3 | message("${CMAKE_BUILD_TYPE}") 4 | add_custom_target(thirdparty_gtest) 5 | SET(GTEST_PREFIX_DIR ${CMAKE_CURRENT_BINARY_DIR}/gtest) 6 | SET(GTEST_SOURCE_DIR ${CMAKE_CURRENT_BINARY_DIR}/gtest/src/extern_gtest) 7 | SET(GTEST_INSTALL_DIR ${CMAKE_CURRENT_BINARY_DIR}/install/gtest) 8 | SET(GTEST_INCLUDE_DIR "${GTEST_INSTALL_DIR}/include" CACHE PATH "gtest include directory." FORCE) 9 | set(GTEST_REPOSITORY https://github.com/google/googletest.git) 10 | set(GTEST_TAG release-1.8.1) 11 | INCLUDE_DIRECTORIES(${GTEST_INCLUDE_DIR}) 12 | IF(WIN32) 13 | set(GTEST_LIBRARIES 14 | "${GTEST_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}/gtest.lib" CACHE FILEPATH "gtest libraries." FORCE) 15 | set(GTEST_MAIN_LIBRARIES 16 | "${GTEST_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}/gtest_main.lib" CACHE FILEPATH "gtest main libraries." FORCE) 17 | ELSE(WIN32) 18 | set(GTEST_LIBRARIES 19 | "${GTEST_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}/libgtest.a" CACHE FILEPATH "gtest libraries." FORCE) 20 | set(GTEST_MAIN_LIBRARIES 21 | "${GTEST_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}/libgtest_main.a" CACHE FILEPATH "gtest main libraries." 
FORCE) 22 | ENDIF(WIN32) 23 | ExternalProject_Add( 24 | extern_gtest 25 | PREFIX gtest 26 | GIT_REPOSITORY ${GTEST_REPOSITORY} 27 | GIT_TAG ${GTEST_TAG} 28 | DOWNLOAD_DIR "${DOWNLOAD_LOCATION}" 29 | UPDATE_COMMAND "" 30 | CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${GTEST_INSTALL_DIR} 31 | -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON 32 | -DCMAKE_BUILD_TYPE:STRING=Release 33 | ) 34 | add_dependencies(thirdparty_gtest extern_gtest) -------------------------------------------------------------------------------- /models/args/run_test: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | export OPENBLAS_NUM_THREADS=1 4 | export CPU_NUM=1 5 | 6 | #pip install req 7 | pip install -r requirements.txt 8 | 9 | # get args 10 | ROOT_PATH=$1 11 | cd $ROOT_PATH 12 | 13 | # run models 14 | while read line 15 | do 16 | echo ${line} 17 | MODEL=`echo ${line} | awk -F [' \t'] '{print $1}'` 18 | RUN_PY=`echo ${line} | awk -F [' \t'] '{print $2}'` 19 | FUNCTION=`echo ${line} | awk -F [' \t'] '{print $3}'` 20 | COM_TYPE=`echo ${line} | awk -F [' \t'] '{print $4}'` 21 | TMP=`echo ${MODEL} | awk '{gsub("/", "_"); print $0}'` 22 | LOG_NAME=${TMP}-${RUN_PY}-${FUNCTION}-${COM_TYPE} 23 | 24 | cd models/${MODEL} 25 | cp ${ROOT_PATH}/run_args.py ./ 26 | cp ${ROOT_PATH}/run_args_dygraph.py ./ 27 | cp ${ROOT_PATH}/${MODEL}/run_prepare.sh ./ 28 | cp ${ROOT_PATH}/${MODEL}/test_args.py ./ 29 | 30 | sh run_prepare.sh ${ROOT_PATH} 31 | pwd 32 | ls -l 33 | if [ ${COM_TYPE} == "1" ]; then 34 | python run_args.py ${RUN_PY} ${FUNCTION} ${COM_TYPE} 1>${LOG_NAME}.log 2>${LOG_NAME}.log.wf 35 | elif [ ${COM_TYPE} == "2" ]; then 36 | python run_args_dygraph.py ${RUN_PY} ${FUNCTION} ${COM_TYPE} 1>${LOG_NAME}.log 2>${LOG_NAME}.log.wf 37 | fi 38 | echo "==================================" 39 | echo ${MODEL} 40 | cat ${LOG_NAME}.log 41 | cat ${LOG_NAME}.log.wf 42 | mv ${LOG_NAME}.log ${ROOT_PATH}/log/ 43 | mv ${LOG_NAME}.log.wf ${ROOT_PATH}/log/ 44 | 45 | #/bin/rm -rf * 46 | 47 | cd ${ROOT_PATH} 48 | done < ${ROOT_PATH}/conf/changed_models.conf 49 | 50 | ls -l ${ROOT_PATH}/log/ 51 | 52 | FF=`cat ${ROOT_PATH}/log/* | grep "FAIL" | wc -l` 53 | if [ $FF -gt 0 ] 54 | then 55 | exit 1 56 | fi 57 | -------------------------------------------------------------------------------- /inference/inference_api_test/cpp_api_test/bin/run-new-api-case-jetson-native.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eo pipefail 4 | 5 | ROOT=`dirname "$0"` 6 | ROOT=`cd "$ROOT/.."; pwd` 7 | export OUTPUT=$ROOT/output 8 | export OUTPUT_BIN=$ROOT/build 9 | export DATA_ROOT=$ROOT/data 10 | export TOOLS_ROOT=$ROOT/tools 11 | export CASE_ROOT=$ROOT/bin 12 | export gpu_type=$1 13 | 14 | mkdir -p $DATA_ROOT 15 | cd $DATA_ROOT 16 | if [ ! -f PaddleClas/infer_static/AlexNet/__model__ ]; then 17 | echo "==== Download PaddleClas data and models ====" 18 | wget --no-proxy -q https://sys-p0.bj.bcebos.com/Paddle-UnitTest-Model/PaddleClas.tgz --no-check-certificate 19 | tar -zxf PaddleClas.tgz 20 | fi 21 | 22 | if [ ! -f PaddleDetection/infer_static/yolov3_darknet/__model__ ]; then 23 | echo "==== Download PaddleDetection data and models ====" 24 | wget --no-proxy -q https://sys-p0.bj.bcebos.com/Paddle-UnitTest-Model/PaddleDetection.tgz --no-check-certificate 25 | tar -zxf PaddleDetection.tgz 26 | fi 27 | 28 | #if [ ! 
-f PaddleOCR/ch_ppocr_mobile_v1.1_cls_infer/model ]; then
29 | #    echo "==== Download PaddleOCR data and models ===="
30 | #    wget --no-proxy -q https://sys-p0.bj.bcebos.com/Paddle-UnitTest-Model/PaddleOCR.tgz --no-check-certificate
31 | #    tar -zxf PaddleOCR.tgz
32 | #fi
33 | #
34 | #if [ ! -f PaddleSeg/infer_static/deeplabv3p/__model__ ]; then
35 | #    echo "==== Download PaddleSeg data and models ===="
36 | #    wget --no-proxy -q https://sys-p0.bj.bcebos.com/Paddle-UnitTest-Model/PaddleSeg.tgz --no-check-certificate
37 | #    tar -zxf PaddleSeg.tgz
38 | #fi
39 | 
40 | cd -
41 | 
42 | # bash $CASE_ROOT/pd-yolo-test-jetson.sh  # migrated to the paddletest repo
43 | bash $CASE_ROOT/pd-clas-test-jetson-native.sh
44 | bash $CASE_ROOT/pd-rcnn-test-jetson-native.sh
45 | 
46 | 
--------------------------------------------------------------------------------
/inference/inference_api_test/python_api_test/tests/mkldnn/test_rec_r34_vd_tps_bilstm_attn_mkldnn.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | #     http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | import os
15 | import sys
16 | import argparse
17 | import logging
18 | import struct
19 | import six
20 | 
21 | import pytest
22 | import nose
23 | import numpy as np
24 | 
25 | from test_mkldnn_helper import TestModelInferenceMKLDNN
26 | 
27 | TestBase = TestModelInferenceMKLDNN(data_path="Data")
28 | 
29 | @pytest.mark.p1
30 | def test_inference_rec_r34_vd_tps_bilstm_attn_mkldnn():
31 |     """
32 |     Inference and check value
33 |     rec_r34_vd_tps_bilstm_attn mkldnn model
34 |     Args:
35 |         None
36 |     Returns:
37 |         None
38 |     """
39 |     model_name = "rec_r34_vd_tps_bilstm_attn"
40 |     tmp_path = os.path.join(TestBase.model_root, "python-ocr-infer")
41 |     model_path = os.path.join(tmp_path, model_name)
42 |     data_path = os.path.join(tmp_path, "word_rec_data_3_32_100", "data.json")
43 |     delta = 0.0001
44 | 
45 |     res, exp = TestBase.get_infer_results(model_path, data_path)
46 | 
47 |     for i in range(len(res)):
48 |         TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta)
49 | 
50 | 
51 | 
--------------------------------------------------------------------------------
/inference/inference_api_test/cpp_api_test/bin/run-new-api-case.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | set -eo pipefail
4 | 
5 | ROOT=`dirname "$0"`
6 | ROOT=`cd "$ROOT/.."; pwd`
7 | export OUTPUT=$ROOT/output
8 | export OUTPUT_BIN=$ROOT/build
9 | export DATA_ROOT=$ROOT/data
10 | export TOOLS_ROOT=$ROOT/tools
11 | export CASE_ROOT=$ROOT/bin
12 | export gpu_type=`nvidia-smi -q | grep "Product Name" | head -n 1 | awk '{print $NF}'`
13 | 
14 | mkdir -p $DATA_ROOT
15 | cd $DATA_ROOT
16 | if [ !
-f PaddleClas/infer_static/AlexNet/__model__ ]; then 17 | echo "==== Download PaddleClas data and models ====" 18 | wget --no-proxy -q https://sys-p0.bj.bcebos.com/Paddle-UnitTest-Model/PaddleClas.tgz --no-check-certificate 19 | tar -zxf PaddleClas.tgz 20 | fi 21 | 22 | if [ ! -f PaddleDetection/infer_static/yolov3_darknet/__model__ ]; then 23 | echo "==== Download PaddleDetection data and models ====" 24 | wget --no-proxy -q https://sys-p0.bj.bcebos.com/Paddle-UnitTest-Model/PaddleDetection.tgz --no-check-certificate 25 | tar -zxf PaddleDetection.tgz 26 | fi 27 | 28 | if [ ! -f PaddleOCR/ch_ppocr_mobile_v1.1_cls_infer/model ]; then 29 | echo "==== Download PaddleOCR data and models ====" 30 | wget --no-proxy -q https://sys-p0.bj.bcebos.com/Paddle-UnitTest-Model/PaddleOCR.tgz --no-check-certificate 31 | tar -zxf PaddleOCR.tgz 32 | fi 33 | 34 | if [ ! -f PaddleSeg/infer_static/deeplabv3p/__model__ ]; then 35 | echo "==== Download PaddleSeg data and models ====" 36 | wget --no-proxy -q https://sys-p0.bj.bcebos.com/Paddle-UnitTest-Model/PaddleSeg.tgz --no-check-certificate 37 | tar -zxf PaddleSeg.tgz 38 | fi 39 | 40 | cd - 41 | 42 | bash $CASE_ROOT/pd-yolo-test.sh 43 | bash $CASE_ROOT/pd-clas-test.sh 44 | bash $CASE_ROOT/pd-rcnn-test.sh 45 | bash $CASE_ROOT/pd-solo-test.sh 46 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/gpu/test_dete_dist_yolov3_v1_gpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
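# Slim (distillation/pruning) models are stored under python-slim-infer and
# ship without inputs of their own; the test reuses data.json from the
# corresponding base detector under python-model-infer, since the distilled
# network expects the same input layout as its teacher.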
14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_gpu_helper import TestModelInferenceGPU 26 | 27 | TestBase = TestModelInferenceGPU(data_path="Data") 28 | 29 | 30 | @pytest.mark.p0 31 | def test_inference_dete_dist_yolov3_v1_gpu(): 32 | """ 33 | Inference and check value 34 | dete_dist_yolov3_v1 gpu model 35 | Args: 36 | None 37 | Return: 38 | None 39 | """ 40 | model_name = "dete_dist_yolov3_v1_uncombined" 41 | model_path = os.path.join(TestBase.model_root, "python-slim-infer", 42 | model_name) 43 | data_path = os.path.join( 44 | TestBase.model_root, 45 | "python-model-infer/Detection/yolov3_darknet/data/data.json") 46 | delta = 0.001 47 | 48 | res, exp = TestBase.get_infer_results(model_path, data_path) 49 | 50 | for i in range(len(res)): 51 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 52 | -------------------------------------------------------------------------------- /models/args/PaddleNLP/pretrain_language_models/XLNet/test_args.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | this is xlnet args 4 | """ 5 | 6 | finetune_squad = { 7 | "model_config_path": ["xlnet_cased_L-24_H-1024_A-16/xlnet_config.json"], 8 | "spiece_model_file": ["xlnet_cased_L-24_H-1024_A-16/spiece.model"], 9 | "init_checkpoint": ["xlnet_cased_L-24_H-1024_A-16/params"], 10 | "checkpoints": ["checkpoints_squad_2.0"], 11 | "epoch": [1], 12 | "train_steps": [10], 13 | "warmup_steps": [5], 14 | "save_steps": [5], 15 | "skip_steps": [5], 16 | "train_file": ["args_test_data/squad2.0/train-v2.0.json"], 17 | "predict_file": ["args_test_data/squad2.0/dev-v2.0.json"], 18 | "random_seed": [0, 100], 19 | "use_cuda": [True], 20 | "uncased": [False], 21 | "verbose": [True] 22 | } 23 | 24 | finetune_cls = { 25 | "do_train": [True], 26 | "do_eval": [False, True], 27 | "do_predict": [True, False], 28 | "task_name": ["sts-b"], 29 | "data_dir": ["args_test_data/STS-B"], 30 | "checkpoints": ["checkpoints_sts-b"], 31 | "uncased": [False], 32 | "spiece_model_file": ["xlnet_cased_L-24_H-1024_A-16/spiece.model"], 33 | "model_config_path": ["xlnet_cased_L-24_H-1024_A-16/xlnet_config.json"], 34 | "init_pretraining_params": ["xlnet_cased_L-24_H-1024_A-16/params"], 35 | "max_seq_length": [128], 36 | "train_batch_size": [8], 37 | "learning_rate": [5e-5], 38 | "predict_dir": ["exp/sts-b"], 39 | "skip_steps": [10], 40 | "train_steps": [1200], 41 | "warmup_steps": [120], 42 | "save_steps": [600], 43 | "is_regression": [True], 44 | "epoch": [1], 45 | "verbose": [True], 46 | "random_seed": [0, 100], 47 | "use_cuda": [True], 48 | "shuffle": [True, False], 49 | "eval_batch_size": [2], 50 | } 51 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/trt_fp32/test_mask_rcnn_trt_fp32.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_trt_fp32_helper import TestModelInferenceTrtFp32 26 | 27 | TestBase = TestModelInferenceTrtFp32() 28 | 29 | 30 | @pytest.mark.p0 31 | def test_inference_mask_rcnn_trt_fp32(): 32 | """ 33 | Inference and check value 34 | mask_rcnn trt_fp32 model 35 | Args: 36 | None 37 | Return: 38 | None 39 | """ 40 | model_name = "mask_rcnn_r50_1x" 41 | tmp_path = os.path.join(TestBase.model_root, "Detection") 42 | model_path = os.path.join(tmp_path, model_name, "model") 43 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 44 | delta = 0.001 45 | min_subgraph_size = 40 46 | 47 | res, exp = TestBase.get_infer_results(model_path, data_path, 48 | min_subgraph_size) 49 | 50 | for i in range(len(res)): 51 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 52 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/trt_fp32/test_faster_rcnn_trt_fp32.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
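# The two-stage detectors (faster_rcnn / mask_rcnn) pass min_subgraph_size=40
# to the helper: Paddle's TensorRT integration only converts subgraphs with at
# least that many nodes, so the many small subgraphs in the RCNN head stay on
# the native GPU path, presumably to avoid offloading ops that TensorRT
# cannot handle.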
14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_trt_fp32_helper import TestModelInferenceTrtFp32 26 | 27 | TestBase = TestModelInferenceTrtFp32() 28 | 29 | 30 | @pytest.mark.p0 31 | def test_inference_faster_rcnn_trt_fp32(): 32 | """ 33 | Inference and check value 34 | faster_rcnn trt_fp32 model 35 | Args: 36 | None 37 | Return: 38 | None 39 | """ 40 | model_name = "faster_rcnn_r50_1x" 41 | tmp_path = os.path.join(TestBase.model_root, "Detection") 42 | model_path = os.path.join(tmp_path, model_name, "model") 43 | data_path = os.path.join(tmp_path, model_name, "data/data.json") 44 | delta = 0.001 45 | min_subgraph_size = 40 46 | 47 | res, exp = TestBase.get_infer_results(model_path, data_path, 48 | min_subgraph_size) 49 | 50 | for i in range(len(res)): 51 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 52 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/gpu/test_dete_prune_mask_rcnn_r50_1x_gpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_gpu_helper import TestModelInferenceGPU 26 | 27 | TestBase = TestModelInferenceGPU(data_path="Data") 28 | 29 | 30 | @pytest.mark.p0 31 | def test_inference_dete_prune_mask_rcnn_r50_1x_gpu(): 32 | """ 33 | Inference and check value 34 | dete_prune_mask_rcnn_r50_1x gpu model 35 | Args: 36 | None 37 | Return: 38 | None 39 | """ 40 | model_name = "dete_prune_mask_rcnn_r50_1x_combined" 41 | model_path = os.path.join(TestBase.model_root, "python-slim-infer", 42 | model_name) 43 | data_path = os.path.join( 44 | TestBase.model_root, 45 | "python-model-infer/Detection/mask_rcnn_r50_1x/data/data.json") 46 | delta = 0.001 47 | 48 | res, exp = TestBase.get_infer_results(model_path, data_path) 49 | 50 | for i in range(len(res)): 51 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 52 | -------------------------------------------------------------------------------- /inference/inference_api_test/python_api_test/tests/gpu/test_dete_prune_yolov3_darknet_voc_gpu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import os 15 | import sys 16 | import argparse 17 | import logging 18 | import struct 19 | import six 20 | 21 | import pytest 22 | import nose 23 | import numpy as np 24 | 25 | from test_gpu_helper import TestModelInferenceGPU 26 | 27 | TestBase = TestModelInferenceGPU(data_path="Data") 28 | 29 | 30 | @pytest.mark.p0 31 | def test_inference_dete_prune_yolov3_darknet_voc_gpu(): 32 | """ 33 | Inference and check value 34 | dete_prune_yolov3_darknet_voc gpu model 35 | Args: 36 | None 37 | Return: 38 | None 39 | """ 40 | model_name = "dete_prune_yolov3_darknet_voc_combined" 41 | model_path = os.path.join(TestBase.model_root, "python-slim-infer", 42 | model_name) 43 | data_path = os.path.join( 44 | TestBase.model_root, 45 | "python-model-infer/Detection/yolov3_darknet/data/data.json") 46 | delta = 0.001 47 | 48 | res, exp = TestBase.get_infer_results(model_path, data_path) 49 | 50 | for i in range(len(res)): 51 | TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta) 52 | --------------------------------------------------------------------------------
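The per-backend helpers imported by the tests above (test_cpu_helper, test_gpu_helper, test_mkldnn_helper, test_trt_fp32_helper) are not part of this listing. The sketch below shows the contract those tests rely on; the method names, the model_root attribute, the data_path keyword, and the min_subgraph_size parameter all appear in the tests themselves, but the data.json layout and the predictor plumbing are assumptions made for illustration.

import json
import os

import numpy as np


class TestModelInferenceBase(object):
    """Sketch of the shared helper contract; not the real implementation."""

    def __init__(self, data_path="Data"):
        # Every test joins its model and data paths onto this attribute.
        self.model_root = os.path.join(os.getcwd(), data_path)

    def get_infer_results(self, model_path, data_path, min_subgraph_size=3):
        """Run the backend predictor on the inputs recorded in data.json and
        return (actual, expected) as two lists of numpy arrays."""
        with open(data_path) as f:
            record = json.load(f)  # assumed layout: saved inputs and outputs
        exp = [np.asarray(t) for t in record["output"]]
        # Backend-specific predictor construction (CPU / GPU / MKLDNN /
        # TensorRT FP32 with min_subgraph_size) would go here; this sketch
        # simply echoes the expected outputs so it stays self-contained.
        res = exp
        return res, exp

    def check_data(self, res, exp, delta):
        # Element-wise absolute-tolerance comparison of the flattened
        # tensors, matching how every test invokes it.
        np.testing.assert_allclose(res, exp, rtol=0, atol=delta)

Each concrete helper (TestModelInferenceGPU, TestModelInferenceTrtFp32, and so on) would then differ only in how the predictor is configured, which is why the test files above are identical except for the helper import, the model paths, the pytest priority mark, and delta.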