├── .gitignore
├── LICENSE
├── README.md
├── examples
├── AnimeGAN
│ ├── README.md
│ ├── main.py
│ ├── processor.py
│ ├── save_img
│ │ └── animegan_v1_hayao_60.jpg
│ └── test.jpg
├── ExtremeC3Net
│ ├── README.md
│ ├── main.py
│ ├── processor.py
│ ├── save_img
│ │ ├── result_ExtremeC3_Portrait_Segmentation.png
│ │ └── result_mask_ExtremeC3_Portrait_Segmentation.png
│ └── test.jpg
├── MiDaS
│ ├── README.md
│ ├── main.py
│ ├── processor.py
│ ├── save_img
│ │ ├── MiDaS_Small.pfm
│ │ └── MiDaS_Small.png
│ ├── test.jpg
│ ├── transforms.py
│ └── utils.py
├── OpenPose
│ └── HandsEstimation
│ │ ├── README.md
│ │ ├── main.py
│ │ ├── processor.py
│ │ ├── save_img
│ │ │ └── openpose_hands_estimation.jpg
│ │ └── test.jpg
├── PyramidBox
│ ├── README.md
│ ├── main.py
│ ├── processor.py
│ ├── save_img
│ │ └── pyramidbox_lite_mobile.jpg
│ └── test.jpg
├── SINet
│ ├── README.md
│ ├── main.py
│ ├── processor.py
│ ├── save_img
│ │ ├── result_SINet_Portrait_Segmentation.png
│ │ └── result_mask_SINet_Portrait_Segmentation.png
│ └── test.jpg
├── U2Net
│ └── PortraitGeneration
│ │ ├── README.md
│ │ ├── main.py
│ │ ├── processor.py
│ │ ├── save_img
│ │ │ └── u2net_portrait.jpg
│ │ └── test.jpg
└── UGATIT
│ ├── README.md
│ ├── main.py
│ ├── processor.py
│ ├── save_img
│ │ └── UGATIT_100w.jpg
│ └── test.jpg
├── ppqi
│ ├── __init__.py
│ └── inference.py
├── requirements.txt
├── setup.py
└── wheel
    └── ppqi-1.0.4-py3-none-any.whl
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
131 | # pretrained models
132 | examples/AnimeGAN/animegan_v1_hayao_60/
133 | examples/ExtremeC3Net/ExtremeC3_Portrait_Segmentation/
134 | examples/MiDaS/MiDaS_Small/
135 | examples/OpenPose/HandsEstimation/openpose_hands_estimation/
136 | examples/PyramidBox/pyramidbox_lite_mobile/
137 | examples/SINet/SINet_Portrait_Segmentation/
138 | examples/U2Net/PortraitGeneration/u2net_portrait.*
139 | examples/UGATIT/UGATIT_100w/
140 |
141 | # build
142 | build/
143 | dist/
144 | ppqi.egg-info/
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Paddle Quick Inference
2 | * 一个基于 [Paddle Inference](https://paddle-inference.readthedocs.io) 封装的用于快速部署的高层 API
3 | * 本项目基于 Paddle 2.0 版本开发
4 |
5 | ## 高层 API 特点
6 | * 使用方便快捷
7 | * 代码类似动态图模型的加载和预测
8 | * 将常用的 Config 和 Predictor 配置进行封装
9 | * 留出额外的接口以兼容一些特殊的配置操作
10 |
11 | ## 模型格式
12 | * 目前本项目支持四种推理模型格式,具体请参考下表:
13 |
14 | | 模型计算图 | 模型参数 |
15 | | -------- | -------- |
16 | | \_\_model\_\_ | \* |
17 | | model | params |
18 | | \_\_model\_\_ | \_\_params\_\_ |
19 | | \*.pdmodel | \*.pdiparams |
20 |
21 | # 安装
22 | * 直接安装
23 | ```shell
24 | $ pip install ppqi -i https://pypi.python.org/simple
25 | ```
26 | * 通过 wheel 包进行安装:[下载链接](https://github.com/jm12138/PaddleQuickInference/releases)
27 | ```shell
28 | $ pip install [path to whl]
29 | ```
30 |
31 | ## 快速使用
32 | ```python
33 | import numpy as np
34 | from ppqi import InferenceModel
35 |
36 | # 加载推理模型
37 | model = InferenceModel([Inference Model Path])
38 | model.eval()
39 |
40 | # 准备数据
41 | inputs = np.random.randn(batch_size, n1, n2, n3, ...).astype(np.float32)
42 |
43 | # 前向计算
44 | outputs = model(inputs)
45 | ```
46 |
47 | ## API 说明
48 | ```python
49 | '''
50 | modelpath:推理模型路径
51 | use_gpu:是否使用 GPU 进行推理
52 | gpu_id:设置使用的 GPU ID
53 | use_mkldnn:是否使用 MKLDNN 库进行 CPU 推理加速
54 | cpu_threads:设置计算库的所使用 CPU 线程数
55 |
56 | 还可以通过 InferenceModel.config 来对其他选项进行配置
57 | 如配置 TensorRT:
58 | model.config.enable_tensorrt_engine(
59 | workspace_size = 1 << 20,
60 | max_batch_size = 1,
61 | min_subgraph_size = 3,
62 | precision_mode=paddle.inference.PrecisionType.Float32,
63 | use_static = False,
64 | use_calib_mode = False
65 | )
66 | '''
67 | model = InferenceModel(
68 | modelpath=[Inference Model Path],
69 | use_gpu=False,
70 | gpu_id=0,
71 | use_mkldnn=False,
72 | cpu_threads=1
73 | )
74 |
75 | '''
76 | 将模型设置为推理模式
77 | 实际上是使用 Config 创建 Predictor
78 | '''
79 | model.eval()
80 |
81 | '''
82 | 创建完 Predictor 后
83 | 可打印出模型的输入输出节点的数量和名称
84 | '''
85 | print(model)
86 |
87 | '''
88 | 根据输入节点的数量和名称准备好数据
89 | 数据格式为 Ndarray
90 | '''
91 | inputs = np.random.randn(batch_size, n1, n2, n3, ...).astype(np.float32)
92 |
93 | '''
94 | 模型前向计算
95 | 根据输入节点顺序传入输入数据
96 | batch_size:推理数据批大小
97 | 返回结果格式为所有输出节点的输出
98 | 数据格式为 Ndarray
99 | '''
100 | outputs = model(input_datas, batch_size=4)
101 | ```
102 |
103 | ## 部署案例
104 | * [街景动漫化模型 AnimeGAN](./examples/AnimeGAN)
105 | * [人像动漫化模型 UGATIT](./examples/UGATIT)
106 | * [单目深度估计模型 MiDaS](./examples/MiDaS)
107 | * [人脸素描生成 U2Net Portrait Generation](./examples/U2Net/PortraitGeneration)
108 | * [人脸检测模型 Pyramid Box](./examples/PyramidBox)
109 | * [人像分割模型 SINet Portrait Segmentation](./examples/SINet)
110 | * [人像分割模型 ExtremeC3Net Portrait Segmentation](./examples/ExtremeC3Net)
111 | * [手部关键点检测模型 OpenPose Hands Estimation](./examples/OpenPose/HandsEstimation)
112 |
113 | ## Contact us
114 | Email : [agentmaker@163.com](mailto:agentmaker@163.com)
115 | QQ Group : 1005109853
116 |
--------------------------------------------------------------------------------
/examples/AnimeGAN/README.md:
--------------------------------------------------------------------------------
1 | # **AnimeGAN的推理部署**
2 | ## 效果展示
3 | 
4 | 
5 |
6 | ## 预训练推理模型下载
7 | * 下载链接:[Paddle Quick Inference Examples](https://aistudio.baidu.com/aistudio/datasetdetail/66517)
8 |
9 | ## 代码示例
10 | ```python
11 | # main.py
12 | from ppqi import InferenceModel
13 | from processor import preprocess, postprocess
14 |
15 | # 参数配置
16 | configs = {
17 | 'img_path': 'test.jpg',
18 | 'save_dir': 'save_img',
19 | 'model_name': 'animegan_v1_hayao_60',
20 | 'use_gpu': False,
21 | 'use_mkldnn': False,
22 | 'max_size': 512,
23 | 'min_size': 32
24 | }
25 |
26 | # 第一步:数据预处理
27 | input_data = preprocess(
28 | configs['img_path'],
29 | configs['max_size'],
30 | configs['min_size']
31 | )
32 |
33 | # 第二步:加载模型
34 | model = InferenceModel(
35 | modelpath=configs['model_name'],
36 | use_gpu=configs['use_gpu'],
37 | use_mkldnn=configs['use_mkldnn']
38 | )
39 | model.eval()
40 |
41 | # 第三步:模型推理
42 | output = model(input_data)
43 |
44 | # 第四步:结果后处理
45 | postprocess(
46 | output,
47 | configs['save_dir'],
48 | configs['model_name']
49 | )
50 | ```
51 |
--------------------------------------------------------------------------------
/examples/AnimeGAN/main.py:
--------------------------------------------------------------------------------
from ppqi import InferenceModel
from processor import preprocess, postprocess

# Runtime configuration for the AnimeGAN demo.
CONFIG = {
    'img_path': 'test.jpg',
    'save_dir': 'save_img',
    'model_name': 'animegan_v1_hayao_60',
    'use_gpu': False,
    'use_mkldnn': False,
    'max_size': 512,
    'min_size': 32
}

# Step 1: preprocess the input image into a network-ready tensor.
net_input = preprocess(
    CONFIG['img_path'],
    CONFIG['max_size'],
    CONFIG['min_size']
)

# Step 2: load the inference model and switch it to inference mode.
model = InferenceModel(
    modelpath=CONFIG['model_name'],
    use_gpu=CONFIG['use_gpu'],
    use_mkldnn=CONFIG['use_mkldnn']
)
model.eval()

# Step 3: run the forward pass.
net_output = model(net_input)

# Step 4: postprocess the raw output and save the stylized image.
postprocess(
    net_output,
    CONFIG['save_dir'],
    CONFIG['model_name']
)
--------------------------------------------------------------------------------
/examples/AnimeGAN/processor.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | import numpy as np
4 |
5 | __all__ = ['preprocess', 'postprocess']
6 |
def preprocess(img_path, max_size=512, min_size=32):
    """Load an image and convert it into an AnimeGAN input tensor.

    Args:
        img_path: Path of the image file to read (BGR, via cv2.imread).
        max_size: Upper bound applied to the longer image side.
        min_size: Lower bound applied to the shorter image side.

    Returns:
        A float32 ndarray of shape (1, H, W, 3) with values in [-1, 1],
        where H and W are multiples of 32.
    """
    # Read the image from disk.
    img = cv2.imread(img_path)

    # BGR -> RGB, the channel order the model expects.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # Rescale so the longer side does not exceed max_size, or upscale
    # when the image is smaller than min_size. cv2.resize takes
    # (width, height).
    # NOTE(review): these branches were reconstructed from a garbled
    # source line ("... if hw else ..." — the comparison operators were
    # lost in extraction); verify against the upstream AnimeGAN
    # preprocessing code.
    h, w = img.shape[:2]
    if max(h, w) > max_size:
        if h < w:
            img = cv2.resize(img, (max_size, int(h / w * max_size)))
        else:
            img = cv2.resize(img, (int(w / h * max_size), max_size))
    elif min(h, w) < min_size:
        if h < w:
            img = cv2.resize(img, (min_size, int(h / w * min_size)))
        else:
            img = cv2.resize(img, (int(w / h * min_size), min_size))

    # Crop so both sides are multiples of 32 (network stride requirement).
    h, w = img.shape[:2]
    img = img[:h - (h % 32), :w - (w % 32), :]

    # Normalize pixel values from [0, 255] to [-1, 1].
    img = img / 127.5 - 1.0

    # Add the batch dimension.
    img = np.expand_dims(img, axis=0).astype('float32')

    # Return the input tensor.
    return img
33 |
def postprocess(output, output_dir, model_name):
    """Convert a raw AnimeGAN output tensor to an image and save it.

    Args:
        output: Model output ndarray with values in [-1, 1].
        output_dir: Directory the result image is written into.
        model_name: Base name (without extension) for the saved JPEG.
    """
    # Map [-1, 1] back to [0, 255] and clamp to the valid pixel range.
    pixels = (output.squeeze() + 1.) / 2 * 255
    pixels = np.clip(pixels, 0, 255).astype(np.uint8)

    # RGB -> BGR for OpenCV's writer.
    pixels = cv2.cvtColor(pixels, cv2.COLOR_RGB2BGR)

    # Create the output directory on first use.
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Write the result image.
    cv2.imwrite(os.path.join(output_dir, '%s.jpg' % model_name), pixels)
--------------------------------------------------------------------------------
/examples/AnimeGAN/save_img/animegan_v1_hayao_60.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AgentMaker/PaddleQuickInference/cc140ba55f089288c2efacdb5f0e47b839e34364/examples/AnimeGAN/save_img/animegan_v1_hayao_60.jpg
--------------------------------------------------------------------------------
/examples/AnimeGAN/test.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AgentMaker/PaddleQuickInference/cc140ba55f089288c2efacdb5f0e47b839e34364/examples/AnimeGAN/test.jpg
--------------------------------------------------------------------------------
/examples/ExtremeC3Net/README.md:
--------------------------------------------------------------------------------
1 | # **ExtremeC3Net的推理部署**
2 | ## 效果展示
3 | 
4 | 
5 | 
6 |
7 | ## 预训练推理模型下载
8 | * 下载链接:[Paddle Quick Inference Examples](https://aistudio.baidu.com/aistudio/datasetdetail/66517)
9 |
10 | ## 代码示例
11 | ```python
12 | # main.py
13 | from ppqi import InferenceModel
14 | from processor import preprocess, postprocess
15 |
16 | # 参数配置
17 | configs = {
18 | 'img_path': 'test.jpg',
19 | 'save_dir': 'save_img',
20 | 'model_name': 'ExtremeC3_Portrait_Segmentation',
21 | 'use_gpu': False,
22 | 'use_mkldnn': False
23 | }
24 |
25 | # 第一步:数据预处理
26 | input_data = preprocess(configs['img_path'])
27 |
28 | # 第二步:加载模型
29 | model = InferenceModel(
30 | modelpath=configs['model_name'],
31 | use_gpu=configs['use_gpu'],
32 | use_mkldnn=configs['use_mkldnn']
33 | )
34 | model.eval()
35 |
36 | # 第三步:模型推理
37 | output = model(input_data)
38 |
39 | # 第四步:结果后处理
40 | postprocess(
41 | output,
42 | configs['save_dir'],
43 | configs['img_path'],
44 | configs['model_name']
45 | )
46 | ```
47 |
--------------------------------------------------------------------------------
/examples/ExtremeC3Net/main.py:
--------------------------------------------------------------------------------
from ppqi import InferenceModel
from processor import preprocess, postprocess

# Runtime configuration for the ExtremeC3Net portrait-segmentation demo.
CONFIG = {
    'img_path': 'test.jpg',
    'save_dir': 'save_img',
    'model_name': 'ExtremeC3_Portrait_Segmentation',
    'use_gpu': False,
    'use_mkldnn': False
}

# Step 1: preprocess the input image into a network-ready tensor.
net_input = preprocess(CONFIG['img_path'])

# Step 2: load the inference model and switch it to inference mode.
model = InferenceModel(
    modelpath=CONFIG['model_name'],
    use_gpu=CONFIG['use_gpu'],
    use_mkldnn=CONFIG['use_mkldnn']
)
model.eval()

# Step 3: run the forward pass.
net_output = model(net_input)

# Step 4: postprocess the raw output and save the result images.
postprocess(
    net_output,
    CONFIG['save_dir'],
    CONFIG['img_path'],
    CONFIG['model_name']
)
--------------------------------------------------------------------------------
/examples/ExtremeC3Net/processor.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | import numpy as np
4 |
5 | __all__ = ['preprocess', 'postprocess']
6 |
7 | mean = [107.304565, 115.69884, 132.35703 ]
8 | std = [63.97182, 65.1337, 68.29726]
9 |
10 | # 预处理函数
def preprocess(img_path):
    """Load an image and turn it into an ExtremeC3Net input tensor.

    Args:
        img_path: Path of the image file to read (BGR, via cv2.imread).

    Returns:
        A float32 ndarray of shape (1, 3, 224, 224), standardized per
        channel with the module-level ``mean``/``std`` and scaled by 1/255.
    """
    # Read the image from disk.
    img = cv2.imread(img_path)

    # The network takes a fixed 224x224 input.
    img = cv2.resize(img, (224, 224))

    # Work in float32 for the normalization below.
    img = img.astype(np.float32)

    # Per-channel standardization via in-place broadcasting (replaces the
    # original per-channel loops; in-place ops keep the float32 dtype),
    # then scale into the model's expected range.
    img -= np.asarray(mean, dtype=np.float32)
    img /= np.asarray(std, dtype=np.float32)
    img /= 255.

    # HWC -> CHW and add the batch dimension.
    img = img.transpose((2, 0, 1))
    img = img[np.newaxis, ...]

    return img
34 |
35 | # 后处理函数
def postprocess(results, output_dir, img_path, model_name):
    """Blend the predicted portrait mask with the source image and save both.

    Args:
        results: Model outputs; results[0][0] holds the segmentation scores.
        output_dir: Directory the result images are written into.
        img_path: Path of the original input image.
        model_name: Name used to build the output file names.
    """
    # Create the output directory on first use.
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    # Source image, needed both for its size and for compositing.
    src = cv2.imread(img_path)

    # Binarize the prediction into a {0, 1} float mask.
    seg = (results[0][0] > 0).astype('float32')

    # Bring the mask back up to the source resolution.
    height, width = src.shape[:2]
    seg = cv2.resize(seg, (width, height))

    # Foreground keeps the source pixels; background becomes white (255).
    alpha = seg[..., np.newaxis]
    composite = (src * alpha + (1 - alpha) * 255).astype(np.uint8)

    # Convert the mask to 8-bit grayscale for saving.
    seg = (seg * 255).astype(np.uint8)

    # Write both images.
    cv2.imwrite(os.path.join(output_dir, 'result_mask_%s.png' % model_name), seg)
    cv2.imwrite(os.path.join(output_dir, 'result_%s.png' % model_name), composite)
--------------------------------------------------------------------------------
/examples/ExtremeC3Net/save_img/result_ExtremeC3_Portrait_Segmentation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AgentMaker/PaddleQuickInference/cc140ba55f089288c2efacdb5f0e47b839e34364/examples/ExtremeC3Net/save_img/result_ExtremeC3_Portrait_Segmentation.png
--------------------------------------------------------------------------------
/examples/ExtremeC3Net/save_img/result_mask_ExtremeC3_Portrait_Segmentation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AgentMaker/PaddleQuickInference/cc140ba55f089288c2efacdb5f0e47b839e34364/examples/ExtremeC3Net/save_img/result_mask_ExtremeC3_Portrait_Segmentation.png
--------------------------------------------------------------------------------
/examples/ExtremeC3Net/test.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AgentMaker/PaddleQuickInference/cc140ba55f089288c2efacdb5f0e47b839e34364/examples/ExtremeC3Net/test.jpg
--------------------------------------------------------------------------------
/examples/MiDaS/README.md:
--------------------------------------------------------------------------------
1 | # **MiDaS的推理部署**
2 | ## 效果展示
3 | 
4 | 
5 |
6 | ## 预训练推理模型下载
7 | * 下载链接:[Paddle Quick Inference Examples](https://aistudio.baidu.com/aistudio/datasetdetail/66517)
8 |
9 | ## 代码示例
10 | ```python
11 | # main.py
12 | from ppqi import InferenceModel
13 | from processor import preprocess, postprocess
14 |
15 | # 参数配置
16 | configs = {
17 | 'img_path': 'test.jpg',
18 | 'save_dir': 'save_img',
19 | 'model_name': 'MiDaS_Small',
20 | 'use_gpu': False,
21 | 'use_mkldnn': False
22 | }
23 |
24 | # 第一步:数据预处理
25 | input_data = preprocess(
26 | configs['img_path'],
27 | size=256
28 | )
29 |
30 | # 第二步:加载模型
31 | model = InferenceModel(
32 | modelpath=configs['model_name'],
33 | use_gpu=configs['use_gpu'],
34 | use_mkldnn=configs['use_mkldnn']
35 | )
36 | model.eval()
37 |
38 | # 第三步:模型推理
39 | output = model(input_data)
40 |
41 | # 第四步:结果后处理
42 | postprocess(
43 | output,
44 | configs['save_dir'],
45 | configs['img_path'],
46 | configs['model_name']
47 | )
48 | ```
49 |
--------------------------------------------------------------------------------
/examples/MiDaS/main.py:
--------------------------------------------------------------------------------
from ppqi import InferenceModel
from processor import preprocess, postprocess

# Runtime configuration for the MiDaS depth-estimation demo.
CONFIG = {
    'img_path': 'test.jpg',
    'save_dir': 'save_img',
    'model_name': 'MiDaS_Small',
    'use_gpu': False,
    'use_mkldnn': False
}

# Step 1: preprocess the input image into a network-ready tensor.
net_input = preprocess(
    CONFIG['img_path'],
    size=256
)

# Step 2: load the inference model and switch it to inference mode.
model = InferenceModel(
    modelpath=CONFIG['model_name'],
    use_gpu=CONFIG['use_gpu'],
    use_mkldnn=CONFIG['use_mkldnn']
)
model.eval()

# Step 3: run the forward pass.
net_output = model(net_input)

# Step 4: postprocess the raw depth output and save the results.
postprocess(
    net_output,
    CONFIG['save_dir'],
    CONFIG['img_path'],
    CONFIG['model_name']
)
--------------------------------------------------------------------------------
/examples/MiDaS/processor.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | import numpy as np
4 | from utils import write_depth
5 | from paddle.vision.transforms import Compose
6 | from transforms import Resize, NormalizeImage, PrepareForNet
7 |
8 | __all__ = ['preprocess', 'postprocess']
9 |
10 | # 数据预处理函数
def preprocess(img_path, size):
    """Read an image and apply the MiDaS input transformation pipeline.

    Args:
        img_path: Path of the image file to read.
        size: Target side length passed to the Resize transform.

    Returns:
        A transformed ndarray with a leading batch dimension, ready for
        inference.
    """
    # Transformation pipeline matching the official MiDaS repository.
    pipeline = Compose([
        Resize(
            size,
            size,
            resize_target=None,
            keep_aspect_ratio=False,
            ensure_multiple_of=32,
            resize_method="upper_bound",
            image_interpolation_method=cv2.INTER_CUBIC,
        ),
        NormalizeImage(mean=[0.485, 0.456, 0.406],
                       std=[0.229, 0.224, 0.225]),
        PrepareForNet()
    ])

    # Read the image, convert BGR -> RGB and scale pixels to [0, 1].
    raw = cv2.imread(img_path)
    raw = cv2.cvtColor(raw, cv2.COLOR_BGR2RGB) / 255.0

    # Apply the pipeline and add the batch dimension.
    sample = pipeline({"image": raw})["image"]
    return sample[np.newaxis, ...]
41 |
42 | # 数据后处理函数
def postprocess(results, output_dir, img_path, model_name):
    """Resize the predicted depth map to the source resolution and save it.

    Args:
        results: Model outputs; results[0] is the predicted depth map.
        output_dir: Directory the depth files are written into.
        img_path: Path of the original input image (used only for its size).
        model_name: Base name for the saved depth files.
    """
    # Create the output directory on first use.
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    # The prediction comes back at network resolution; read the source
    # image to recover the original size.
    img = cv2.imread(img_path)
    h, w = img.shape[:2]

    # Scale the depth map back to the source resolution.
    output = cv2.resize(results[0], (w, h), interpolation=cv2.INTER_CUBIC)

    # write_depth saves both a .pfm and a 16-bit .png (bits=2); its return
    # value was never used, so the unused unpacking was dropped.
    write_depth(os.path.join(output_dir, model_name), output, bits=2)
--------------------------------------------------------------------------------
/examples/MiDaS/save_img/MiDaS_Small.pfm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AgentMaker/PaddleQuickInference/cc140ba55f089288c2efacdb5f0e47b839e34364/examples/MiDaS/save_img/MiDaS_Small.pfm
--------------------------------------------------------------------------------
/examples/MiDaS/save_img/MiDaS_Small.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AgentMaker/PaddleQuickInference/cc140ba55f089288c2efacdb5f0e47b839e34364/examples/MiDaS/save_img/MiDaS_Small.png
--------------------------------------------------------------------------------
/examples/MiDaS/test.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AgentMaker/PaddleQuickInference/cc140ba55f089288c2efacdb5f0e47b839e34364/examples/MiDaS/test.jpg
--------------------------------------------------------------------------------
/examples/MiDaS/transforms.py:
--------------------------------------------------------------------------------
1 | # Refer https://github.com/intel-isl/MiDaS
2 |
3 | import numpy as np
4 | import cv2
5 |
6 |
class Resize(object):
    """Resize sample to given size (width, height).
    """
    def __init__(self,
                 width,
                 height,
                 resize_target=True,
                 keep_aspect_ratio=False,
                 ensure_multiple_of=1,
                 resize_method="lower_bound",
                 image_interpolation_method=cv2.INTER_AREA):
        """Init.

        Args:
            width (int): desired output width
            height (int): desired output height
            resize_target (bool, optional):
                True: Resize the full sample (image, mask, target).
                False: Resize image only.
                Defaults to True.
            keep_aspect_ratio (bool, optional):
                True: Keep the aspect ratio of the input sample.
                Output sample might not have the given width and height, and
                resize behaviour depends on the parameter 'resize_method'.
                Defaults to False.
            ensure_multiple_of (int, optional):
                Output width and height is constrained to be multiple of this parameter.
                Defaults to 1.
            resize_method (str, optional):
                "lower_bound": Output will be at least as large as the given size.
                "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
                "minimal": Scale as least as possible. (Output size might be smaller than given size.)
                Defaults to "lower_bound".
        """
        self.__width = width
        self.__height = height

        self.__resize_target = resize_target
        self.__keep_aspect_ratio = keep_aspect_ratio
        self.__multiple_of = ensure_multiple_of
        self.__resize_method = resize_method
        self.__image_interpolation_method = image_interpolation_method

    def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
        # Round x to the nearest multiple of `ensure_multiple_of`.
        y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)

        # If rounding overshot the allowed maximum, round down instead.
        if max_val is not None and y > max_val:
            y = (np.floor(x / self.__multiple_of) *
                 self.__multiple_of).astype(int)

        # If rounding undershot the allowed minimum, round up instead.
        if y < min_val:
            y = (np.ceil(x / self.__multiple_of) *
                 self.__multiple_of).astype(int)

        return y

    def get_size(self, width, height):
        # determine new height and width
        scale_height = self.__height / height
        scale_width = self.__width / width

        if self.__keep_aspect_ratio:
            # Use a single scale for both axes; which axis dictates the
            # scale depends on the chosen resize method.
            if self.__resize_method == "lower_bound":
                # scale such that output size is lower bound
                if scale_width > scale_height:
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            elif self.__resize_method == "upper_bound":
                # scale such that output size is upper bound
                if scale_width < scale_height:
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            elif self.__resize_method == "minimal":
                # scale as little as possible
                if abs(1 - scale_width) < abs(1 - scale_height):
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            else:
                raise ValueError(
                    f"resize_method {self.__resize_method} not implemented")

        # Snap the scaled size to a multiple of `ensure_multiple_of`,
        # honoring the bound implied by the resize method.
        if self.__resize_method == "lower_bound":
            new_height = self.constrain_to_multiple_of(scale_height * height,
                                                       min_val=self.__height)
            new_width = self.constrain_to_multiple_of(scale_width * width,
                                                      min_val=self.__width)
        elif self.__resize_method == "upper_bound":
            new_height = self.constrain_to_multiple_of(scale_height * height,
                                                       max_val=self.__height)
            new_width = self.constrain_to_multiple_of(scale_width * width,
                                                      max_val=self.__width)
        elif self.__resize_method == "minimal":
            new_height = self.constrain_to_multiple_of(scale_height * height)
            new_width = self.constrain_to_multiple_of(scale_width * width)
        else:
            raise ValueError(
                f"resize_method {self.__resize_method} not implemented")

        return (new_width, new_height)

    def __call__(self, sample):
        # Compute the target size from the input image's shape (H, W).
        width, height = self.get_size(sample["image"].shape[1],
                                      sample["image"].shape[0])

        # resize sample
        sample["image"] = cv2.resize(
            sample["image"],
            (width, height),
            interpolation=self.__image_interpolation_method,
        )

        # Optionally resize auxiliary targets; nearest-neighbor keeps
        # label/mask values unblended.
        if self.__resize_target:
            if "disparity" in sample:
                sample["disparity"] = cv2.resize(
                    sample["disparity"],
                    (width, height),
                    interpolation=cv2.INTER_NEAREST,
                )

            if "depth" in sample:
                sample["depth"] = cv2.resize(sample["depth"], (width, height),
                                             interpolation=cv2.INTER_NEAREST)

            sample["mask"] = cv2.resize(
                sample["mask"].astype(np.float32),
                (width, height),
                interpolation=cv2.INTER_NEAREST,
            )
            sample["mask"] = sample["mask"].astype(bool)

        return sample
147 |
148 |
class NormalizeImage(object):
    """Normalize a sample's image with a fixed per-channel mean and std."""

    def __init__(self, mean, std):
        # Kept as name-mangled private attributes, matching the original.
        self.__mean = mean
        self.__std = std

    def __call__(self, sample):
        normalized = (sample["image"] - self.__mean) / self.__std
        sample["image"] = normalized
        return sample
160 |
161 |
class PrepareForNet(object):
    """Convert a sample into network-ready arrays (contiguous float32)."""

    def __init__(self):
        pass

    def __call__(self, sample):
        # HWC -> CHW, made contiguous, then cast to float32.
        chw = np.transpose(sample["image"], (2, 0, 1))
        sample["image"] = np.ascontiguousarray(chw).astype(np.float32)

        # Auxiliary targets become contiguous float32 arrays as well.
        for key in ("mask", "disparity", "depth"):
            if key in sample:
                sample[key] = np.ascontiguousarray(
                    sample[key].astype(np.float32))

        return sample
185 |
--------------------------------------------------------------------------------
/examples/MiDaS/utils.py:
--------------------------------------------------------------------------------
1 | # Refer https://github.com/intel-isl/MiDaS
2 | """Utils for monoDepth.
3 | """
4 | import sys
5 | import numpy as np
6 | import cv2
7 |
8 |
def write_pfm(path, image, scale=1):
    """Write pfm file.

    Args:
        path (str): path to file
        image (array): float32 data, shape H x W, H x W x 1, or H x W x 3
        scale (int, optional): Scale. Defaults to 1.

    Raises:
        Exception: if the dtype is not float32 or the shape is unsupported.
    """

    with open(path, "wb") as file:
        if image.dtype.name != "float32":
            raise Exception("Image dtype must be float32.")

        # PFM stores scanlines bottom-to-top, so flip vertically.
        image = np.flipud(image)

        if len(image.shape) == 3 and image.shape[2] == 3:  # color image
            color = True
        elif (len(image.shape) == 2
              or len(image.shape) == 3 and image.shape[2] == 1):  # greyscale
            color = False
        else:
            raise Exception(
                "Image must have H x W x 3, H x W x 1 or H x W dimensions.")

        # BUG FIX: ".encode()" previously bound only to "Pf\n", so color
        # images passed a str to a binary-mode file and raised TypeError.
        # Encode the selected header instead.
        file.write(("PF\n" if color else "Pf\n").encode())
        file.write("%d %d\n".encode() % (image.shape[1], image.shape[0]))

        # A negative scale in the PFM header marks little-endian data.
        endian = image.dtype.byteorder

        if endian == "<" or endian == "=" and sys.byteorder == "little":
            scale = -scale

        file.write("%f\n".encode() % scale)

        image.tofile(file)
46 |
47 |
def read_image(path):
    """Read image and output RGB image (0-1).

    Args:
        path (str): path to file

    Returns:
        array: RGB image with values scaled to [0, 1]
    """
    bgr = cv2.imread(path)

    # Promote a single-channel image to three channels first.
    if bgr.ndim == 2:
        bgr = cv2.cvtColor(bgr, cv2.COLOR_GRAY2BGR)

    # OpenCV loads BGR; convert to RGB and rescale to [0, 1].
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB) / 255.0
62 |
63 |
def write_depth(path, depth, bits=1):
    """Write depth map to pfm and png file.

    Args:
        path (str): filepath without extension
        depth (array): depth values
        bits (int, optional): 1 for an 8-bit png, 2 for 16-bit. Defaults to 1.

    Returns:
        tuple: (pfm_path, png_path) of the written files.
    """
    write_pfm(path + ".pfm", depth.astype(np.float32))

    depth_min = depth.min()
    depth_max = depth.max()

    # Largest representable value for the requested bit depth.
    max_val = (2**(8 * bits)) - 1

    # Rescale to [0, max_val]; a flat map is written as all zeros.
    if depth_max - depth_min > np.finfo("float").eps:
        out = max_val * (depth - depth_min) / (depth_max - depth_min)
    else:
        # BUG FIX: ndarray has no ".type" attribute; use ".dtype".
        out = np.zeros(depth.shape, dtype=depth.dtype)

    if bits == 1:
        cv2.imwrite(path + ".png", out.astype("uint8"))
    elif bits == 2:
        cv2.imwrite(path + ".png", out.astype("uint16"))
    return path + '.pfm', path + ".png"
88 |
--------------------------------------------------------------------------------
/examples/OpenPose/HandsEstimation/README.md:
--------------------------------------------------------------------------------
1 | # **OpenPose HandsEstimation的推理部署**
2 | ## 效果展示
3 | 
4 | 
5 |
6 | ## 预训练推理模型下载
7 | * 下载链接:[Paddle Quick Inference Examples](https://aistudio.baidu.com/aistudio/datasetdetail/66517)
8 |
9 | ## 代码示例
10 | ```python
11 | # main.py
12 | from ppqi import InferenceModel
13 | from processor import preprocess, postprocess
14 |
15 | # 参数配置
16 | configs = {
17 | 'img_path': 'test.jpg',
18 | 'save_dir': 'save_img',
19 | 'model_name': 'openpose_hands_estimation',
20 | 'use_gpu': False,
21 | 'use_mkldnn': False,
22 | 'threshold': 0.1
23 | }
24 |
25 | # 第一步:数据预处理
26 | input_data = preprocess(
27 | configs['img_path']
28 | )
29 |
30 | # 第二步:加载模型
31 | model = InferenceModel(
32 | modelpath=configs['model_name'],
33 | use_gpu=configs['use_gpu'],
34 | use_mkldnn=configs['use_mkldnn']
35 | )
36 | model.eval()
37 |
38 | # 第三步:模型推理
39 | output = model(input_data)
40 |
41 | # 第四步:结果后处理
42 | postprocess(
43 | output,
44 | configs['save_dir'],
45 | configs['img_path'],
46 | configs['model_name'],
47 | configs['threshold']
48 | )
49 | ```
50 |
--------------------------------------------------------------------------------
/examples/OpenPose/HandsEstimation/main.py:
--------------------------------------------------------------------------------
from ppqi import InferenceModel
from processor import preprocess, postprocess

# Pipeline configuration
configs = {
    'img_path': 'test.jpg',
    'save_dir': 'save_img',
    'model_name': 'openpose_hands_estimation',
    'use_gpu': False,
    'use_mkldnn': False,
    'threshold': 0.1
}

# Step 1: preprocess the input image into a network blob
input_data = preprocess(
    configs['img_path']
)

# Step 2: load the inference model
model = InferenceModel(
    modelpath=configs['model_name'],
    use_gpu=configs['use_gpu'],
    use_mkldnn=configs['use_mkldnn']
)
model.eval()

# Step 3: run inference
output = model(input_data)

# Step 4: postprocess the heatmaps and save the visualization
postprocess(
    output,
    configs['save_dir'],
    configs['img_path'],
    configs['model_name'],
    configs['threshold']
)
--------------------------------------------------------------------------------
/examples/OpenPose/HandsEstimation/processor.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 |
4 | __all__ = ['preprocess', 'postprocess']
5 |
def preprocess(img_path):
    """Read an image and build the network input blob.

    The input height is fixed at 368; the width is scaled to preserve
    the image's aspect ratio. Pixel values are rescaled to [0, 1].
    """
    inHeight = 368
    img = cv2.imread(img_path)
    img_height, img_width, _ = img.shape
    aspect_ratio = img_width / img_height
    # NOTE(review): "* 8) // 8" is a no-op beyond truncation — possibly
    # intended to round the width to a multiple of 8; confirm upstream.
    inWidth = int(((aspect_ratio * inHeight) * 8) // 8)
    inpBlob = cv2.dnn.blobFromImage(
        img, 1.0 / 255, (inWidth, inHeight), (0, 0, 0), swapRB=False, crop=False)

    return inpBlob
16 |
# Result postprocessing function
def postprocess(outputs, output_dir, img_path, model_name, threshold):
    """Extract 21 hand keypoints from the heatmaps and visualize them."""
    img = cv2.imread(img_path)
    num_points = 21

    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    # The image size is loop-invariant, so look it up once.
    img_height, img_width, _ = img.shape

    # Keep a keypoint only if its heatmap peak exceeds the threshold.
    points = []
    for part_id in range(num_points):
        heatmap = cv2.resize(outputs[0, part_id, :, :],
                             (img_width, img_height))
        _, prob, _, peak = cv2.minMaxLoc(heatmap)
        if prob > threshold:
            points.append([int(peak[0]), int(peak[1])])
        else:
            points.append(None)

    # Draw the hand skeleton and save the result.
    vis_pose(img, output_dir, points, model_name)
39 |
# Result visualization
def vis_pose(img, output_dir, points, model_name):
    """Draw keypoint connections on `img` and save it to `output_dir`."""
    # Keypoint index pairs that form the hand skeleton edges.
    point_pairs = [
        [0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6],
        [6, 7], [7, 8], [0, 9], [9, 10], [10, 11],
        [11, 12], [0, 13], [13, 14], [14, 15], [15, 16],
        [0, 17], [17, 18], [18, 19], [19, 20]
    ]

    # Draw each edge whose two endpoints were both detected.
    for start_idx, end_idx in point_pairs:
        if points[start_idx] and points[end_idx]:
            cv2.line(img, tuple(points[start_idx]), tuple(points[end_idx]),
                     (0, 255, 255), 3)
            cv2.circle(img, tuple(points[start_idx]), 8, (0, 0, 255),
                       thickness=-1, lineType=cv2.FILLED)

    # Save the annotated image.
    cv2.imwrite(os.path.join(output_dir, '%s.jpg' % model_name), img)
--------------------------------------------------------------------------------
/examples/OpenPose/HandsEstimation/save_img/openpose_hands_estimation.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AgentMaker/PaddleQuickInference/cc140ba55f089288c2efacdb5f0e47b839e34364/examples/OpenPose/HandsEstimation/save_img/openpose_hands_estimation.jpg
--------------------------------------------------------------------------------
/examples/OpenPose/HandsEstimation/test.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AgentMaker/PaddleQuickInference/cc140ba55f089288c2efacdb5f0e47b839e34364/examples/OpenPose/HandsEstimation/test.jpg
--------------------------------------------------------------------------------
/examples/PyramidBox/README.md:
--------------------------------------------------------------------------------
1 | # **PyramidBox的推理部署**
2 | ## 效果展示
3 | 
4 | 
5 |
6 | ## 预训练推理模型下载
7 | * 下载链接:[Paddle Quick Inference Examples](https://aistudio.baidu.com/aistudio/datasetdetail/66517)
8 |
9 | ## 代码示例
10 | ```python
11 | # main.py
12 | from ppqi import InferenceModel
13 | from processor import preprocess, postprocess
14 |
15 | # 参数配置
16 | configs = {
17 | 'img_path': 'test.jpg',
18 | 'save_dir': 'save_img',
19 | 'model_name': 'pyramidbox_lite_mobile',
20 | 'use_gpu': False,
21 | 'use_mkldnn': False
22 | }
23 |
24 | # 第一步:数据预处理
25 | input_data = preprocess(
26 | configs['img_path'],
27 | shrink=0.5
28 | )
29 |
30 | # 第二步:加载模型
31 | model = InferenceModel(
32 | modelpath=configs['model_name'],
33 | use_gpu=configs['use_gpu'],
34 | use_mkldnn=configs['use_mkldnn']
35 | )
36 | model.eval()
37 |
38 | # 第三步:模型推理
39 | output, _, _, _ = model(input_data)
40 |
41 | # 第四步:结果后处理
42 | postprocess(
43 | output,
44 | configs['save_dir'],
45 | configs['img_path'],
46 | configs['model_name']
47 | )
48 | ```
49 |
--------------------------------------------------------------------------------
/examples/PyramidBox/main.py:
--------------------------------------------------------------------------------
from ppqi import InferenceModel
from processor import preprocess, postprocess

# Pipeline configuration
configs = {
    'img_path': 'test.jpg',
    'save_dir': 'save_img',
    'model_name': 'pyramidbox_lite_mobile',
    'use_gpu': False,
    'use_mkldnn': False
}

# Step 1: preprocess the input image (shrink scales each side by 0.5)
input_data = preprocess(
    configs['img_path'],
    shrink=0.5
)

# Step 2: load the inference model
model = InferenceModel(
    modelpath=configs['model_name'],
    use_gpu=configs['use_gpu'],
    use_mkldnn=configs['use_mkldnn']
)
model.eval()

# Step 3: run inference (only the first output is used)
output, _, _, _ = model(input_data)

# Step 4: postprocess and save the visualized detections
postprocess(
    output,
    configs['save_dir'],
    configs['img_path'],
    configs['model_name']
)
--------------------------------------------------------------------------------
/examples/PyramidBox/processor.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | import numpy as np
4 |
5 | __all__ = ['preprocess', 'postprocess']
6 |
def preprocess(img_path, shrink):
    """Read an image and build the normalized NCHW input tensor.

    Args:
        img_path (str): path to the input image.
        shrink (float): scale factor applied to both sides before inference.

    Returns:
        np.ndarray: float32 tensor of shape (1, C, H, W).
    """
    image = cv2.imread(img_path)
    image_height, image_width, image_channel = image.shape
    if shrink != 1:
        image_height, image_width = int(image_height * shrink), int(
            image_width * shrink)
        # BUG FIX: cv2.resize's third positional argument is `dst`, not the
        # interpolation flag — pass it by keyword so INTER_NEAREST is used.
        image = cv2.resize(image, (image_width, image_height),
                           interpolation=cv2.INTER_NEAREST)
    # HWC to CHW
    if len(image.shape) == 3:
        image = np.swapaxes(image, 1, 2)
        image = np.swapaxes(image, 1, 0)
    # Per-channel mean subtraction, then scaling.
    mean = [104., 117., 123.]
    scale = 0.007843
    image = image.astype('float32')
    image -= np.array(mean)[:, np.newaxis, np.newaxis].astype('float32')
    image = image * scale
    image = np.expand_dims(image, axis=0).astype('float32')
    return image
27 |
def postprocess(output_datas, output_dir, img_path, model_name):
    """Draw detected face boxes (score > 0.9) on the image and save it."""
    img = cv2.imread(img_path)
    img_h, img_w = img.shape[:2]

    # Boxes arrive as normalized coordinates; keep only confident ones.
    for detection in output_datas:
        label, score, x1, y1, x2, y2 = detection
        if score > 0.9:
            left = int(x1 * img_w)
            top = int(y1 * img_h)
            right = int(x2 * img_w)
            bottom = int(y2 * img_h)
            cv2.rectangle(img, (left, top), (right, bottom), (255, 0, 0), 2)

    # Make sure the output directory exists.
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    cv2.imwrite(os.path.join(output_dir, '%s.jpg' % model_name), img)
--------------------------------------------------------------------------------
/examples/PyramidBox/save_img/pyramidbox_lite_mobile.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AgentMaker/PaddleQuickInference/cc140ba55f089288c2efacdb5f0e47b839e34364/examples/PyramidBox/save_img/pyramidbox_lite_mobile.jpg
--------------------------------------------------------------------------------
/examples/PyramidBox/test.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AgentMaker/PaddleQuickInference/cc140ba55f089288c2efacdb5f0e47b839e34364/examples/PyramidBox/test.jpg
--------------------------------------------------------------------------------
/examples/SINet/README.md:
--------------------------------------------------------------------------------
1 | # **SINet的推理部署**
2 | ## 效果展示
3 | 
4 | 
5 | 
6 |
7 | ## 预训练推理模型下载
8 | * 下载链接:[Paddle Quick Inference Examples](https://aistudio.baidu.com/aistudio/datasetdetail/66517)
9 |
10 | ## 代码示例
11 | ```python
12 | # main.py
13 | from ppqi import InferenceModel
14 | from processor import preprocess, postprocess
15 |
16 | # 参数配置
17 | configs = {
18 | 'img_path': 'test.jpg',
19 | 'save_dir': 'save_img',
20 | 'model_name': 'SINet_Portrait_Segmentation',
21 | 'use_gpu': False,
22 | 'use_mkldnn': False
23 | }
24 |
25 | # 第一步:数据预处理
26 | input_data = preprocess(configs['img_path'])
27 |
28 | # 第二步:加载模型
29 | model = InferenceModel(
30 | modelpath=configs['model_name'],
31 | use_gpu=configs['use_gpu'],
32 | use_mkldnn=configs['use_mkldnn']
33 | )
34 | model.eval()
35 |
36 | # 第三步:模型推理
37 | output = model(input_data)
38 |
39 | # 第四步:结果后处理
40 | postprocess(
41 | output,
42 | configs['save_dir'],
43 | configs['img_path'],
44 | configs['model_name']
45 | )
46 | ```
47 |
--------------------------------------------------------------------------------
/examples/SINet/main.py:
--------------------------------------------------------------------------------
from ppqi import InferenceModel
from processor import preprocess, postprocess

# Pipeline configuration
configs = {
    'img_path': 'test.jpg',
    'save_dir': 'save_img',
    'model_name': 'SINet_Portrait_Segmentation',
    'use_gpu': False,
    'use_mkldnn': False
}

# Step 1: preprocess the input image
input_data = preprocess(configs['img_path'])

# Step 2: load the inference model
model = InferenceModel(
    modelpath=configs['model_name'],
    use_gpu=configs['use_gpu'],
    use_mkldnn=configs['use_mkldnn']
)
model.eval()

# Step 3: run inference
output = model(input_data)

# Step 4: postprocess and save the mask and composite
postprocess(
    output,
    configs['save_dir'],
    configs['img_path'],
    configs['model_name']
)
--------------------------------------------------------------------------------
/examples/SINet/processor.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | import numpy as np
4 |
5 | __all__ = ['preprocess', 'postprocess']
6 |
7 | mean = [107.304565, 115.69884, 132.35703 ]
8 | std = [63.97182, 65.1337, 68.29726]
9 |
# Preprocessing function
def preprocess(img_path):
    """Read an image and build the normalized (1, 3, 224, 224) input.

    Returns:
        np.ndarray: float32 NCHW tensor.
    """
    # Read and resize to the fixed network input size.
    img = cv2.imread(img_path)
    img = cv2.resize(img, (224, 224))

    img = img.astype(np.float32)

    # Per-channel standardization, then scale by 1/255.
    # In-place ops with float32 operands keep the array float32 and
    # replace the original per-channel Python loops (removed the unused
    # h/w locals as well).
    img -= np.asarray(mean, dtype=np.float32)
    img /= np.asarray(std, dtype=np.float32)
    img /= 255.

    # HWC -> CHW and add the batch dimension.
    img = img.transpose((2, 0, 1))
    img = img[np.newaxis, ...]

    return img
34 |
# Postprocessing function
def postprocess(results, output_dir, img_path, model_name):
    """Blend the predicted portrait mask with the image and save both."""
    # Make sure the output directory exists.
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    src = cv2.imread(img_path)

    # Foreground mask: 1 where the prediction is non-positive.
    mask = 1 - (results[0][0] > 0).astype('float32')

    # Scale the mask back to the original image size.
    height, width = src.shape[:2]
    mask = cv2.resize(mask, (width, height))

    # Composite: keep masked pixels, fill the rest with white.
    mask_3c = mask[..., np.newaxis]
    result = (src * mask_3c + (1 - mask_3c) * 255).astype(np.uint8)

    # Save the mask as an 8-bit image alongside the composite.
    mask = (mask * 255).astype(np.uint8)
    cv2.imwrite(os.path.join(output_dir, 'result_mask_%s.png' % model_name), mask)
    cv2.imwrite(os.path.join(output_dir, 'result_%s.png' % model_name), result)
--------------------------------------------------------------------------------
/examples/SINet/save_img/result_SINet_Portrait_Segmentation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AgentMaker/PaddleQuickInference/cc140ba55f089288c2efacdb5f0e47b839e34364/examples/SINet/save_img/result_SINet_Portrait_Segmentation.png
--------------------------------------------------------------------------------
/examples/SINet/save_img/result_mask_SINet_Portrait_Segmentation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AgentMaker/PaddleQuickInference/cc140ba55f089288c2efacdb5f0e47b839e34364/examples/SINet/save_img/result_mask_SINet_Portrait_Segmentation.png
--------------------------------------------------------------------------------
/examples/SINet/test.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AgentMaker/PaddleQuickInference/cc140ba55f089288c2efacdb5f0e47b839e34364/examples/SINet/test.jpg
--------------------------------------------------------------------------------
/examples/U2Net/PortraitGeneration/README.md:
--------------------------------------------------------------------------------
1 | # **U2Net Portrait的推理部署**
2 | ## 效果展示
3 | 
4 | 
5 |
6 | ## 预训练推理模型下载
7 | * 下载链接:[Paddle Quick Inference Examples](https://aistudio.baidu.com/aistudio/datasetdetail/66517)
8 |
9 | ## 代码示例
10 | ```python
11 | # main.py
12 | from ppqi import InferenceModel
13 | from processor import preprocess, postprocess
14 |
15 | # 参数配置
16 | configs = {
17 | 'img_path': 'test.jpg',
18 | 'save_dir': 'save_img',
19 | 'model_name': 'u2net_portrait',
20 | 'use_gpu': False,
21 | 'use_mkldnn': False
22 | }
23 |
24 | # 第一步:数据预处理
25 | input_data = preprocess(configs['img_path'])
26 |
27 | # 第二步:加载模型
28 | model = InferenceModel(
29 | modelpath=configs['model_name'],
30 | use_gpu=configs['use_gpu'],
31 | use_mkldnn=configs['use_mkldnn']
32 | )
33 | model.eval()
34 |
35 | # 第三步:模型推理
36 | d0, _, _, _, _, _, _ = model(input_data)
37 |
38 | # 第四步:结果后处理
39 | postprocess(
40 | d0,
41 | configs['save_dir'],
42 | configs['model_name']
43 | )
44 | ```
45 |
--------------------------------------------------------------------------------
/examples/U2Net/PortraitGeneration/main.py:
--------------------------------------------------------------------------------
from ppqi import InferenceModel
from processor import preprocess, postprocess

# Pipeline configuration
configs = {
    'img_path': 'test.jpg',
    'save_dir': 'save_img',
    'model_name': 'u2net_portrait',
    'use_gpu': False,
    'use_mkldnn': False
}

# Step 1: preprocess the input image
input_data = preprocess(configs['img_path'])

# Step 2: load the inference model
model = InferenceModel(
    modelpath=configs['model_name'],
    use_gpu=configs['use_gpu'],
    use_mkldnn=configs['use_mkldnn']
)
model.eval()

# Step 3: run inference (only the first of seven outputs is used)
d0, _, _, _, _, _, _ = model(input_data)

# Step 4: postprocess and save the portrait sketch
postprocess(
    d0,
    configs['save_dir'],
    configs['model_name']
)
--------------------------------------------------------------------------------
/examples/U2Net/PortraitGeneration/processor.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | import numpy as np
4 |
5 | __all__ = ['preprocess', 'postprocess']
6 |
def normPRED(d):
    """Min-max normalize `d` to the [0, 1] range.

    A constant input (max == min) is mapped to all zeros instead of
    dividing by zero (which previously produced NaN/inf).
    """
    ma = np.max(d)
    mi = np.min(d)

    # Guard the zero-range case before dividing.
    if ma == mi:
        return np.zeros_like(d)

    return (d - mi) / (ma - mi)
14 |
15 |
def preprocess(img_path):
    """Read an image and build the normalized (1, 3, 512, 512) input.

    Returns:
        np.ndarray: float32 NCHW tensor.
    """
    img = cv2.imread(img_path)

    # Resize to the fixed network input size.
    img = cv2.resize(img, (512, 512))

    # Per-channel standardization in BGR channel order (the values look
    # like the ImageNet RGB mean/std reversed — confirm against the
    # training pipeline). Renamed from the original `means`/`vars`, whose
    # roles were swapped and which shadowed the builtin `vars`.
    stds = [0.225, 0.224, 0.229]
    means = [0.406, 0.456, 0.485]
    img = img / np.max(img)
    img -= means
    img /= stds

    # HWC -> CHW and add the batch dimension.
    img = img.transpose((2, 0, 1))
    img = img[np.newaxis, :, :, :].astype('float32')

    return img
34 |
35 |
def postprocess(outputs, output_dir, model_name):
    """Invert and normalize the prediction, then save it as a JPEG."""
    # Invert the first output channel and rescale it to [0, 1].
    inverted = 1.0 - outputs[:, 0, :, :]
    normalized = normPRED(inverted).squeeze()

    # Convert to an 8-bit grayscale image.
    image = (normalized * 255).astype(np.uint8)

    # Make sure the output directory exists, then save.
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    cv2.imwrite(os.path.join(output_dir, '%s.jpg' % model_name), image)
44 |
--------------------------------------------------------------------------------
/examples/U2Net/PortraitGeneration/save_img/u2net_portrait.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AgentMaker/PaddleQuickInference/cc140ba55f089288c2efacdb5f0e47b839e34364/examples/U2Net/PortraitGeneration/save_img/u2net_portrait.jpg
--------------------------------------------------------------------------------
/examples/U2Net/PortraitGeneration/test.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AgentMaker/PaddleQuickInference/cc140ba55f089288c2efacdb5f0e47b839e34364/examples/U2Net/PortraitGeneration/test.jpg
--------------------------------------------------------------------------------
/examples/UGATIT/README.md:
--------------------------------------------------------------------------------
1 | # **UGATIT的推理部署**
2 | ## 效果展示
3 | 
4 | 
5 |
6 | ## 预训练推理模型下载
7 | * 下载链接:[Paddle Quick Inference Examples](https://aistudio.baidu.com/aistudio/datasetdetail/66517)
8 |
9 | ## 代码示例
10 | ```python
11 | # main.py
12 | from ppqi import InferenceModel
13 | from processor import preprocess, postprocess
14 |
15 | # 参数配置
16 | configs = {
17 | 'img_path': 'test.jpg',
18 | 'save_dir': 'save_img',
19 | 'model_name': 'UGATIT_100w',
20 | 'use_gpu': False,
21 | 'use_mkldnn': False
22 | }
23 |
24 | # 第一步:数据预处理
25 | input_data = preprocess(configs['img_path'])
26 |
27 | # 第二步:加载模型
28 | model = InferenceModel(
29 | modelpath=configs['model_name'],
30 | use_gpu=configs['use_gpu'],
31 | use_mkldnn=configs['use_mkldnn']
32 | )
33 | model.eval()
34 |
35 | # 第三步:模型推理
36 | output = model(input_data)
37 |
38 | # 第四步:结果后处理
39 | postprocess(
40 | output,
41 | configs['save_dir'],
42 | configs['model_name']
43 | )
44 | ```
45 |
--------------------------------------------------------------------------------
/examples/UGATIT/main.py:
--------------------------------------------------------------------------------
from ppqi import InferenceModel
from processor import preprocess, postprocess

# Pipeline configuration
configs = {
    'img_path': 'test.jpg',
    'save_dir': 'save_img',
    'model_name': 'UGATIT_100w',
    'use_gpu': False,
    'use_mkldnn': False
}

# Step 1: preprocess the input image
input_data = preprocess(configs['img_path'])

# Step 2: load the inference model
model = InferenceModel(
    modelpath=configs['model_name'],
    use_gpu=configs['use_gpu'],
    use_mkldnn=configs['use_mkldnn']
)
model.eval()

# Step 3: run inference
output = model(input_data)

# Step 4: postprocess and save the generated image
postprocess(
    output,
    configs['save_dir'],
    configs['model_name']
)
33 |
--------------------------------------------------------------------------------
/examples/UGATIT/processor.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | import numpy as np
4 |
5 | __all__ = ['preprocess', 'postprocess']
6 |
def preprocess(img_path):
    """Read an image and build the normalized (1, 3, 256, 256) input."""
    # Load and resize to the fixed network input size.
    img = cv2.resize(cv2.imread(img_path), (256, 256))

    # Map pixel values from [0, 255] to [-1, 1].
    img = (img / 255.0 - 0.5) / 0.5

    # HWC -> CHW, then add the batch dimension as float32.
    img = img.transpose((2, 0, 1))
    return np.expand_dims(img, axis=0).astype('float32')
25 |
def postprocess(outputs, output_dir, model_name):
    """De-normalize the generated image and save it as a JPEG."""
    # Map values back from [-1, 1] to [0, 255] and clamp.
    img = (outputs[0] * 0.5 + 0.5) * 255.
    img = np.clip(img, 0, 255).astype(np.uint8)

    # CHW -> HWC for OpenCV.
    img = img.transpose((1, 2, 0))

    # Make sure the output directory exists.
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Save the result.
    cv2.imwrite(os.path.join(output_dir, '%s.jpg' % model_name), img)
--------------------------------------------------------------------------------
/examples/UGATIT/save_img/UGATIT_100w.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AgentMaker/PaddleQuickInference/cc140ba55f089288c2efacdb5f0e47b839e34364/examples/UGATIT/save_img/UGATIT_100w.jpg
--------------------------------------------------------------------------------
/examples/UGATIT/test.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AgentMaker/PaddleQuickInference/cc140ba55f089288c2efacdb5f0e47b839e34364/examples/UGATIT/test.jpg
--------------------------------------------------------------------------------
/ppqi/__init__.py:
--------------------------------------------------------------------------------
1 | from .inference import InferenceModel
--------------------------------------------------------------------------------
/ppqi/inference.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 |
4 | from paddle.inference import create_predictor, Config
5 |
6 | __all__ = ['InferenceModel']
7 |
8 |
9 | class InferenceModel:
    # Constructor
    def __init__(self,
                 modelpath,
                 use_gpu=False,
                 gpu_id=0,
                 use_mkldnn=False,
                 cpu_threads=1):
        '''
        init the inference model

        modelpath: inference model path

        use_gpu: use gpu or not

        use_mkldnn: use mkldnn or not
        '''
        # Build and store the Paddle predictor configuration.
        self.config = self.load_config(modelpath, use_gpu, gpu_id, use_mkldnn, cpu_threads)
28 |
29 | # 打印函数
30 | def __repr__(self):
31 | '''
32 | get the numbers and name of inputs and outputs
33 | '''
34 | return 'input_num: %d\ninput_names: %s\noutput_num: %d\noutput_names: %s' % (
35 | self.input_num,
36 | str(self.input_names),
37 | self.output_num,
38 | str(self.output_names)
39 | )
40 |
41 | # 类调用函数
42 | def __call__(self, *input_datas, batch_size=1):
43 | '''
44 | call function
45 | '''
46 | return self.forward(*input_datas, batch_size=batch_size)
47 |
48 | # 模型参数加载函数
49 | def load_config(self, modelpath, use_gpu, gpu_id, use_mkldnn, cpu_threads):
50 | '''
51 | load the model config
52 |
53 | modelpath: inference model path
54 |
55 | use_gpu: use gpu or not
56 |
57 | use_mkldnn: use mkldnn or not
58 | '''
59 | # 对运行位置进行配置
60 | if use_gpu:
61 | try:
62 | int(os.environ.get('CUDA_VISIBLE_DEVICES'))
63 | except Exception:
64 | print(
65 | '''Error! Unable to use GPU. Please set the environment variables "CUDA_VISIBLE_DEVICES=GPU_id" to use GPU. Now switch to CPU to continue...''')
66 | use_gpu = False
67 |
68 | if os.path.isdir(modelpath):
69 | if os.path.exists(os.path.join(modelpath, "__params__")):
70 | # __model__ + __params__
71 | model = os.path.join(modelpath, "__model__")
72 | params = os.path.join(modelpath, "__params__")
73 | config = Config(model, params)
74 | elif os.path.exists(os.path.join(modelpath, "params")):
75 | # model + params
76 | model = os.path.join(modelpath, "model")
77 | params = os.path.join(modelpath, "params")
78 | config = Config(model, params)
79 | elif os.path.exists(os.path.join(modelpath, "__model__")):
80 | # __model__ + others
81 | config = Config(modelpath)
82 | else:
83 | raise Exception(
84 | "Error! Can\'t find the model in: %s. Please check your model path." % os.path.abspath(modelpath))
85 | elif os.path.exists(modelpath + ".pdmodel"):
86 | # *.pdmodel + *.pdiparams
87 | model = modelpath + ".pdmodel"
88 | params = modelpath + ".pdiparams"
89 | config = Config(model, params)
90 | elif isinstance(modelpath, Config):
91 | config = modelpath
92 | else:
93 | raise Exception(
94 | "Error! Can\'t find the model in: %s. Please check your model path." % os.path.abspath(modelpath))
95 |
96 | # 设置参数
97 | if use_gpu:
98 | config.enable_use_gpu(100, gpu_id)
99 | else:
100 | config.disable_gpu()
101 | config.set_cpu_math_library_num_threads(cpu_threads)
102 | if use_mkldnn:
103 | config.enable_mkldnn()
104 |
105 | config.disable_glog_info()
106 |
107 | # 返回配置
108 | return config
109 |
110 | # 预测器创建函数
111 | def eval(self):
112 | '''
113 | create the model predictor by model config
114 | '''
115 | # 创建预测器
116 | self.predictor = create_predictor(self.config)
117 |
118 | # 获取模型的输入输出名称
119 | self.input_names = self.predictor.get_input_names()
120 | self.output_names = self.predictor.get_output_names()
121 |
122 | # 获取模型的输入输出节点数量
123 | self.input_num = len(self.input_names)
124 | self.output_num = len(self.output_names)
125 |
126 | # 获取输入
127 | self.input_handles = []
128 | for input_name in self.input_names:
129 | self.input_handles.append(
130 | self.predictor.get_input_handle(input_name))
131 |
132 | # 获取输出
133 | self.output_handles = []
134 | for output_name in self.output_names:
135 | self.output_handles.append(
136 | self.predictor.get_output_handle(output_name))
137 |
138 | # 前向计算函数
139 | def forward(self, *input_datas, batch_size=1):
140 | """
141 | model inference
142 |
143 | batch_size: batch size
144 |
145 | *input_datas: x1, x2, ..., xn
146 | """
147 | # 切分输入数据
148 | datas_num = input_datas[0].shape[0]
149 | split_num = datas_num // batch_size + \
150 | 1 if datas_num % batch_size != 0 else datas_num // batch_size
151 | input_datas = [np.array_split(input_data, split_num)
152 | for input_data in input_datas]
153 |
154 | # 遍历输入数据进行预测
155 | outputs = {}
156 | for step in range(split_num):
157 | for i in range(self.input_num):
158 | input_data = input_datas[i][step].copy()
159 | self.input_handles[i].copy_from_cpu(input_data)
160 |
161 | self.predictor.run()
162 |
163 | for i in range(self.output_num):
164 | output = self.output_handles[i].copy_to_cpu()
165 | if i in outputs:
166 | outputs[i].append(output)
167 | else:
168 | outputs[i] = [output]
169 |
170 | # 预测结果合并
171 | for key in outputs.keys():
172 | outputs[key] = np.concatenate(outputs[key], 0)
173 |
174 | outputs = [v for v in outputs.values()]
175 |
176 | # 返回预测结果
177 | return tuple(outputs) if len(outputs) > 1 else outputs[0]
178 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | paddlepaddle
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
from setuptools import setup

# Distribution metadata for the ppqi (Paddle Quick Inference) package.
_METADATA = dict(
    name='ppqi',
    version='1.0.4',
    author='jm12138',
    author_email='2286040843@qq.com',
    packages=['ppqi'],
    license='Apache-2.0 License',
    description='Paddle Quick Inference',
)

setup(**_METADATA)
--------------------------------------------------------------------------------
/wheel/ppqi-1.0.4-py3-none-any.whl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AgentMaker/PaddleQuickInference/cc140ba55f089288c2efacdb5f0e47b839e34364/wheel/ppqi-1.0.4-py3-none-any.whl
--------------------------------------------------------------------------------