| No. | Item | API | Explanation | Notes |
| --- | --- | --- | --- | --- |
| 1 | Method | `PaddleInfer()` | Constructor: initializes the inference core and loads the local model | |
| | Parameter | `string model_path` | Static-graph model file | |
| | Parameter | `string params_path` | Model parameter file; empty by default | |
| 2 | Method | `void set_divice()` | Sets the inference device | Supports CPU, GPU, ONNX Runtime and oneDNN |
| | Parameter | `Divice divice` | Device selection | |
| | Parameter | `int num` | For CPU and ONNX Runtime: number of threads, default 10; for GPU: GPU index, default 0; for oneDNN: cache capacity, default 1 | |
| | Parameter | `ulong memory_init_size` | Initial GPU memory allocation (used only with GPU), default 500 | |
| | Parameter | `int workspace_size` | GPU workspace size (used only with GPU), default 30 | |
| 3 | Method | `List<string> get_input_names()` | Gets the input node names | |
| 4 | Method | `void set_input_shape()` | Sets the shape of an input node | Set according to the node's dimensions |
| | Parameter | `int[] input_shape` | Shape array | |
| | Parameter | `string input_name` | Node name | |
| 5 | Method | `void load_input_data()` | Loads image or plain input data | Overloaded method |
| | Parameter (overload 1) | `string input_name` | Input node name | |
| | | `float[] input_data` | Input data | |
| | Parameter (overload 2) | `string input_name` | Input node name | |
| | | `byte[] image_data` | Image data | |
| | | `ulong image_size` | Image data length | |
| | | `int type` | Preprocessing type: `type == 0`: mean-std normalization, direct resize; `type == 1`: plain normalization, direct resize; `type == 2`: mean-std normalization, affine transform | |
| 6 | Method | `void infer()` | Runs model inference | |
| 7 | Method | `List<string> get_output_names()` | Gets the output node names | |
| 8 | Method | `List<int> get_shape()` | Gets the shape of the specified node | |
| | Parameter | `string node_name` | Node name | |
| 9 | Method | `T[] read_infer_result<T>()` | Reads the inference result data | Supports Float32, Int32 and Int64 data |
| | Parameter | `string output_name` | Output node name | |
| | Parameter | `int data_size` | Output data length | |
| 10 | Method | `void delet()` | Frees the allocated memory | |

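To make the call sequence above concrete, here is a minimal usage sketch condensed from the ResNet50 sample shipped in this repository (`text_paddle_infer/ResNet50.cs`). It assumes a single-input classification model; the model and image paths are placeholders, and the result length is derived from `get_shape()` rather than hard-coded.

```csharp
using System;
using System.Collections.Generic;
using OpenCvSharp;
using PaddleInferenceSharp;

internal class Demo
{
    static void Main()
    {
        // 1. Load the static-graph model and its parameter file (placeholder paths)
        PaddleInfer predictor = new PaddleInfer("model.pdmodel", "model.pdiparams");
        // 2. Select the inference device (CPU with 10 threads here)
        predictor.set_divice(Divice.CPU, 10);
        // 3. Query the input nodes and set their shapes
        List<string> input_names = predictor.get_input_names();
        predictor.set_input_shape(new int[] { 1, 3, 224, 224 }, input_names[0]);
        // 4. Feed an encoded image (type 0: mean-std normalization, direct resize)
        Mat image = Cv2.ImRead("test.jpg");
        byte[] image_data = image.ImEncode(".bmp");
        predictor.load_input_data(input_names[0], image_data, Convert.ToUInt64(image_data.Length), 0);
        // 5. Run inference and read the Float32 result of the first output node
        predictor.infer();
        List<string> output_names = predictor.get_output_names();
        List<int> output_shape = predictor.get_shape(output_names[0]);
        int data_size = 1;
        foreach (int d in output_shape) data_size *= d;
        float[] result = predictor.read_infer_result<float>(output_names[0], data_size);
        Console.WriteLine("First output value: {0}", result[0]);
        // 6. Free the native memory held by the inference core
        predictor.delet();
    }
}
```
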
## 4.2 Enumerations

| No. | Enum | Member | Meaning |
| --- | --- | --- | --- |
| 1 | `Divice` (device name) | `CPU` | Inference on CPU |
| | | `GPU` | Inference on GPU |
| | | `ONNX_runtime` | Inference with ONNX Runtime |
| | | `oneDNN` | Inference with oneDNN |

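As a quick reference, the sample projects in this repository configure the backend with `set_divice()` as shown below; keep exactly one of these calls active, and note that the numeric arguments are the defaults described in the method table above.

```csharp
// Selecting the inference backend via the Divice enum
predictor.set_divice(Divice.CPU, 10);             // CPU: 10 threads
//predictor.set_divice(Divice.GPU, 0, 500, 30);   // GPU: card 0, 500 initial memory, 30 workspace
//predictor.set_divice(Divice.ONNX_runtime, 10);  // ONNX Runtime: 10 threads
//predictor.set_divice(Divice.oneDNN, 1);         // oneDNN: 1 cache
```
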
Example tutorials and detailed technical documentation covering the methods above will be published later; stay tuned.
235 |
--------------------------------------------------------------------------------
/doc/image/paddleinferencesharp.drawio.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guojin-yan/PaddleInferenceSharp/732b9cc3401bf657264b9d03bb06756a75e38e01/doc/image/paddleinferencesharp.drawio.png
--------------------------------------------------------------------------------
/doc/image/paddle名称.drawio.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guojin-yan/PaddleInferenceSharp/732b9cc3401bf657264b9d03bb06756a75e38e01/doc/image/paddle名称.drawio.png
--------------------------------------------------------------------------------
/paddle_infrer_api/dll/PaddleInferAPI.dll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guojin-yan/PaddleInferenceSharp/732b9cc3401bf657264b9d03bb06756a75e38e01/paddle_infrer_api/dll/PaddleInferAPI.dll
--------------------------------------------------------------------------------
/paddle_infrer_api/dll/PaddleInferAPI.exp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guojin-yan/PaddleInferenceSharp/732b9cc3401bf657264b9d03bb06756a75e38e01/paddle_infrer_api/dll/PaddleInferAPI.exp
--------------------------------------------------------------------------------
/paddle_infrer_api/dll/PaddleInferAPI.lib:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guojin-yan/PaddleInferenceSharp/732b9cc3401bf657264b9d03bb06756a75e38e01/paddle_infrer_api/dll/PaddleInferAPI.lib
--------------------------------------------------------------------------------
/paddle_infrer_api/paddle_infrer_api.vcxproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Debug
6 | Win32
7 |
8 |
9 | Release
10 | Win32
11 |
12 |
13 | Debug
14 | x64
15 |
16 |
17 | Release
18 | x64
19 |
20 |
21 |
22 | 16.0
23 | Win32Proj
24 | {cd3c1d0f-9f39-4f8d-8230-fda3d50deb98}
25 | paddleinfrerapi
26 | 10.0
27 |
28 |
29 |
30 | Application
31 | true
32 | v143
33 | Unicode
34 |
35 |
36 | Application
37 | false
38 | v143
39 | true
40 | Unicode
41 |
42 |
43 | Application
44 | true
45 | v143
46 | Unicode
47 |
48 |
49 | DynamicLibrary
50 | false
51 | v143
52 | true
53 | Unicode
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 | D:\Program Files\paddle_inference\paddle\include;E:\OpenCV Source\opencv-4.5.5\build\include;E:\OpenCV Source\opencv-4.5.5\build\include\opencv2;$(IncludePath)
75 | D:\Program Files\paddle_inference\paddle\lib;E:\OpenCV Source\opencv-4.5.5\build\x64\vc15\lib;$(LibraryPath)
76 | PaddleInferAPI
77 | dll/
78 |
79 |
80 |
81 | Level3
82 | true
83 | WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)
84 | true
85 |
86 |
87 | Console
88 | true
89 |
90 |
91 |
92 |
93 | Level3
94 | true
95 | true
96 | true
97 | WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)
98 | true
99 |
100 |
101 | Console
102 | true
103 | true
104 | true
105 |
106 |
107 |
108 |
109 | Level3
110 | true
111 | _DEBUG;_CONSOLE;%(PreprocessorDefinitions)
112 | true
113 |
114 |
115 | Console
116 | true
117 |
118 |
119 |
120 |
121 | Level3
122 | true
123 | true
124 | true
125 | NDEBUG;_CONSOLE;%(PreprocessorDefinitions)
126 | true
127 |
128 |
129 | Console
130 | true
131 | true
132 | true
133 | opencv_world455.lib;paddle_inference.lib;%(AdditionalDependencies)
134 |
135 |
136 |
137 |
138 |
139 |
140 |
141 |
142 |
--------------------------------------------------------------------------------
/paddle_infrer_api/paddle_infrer_api.vcxproj.filters:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | {4FC737F1-C7A5-4376-A066-2A32D752A2FF}
6 | cpp;c;cc;cxx;c++;cppm;ixx;def;odl;idl;hpj;bat;asm;asmx
7 |
8 |
9 | {93995380-89BD-4b04-88EB-625FBE52EBFB}
10 | h;hh;hpp;hxx;h++;hm;inl;inc;ipp;xsd
11 |
12 |
13 | {67DA6AB6-F800-4c08-8B7A-83BB121AAD01}
14 | rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms
15 |
16 |
17 |
18 |
19 | 源文件
20 |
21 |
22 |
--------------------------------------------------------------------------------
/paddle_infrer_api/src/paddle_infer_cpp_api.cpp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guojin-yan/PaddleInferenceSharp/732b9cc3401bf657264b9d03bb06756a75e38e01/paddle_infrer_api/src/paddle_infer_cpp_api.cpp
--------------------------------------------------------------------------------
/text_paddle_infer/PP-Yoloe.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 | using System.Text;
5 | using System.Threading.Tasks;
6 | using PaddleInferenceSharp;
7 | using OpenCvSharp;
8 |
9 | namespace text_paddle_infer
10 | {
11 | internal class PP_Yoloe
12 | {
13 | public static void paddle_deploy_ppyoloe()
14 | {
15 | //---------------------- 1. Model information ----------------------//
16 | string model_path = @"E:\Text_Model\ppyoloe_plus_crn_l_80e_coco\model.pdmodel";
17 | string params_path = @"E:\Text_Model\ppyoloe_plus_crn_l_80e_coco\model.pdiparams";
18 | string image_path = @"E:\Text_dataset\YOLOv5\0003.jpg";
19 | DateTime begin, end;
20 | TimeSpan t0, t1, t2, t3;
21 | //---------------------- 2. Create and configure the predictor ------------------//
22 | begin = DateTime.Now;
23 | PaddleInfer predictor = new PaddleInfer(model_path, params_path);
24 | // Set the device type
25 | //predictor.set_divice(Divice.CPU, 10); // CPU
26 | //predictor.set_divice(Divice.GPU, 0, 500, 30); // GPU
27 | predictor.set_divice(Divice.ONNX_runtime, 10); // ONNX_runtime
28 | //predictor.set_divice(Divice.oneDNN, 1); // oneDNN
29 | // Get the input node names
30 | List<string> input_name = predictor.get_input_names();
31 | for (int i = 0; i < input_name.Count; i++)
32 | {
33 | Console.WriteLine("Model input {0} {1}", i, input_name[i]);
34 | }
35 | // Set the input shapes
36 | int[] input_size1 = new int[4] { 1, 3, 640, 640 };
37 | predictor.set_input_shape(input_size1, input_name[0]);
38 | int[] input_size2 = new int[2] { 1,2};
39 | predictor.set_input_shape(input_size2, input_name[1]);
40 | end = DateTime.Now;
41 | t0 = end - begin;
42 | //---------------------- 3. Load inference data ------------------//
43 | begin = DateTime.Now;
44 | Mat image = Cv2.ImRead(image_path);
45 | // Place the image on a square background
46 | int max_image_length = image.Cols > image.Rows ? image.Cols : image.Rows;
47 | Mat max_image = Mat.Zeros(new Size(max_image_length, max_image_length), MatType.CV_8UC3);
48 | Rect roi = new Rect(0, 0, image.Cols, image.Rows);
49 | image.CopyTo(new Mat(max_image, roi));
50 | byte[] input_image_data = max_image.ImEncode(".bmp");
51 | // Data length
52 | ulong input_image_length = Convert.ToUInt64(input_image_data.Length);
53 | predictor.load_input_data(input_name[0], input_image_data, input_image_length, 2);
54 | float scale_factor = 640.0f/ max_image_length;
55 | float[] input_scale = new float[] { scale_factor, scale_factor };
56 | predictor.load_input_data(input_name[1], input_scale);
57 | end = DateTime.Now;
58 | t1 = end - begin;
59 | //---------------------- 4. Model inference ------------------//
60 | begin = DateTime.Now;
61 | predictor.infer();
62 | end = DateTime.Now;
63 | t2 = end - begin;
64 | //---------------------- 5. Model inference results ------------------//
65 | begin = DateTime.Now;
66 | int[] leng = new int[4];
67 | List<string> output_name = predictor.get_output_names();
68 | for (int i = 0; i < output_name.Count; i++)
69 | {
70 | Console.WriteLine("Model output {0} {1}", i, output_name[i]);
71 | }
72 | List<int> output_shape = predictor.get_shape(output_name[0]);
73 | Console.WriteLine("output_shape:{0} × {1}", output_shape[0], output_shape[1]);
74 | int[] output_num = predictor.read_infer_result<int>(output_name[1], 1);
75 | Console.WriteLine(output_num[0]);
76 | float[] output = predictor.read_infer_result<float>(output_name[0], output_num[0] * 6);
77 | List<Rect> boxes = new List<Rect>();
78 | List<int> classes = new List<int>();
79 | List<float> scores = new List<float>();
80 | for (int i = 0; i < output_num[0]; i++)
81 | {
82 | if (output[6 * i + 1] > 0.4)
83 | {
84 | scores.Add(output[6 * i + 1]);
85 | classes.Add((int)output[6 * i]);
86 | Rect rect = new Rect((int)output[6 * i + 2], (int)output[6 * i + 3],
87 | (int)(output[6 * i + 4] - output[6 * i + 2]), (int)(output[6 * i + 5] - output[6 * i + 3]));
88 | boxes.Add(rect);
89 | }
90 |
91 | }
92 | end = DateTime.Now;
93 | t3 = end - begin;
94 | Console.WriteLine("Model loading time: {0}", t0.TotalMilliseconds);
95 | Console.WriteLine("Inference data loading time: {0}", t1.TotalMilliseconds);
96 | Console.WriteLine("Model inference time: {0}", t2.TotalMilliseconds);
97 | Console.WriteLine("Result processing time: {0}", t3.TotalMilliseconds);
98 |
99 | for (int i = 0; i < classes.Count; i++)
100 | {
101 | Cv2.Rectangle(image, boxes[i], new Scalar(0, 0, 255), 1, LineTypes.Link8);
102 | Cv2.PutText(image, scores[i].ToString(), new Point(boxes[i].X, boxes[i].Y - 5),
103 | HersheyFonts.HersheySimplex, 0.5, new Scalar(0, 255, 0));
104 | }
105 | Cv2.ImShow("result", image);
106 | Cv2.WaitKey(0);
107 | }
108 | }
109 | }
110 |
--------------------------------------------------------------------------------
/text_paddle_infer/Program.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using OpenCvSharp;
3 | using PaddleInferenceSharp;
4 | namespace text_paddle_infer // Note: actual namespace depends on the project name.
5 | {
6 | internal class Program
7 | {
8 | static void Main(string[] args)
9 | {
10 | Console.WriteLine("Hello World!");
11 | //ResNet50.paddle_deploy_resnet50();
12 | PP_Yoloe.paddle_deploy_ppyoloe();
13 | }
14 | }
15 | }
--------------------------------------------------------------------------------
/text_paddle_infer/ResNet50.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 | using System.Text;
5 | using System.Threading.Tasks;
6 | using PaddleInferenceSharp;
7 | using OpenCvSharp;
8 |
9 | namespace text_paddle_infer
10 | {
11 | internal class ResNet50
12 | {
13 | public static void paddle_deploy_resnet50()
14 | {
15 | //---------------------- 1. Model information ----------------------//
16 | string model_path = "E:/Text_Model/flowerclas/inference.pdmodel";
17 | string params_path = "E:/Text_Model/flowerclas/inference.pdiparams";
18 | string image_path = "E:/Text_dataset/flowers102/jpg/image_00005.jpg";
19 | DateTime begin, end;
20 | TimeSpan t0, t1, t2, t3;
21 | //---------------------- 2. Create and configure the predictor ------------------//
22 | begin = DateTime.Now;
23 | PaddleInfer predictor = new PaddleInfer(model_path, params_path);
24 | // Set the device type
25 | predictor.set_divice(Divice.CPU, 10); // CPU
26 | //predictor.set_divice(Divice.GPU, 0, 500, 30); // GPU
27 | //predictor.set_divice(Divice.ONNX_runtime, 10); // ONNX_runtime
28 | //predictor.set_divice(Divice.oneDNN, 1); // oneDNN
29 | // Get the input node names
30 | List<string> input_name = predictor.get_input_names();
31 | for (int i = 0; i < input_name.Count; i++)
32 | {
33 | Console.WriteLine("Model input {0} {1}", i, input_name[i]);
34 | }
35 | // Set the input shape
36 | int[] input_size = new int[4] { 1, 3, 224, 224 };
37 | predictor.set_input_shape(input_size, input_name[0]);
38 | end = DateTime.Now;
39 | t0 = end - begin;
40 | //---------------------- 3. Load inference data ------------------//
41 | begin = DateTime.Now;
42 | Mat image = Cv2.ImRead(image_path);
43 | byte[] input_image_data = image.ImEncode(".bmp");
44 | // Data length
45 | ulong input_image_length = Convert.ToUInt64(input_image_data.Length);
46 | predictor.load_input_data(input_name[0], input_image_data, input_image_length, 0);
47 | end = DateTime.Now;
48 | t1 = end - begin;
49 | //---------------------- 4. Model inference ------------------//
50 | begin = DateTime.Now;
51 | predictor.infer();
52 | end = DateTime.Now;
53 | t2 = end - begin;
54 | //---------------------- 5. Model inference results ------------------//
55 | begin = DateTime.Now;
56 | int[] leng = new int[4];
57 | List<string> output_name = predictor.get_output_names();
58 | for (int i = 0; i < output_name.Count; i++)
59 | {
60 | Console.WriteLine("Model output {0} {1}", i, output_name[i]);
61 | }
62 | float[] output = predictor.read_infer_result<float>(output_name[0], 102);
63 | float max;
64 | int index = max_indax(output, out max);
65 | end = DateTime.Now;
66 | t3 = end - begin;
67 | Console.WriteLine("Top class: {0}, score: {1}.", index, max);
68 | Console.WriteLine("Model loading time: {0}", t0.TotalMilliseconds);
69 | Console.WriteLine("Inference data loading time: {0}", t1.TotalMilliseconds);
70 | Console.WriteLine("Model inference time: {0}", t2.TotalMilliseconds);
71 | Console.WriteLine("Result processing time: {0}", t3.TotalMilliseconds);
72 |
73 |
74 | }
75 |
76 | static int max_indax<T>(T[] data, out T max) where T : IComparable<T>
77 | {
78 | int index = 0;
79 | max = data[0];
80 | for (int i = 0; i < data.Length; i++)
81 | {
82 | if (data[i].CompareTo(max) > 0)
83 | {
84 | index = i;
85 | max = data[i];
86 | }
87 | }
88 |
89 |
90 |
91 |
92 | return index;
93 | }
94 | }
95 | }
96 |
--------------------------------------------------------------------------------
/text_paddle_infer/text_paddle_infer.csproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | Exe
5 | net6.0
6 | enable
7 | enable
8 | true
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
--------------------------------------------------------------------------------