├── ObjectTracker.xcodeproj
│   ├── xcuserdata
│   │   └── dasen.xcuserdatad
│   │       ├── xcdebugger
│   │       │   └── Breakpoints_v2.xcbkptlist
│   │       └── xcschemes
│   │           └── xcschememanagement.plist
│   └── project.pbxproj
├── ObjectTracker
│   ├── Info.plist
│   ├── Base.lproj
│   │   ├── LaunchScreen.storyboard
│   │   └── Main.storyboard
│   ├── Assets.xcassets
│   │   └── AppIcon.appiconset
│   │       └── Contents.json
│   ├── AppDelegate.swift
│   └── ViewController.swift
└── readme.md
/ObjectTracker.xcodeproj/xcuserdata/dasen.xcuserdatad/xcdebugger/Breakpoints_v2.xcbkptlist:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/ObjectTracker.xcodeproj/xcuserdata/dasen.xcuserdatad/xcschemes/xcschememanagement.plist:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
3 | <plist version="1.0">
4 | <dict>
5 |     <key>SchemeUserState</key>
6 |     <dict>
7 |         <key>ObjectTracker.xcscheme</key>
8 |         <dict>
9 |             <key>orderHint</key>
10 |             <integer>0</integer>
11 |         </dict>
12 |     </dict>
13 | </dict>
14 | </plist>
15 |
--------------------------------------------------------------------------------
/ObjectTracker/Info.plist:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
3 | <plist version="1.0">
4 | <dict>
5 |     <key>CFBundleDevelopmentRegion</key>
6 |     <string>$(DEVELOPMENT_LANGUAGE)</string>
7 |     <key>CFBundleExecutable</key>
8 |     <string>$(EXECUTABLE_NAME)</string>
9 |     <key>CFBundleIdentifier</key>
10 |     <string>$(PRODUCT_BUNDLE_IDENTIFIER)</string>
11 |     <key>CFBundleInfoDictionaryVersion</key>
12 |     <string>6.0</string>
13 |     <key>CFBundleName</key>
14 |     <string>$(PRODUCT_NAME)</string>
15 |     <key>CFBundlePackageType</key>
16 |     <string>APPL</string>
17 |     <key>CFBundleShortVersionString</key>
18 |     <string>1.0</string>
19 |     <key>CFBundleVersion</key>
20 |     <string>1</string>
21 |     <key>LSRequiresIPhoneOS</key>
22 |     <true/>
23 |     <key>NSCameraUsageDescription</key>
24 |     <string>""</string>
25 |     <key>UILaunchStoryboardName</key>
26 |     <string>LaunchScreen</string>
27 |     <key>UIMainStoryboardFile</key>
28 |     <string>Main</string>
29 |     <key>UIRequiredDeviceCapabilities</key>
30 |     <array>
31 |         <string>armv7</string>
32 |     </array>
33 |     <key>UISupportedInterfaceOrientations</key>
34 |     <array>
35 |         <string>UIInterfaceOrientationPortrait</string>
36 |         <string>UIInterfaceOrientationLandscapeLeft</string>
37 |         <string>UIInterfaceOrientationLandscapeRight</string>
38 |     </array>
39 |     <key>UISupportedInterfaceOrientations~ipad</key>
40 |     <array>
41 |         <string>UIInterfaceOrientationPortrait</string>
42 |         <string>UIInterfaceOrientationPortraitUpsideDown</string>
43 |         <string>UIInterfaceOrientationLandscapeLeft</string>
44 |         <string>UIInterfaceOrientationLandscapeRight</string>
45 |     </array>
46 | </dict>
47 | </plist>
48 |
--------------------------------------------------------------------------------
/ObjectTracker/Base.lproj/LaunchScreen.storyboard:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/ObjectTracker/Assets.xcassets/AppIcon.appiconset/Contents.json:
--------------------------------------------------------------------------------
1 | {
2 | "images" : [
3 | {
4 | "idiom" : "iphone",
5 | "size" : "20x20",
6 | "scale" : "2x"
7 | },
8 | {
9 | "idiom" : "iphone",
10 | "size" : "20x20",
11 | "scale" : "3x"
12 | },
13 | {
14 | "idiom" : "iphone",
15 | "size" : "29x29",
16 | "scale" : "2x"
17 | },
18 | {
19 | "idiom" : "iphone",
20 | "size" : "29x29",
21 | "scale" : "3x"
22 | },
23 | {
24 | "idiom" : "iphone",
25 | "size" : "40x40",
26 | "scale" : "2x"
27 | },
28 | {
29 | "idiom" : "iphone",
30 | "size" : "40x40",
31 | "scale" : "3x"
32 | },
33 | {
34 | "idiom" : "iphone",
35 | "size" : "60x60",
36 | "scale" : "2x"
37 | },
38 | {
39 | "idiom" : "iphone",
40 | "size" : "60x60",
41 | "scale" : "3x"
42 | },
43 | {
44 | "idiom" : "ipad",
45 | "size" : "20x20",
46 | "scale" : "1x"
47 | },
48 | {
49 | "idiom" : "ipad",
50 | "size" : "20x20",
51 | "scale" : "2x"
52 | },
53 | {
54 | "idiom" : "ipad",
55 | "size" : "29x29",
56 | "scale" : "1x"
57 | },
58 | {
59 | "idiom" : "ipad",
60 | "size" : "29x29",
61 | "scale" : "2x"
62 | },
63 | {
64 | "idiom" : "ipad",
65 | "size" : "40x40",
66 | "scale" : "1x"
67 | },
68 | {
69 | "idiom" : "ipad",
70 | "size" : "40x40",
71 | "scale" : "2x"
72 | },
73 | {
74 | "idiom" : "ipad",
75 | "size" : "76x76",
76 | "scale" : "1x"
77 | },
78 | {
79 | "idiom" : "ipad",
80 | "size" : "76x76",
81 | "scale" : "2x"
82 | },
83 | {
84 | "idiom" : "ipad",
85 | "size" : "83.5x83.5",
86 | "scale" : "2x"
87 | }
88 | ],
89 | "info" : {
90 | "version" : 1,
91 | "author" : "xcode"
92 | }
93 | }
--------------------------------------------------------------------------------
/ObjectTracker/AppDelegate.swift:
--------------------------------------------------------------------------------
1 | //
2 | // AppDelegate.swift
3 | // ObjectTracker
4 | //
5 | // Created by Jeffrey Bergier on 6/8/17.
6 | // Copyright © 2017 Saturday Apps. All rights reserved.
7 | //
8 |
9 | import UIKit
10 |
11 | @UIApplicationMain
12 | class AppDelegate: UIResponder, UIApplicationDelegate {
13 |
14 | var window: UIWindow?
15 |
16 |
17 | func application(_ application: UIApplication, didFinishLaunchingWithOptions launchOptions: [UIApplicationLaunchOptionsKey: Any]?) -> Bool {
18 | // Override point for customization after application launch.
19 | return true
20 | }
21 |
22 | func applicationWillResignActive(_ application: UIApplication) {
23 | // Sent when the application is about to move from active to inactive state. This can occur for certain types of temporary interruptions (such as an incoming phone call or SMS message) or when the user quits the application and it begins the transition to the background state.
24 | // Use this method to pause ongoing tasks, disable timers, and invalidate graphics rendering callbacks. Games should use this method to pause the game.
25 | }
26 |
27 | func applicationDidEnterBackground(_ application: UIApplication) {
28 | // Use this method to release shared resources, save user data, invalidate timers, and store enough application state information to restore your application to its current state in case it is terminated later.
29 | // If your application supports background execution, this method is called instead of applicationWillTerminate: when the user quits.
30 | }
31 |
32 | func applicationWillEnterForeground(_ application: UIApplication) {
33 | // Called as part of the transition from the background to the active state; here you can undo many of the changes made on entering the background.
34 | }
35 |
36 | func applicationDidBecomeActive(_ application: UIApplication) {
37 | // Restart any tasks that were paused (or not yet started) while the application was inactive. If the application was previously in the background, optionally refresh the user interface.
38 | }
39 |
40 | func applicationWillTerminate(_ application: UIApplication) {
41 | // Called when the application is about to terminate. Save data if appropriate. See also applicationDidEnterBackground:.
42 | }
43 |
44 |
45 | }
46 |
47 |
--------------------------------------------------------------------------------
/ObjectTracker/ViewController.swift:
--------------------------------------------------------------------------------
1 | //
2 | // ViewController.swift
3 | // ObjectTracker
4 | //
5 | // Created by Jeffrey Bergier on 6/8/17.
6 | // Copyright © 2017 Saturday Apps. All rights reserved.
7 | //
8 |
9 | import AVFoundation
10 | import Vision
11 | import UIKit
12 |
13 | class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
14 |
15 | @IBOutlet private weak var cameraView: UIView?
16 | @IBOutlet private weak var highlightView: UIView? {
17 | didSet {
18 | self.highlightView?.layer.borderColor = UIColor.red.cgColor
19 | self.highlightView?.layer.borderWidth = 4
20 | self.highlightView?.backgroundColor = .clear
21 | }
22 | }
23 |
24 | private let visionSequenceHandler = VNSequenceRequestHandler()
25 | private lazy var cameraLayer: AVCaptureVideoPreviewLayer = AVCaptureVideoPreviewLayer(session: self.captureSession)
26 | private lazy var captureSession: AVCaptureSession = {
27 | let session = AVCaptureSession()
28 | session.sessionPreset = AVCaptureSession.Preset.photo
29 | guard
30 | let backCamera = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back),
31 | let input = try? AVCaptureDeviceInput(device: backCamera)
32 | else { return session }
33 | session.addInput(input)
34 | return session
35 | }()
36 |
37 | override func viewDidLoad() {
38 | super.viewDidLoad()
39 |
40 | // hide the red focus area on load
41 | self.highlightView?.frame = .zero
42 |
43 | // make the camera appear on the screen
44 | self.cameraView?.layer.addSublayer(self.cameraLayer)
45 |
46 | // register to receive buffers from the camera
47 | let videoOutput = AVCaptureVideoDataOutput()
48 | videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "MyQueue"))
49 | self.captureSession.addOutput(videoOutput)
50 |
51 | // begin the session
52 | self.captureSession.startRunning()
53 | }
54 |
55 | override func viewDidLayoutSubviews() {
56 | super.viewDidLayoutSubviews()
57 |
58 | // make sure the layer is the correct size
59 | self.cameraLayer.frame = self.cameraView?.bounds ?? .zero
60 | }
61 |
62 | private var lastObservation: VNDetectedObjectObservation?
63 |
64 | func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
65 | guard
66 | // make sure the pixel buffer can be converted
67 | let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer),
68 | // make sure that there is a previous observation we can feed into the request
69 | let lastObservation = self.lastObservation
70 | else { return }
71 |
72 | // create the request
73 | let request = VNTrackObjectRequest(detectedObjectObservation: lastObservation, completionHandler: self.handleVisionRequestUpdate)
74 | // set the accuracy to high
75 | // this is slower, but it works a lot better
76 | request.trackingLevel = .accurate
77 |
78 | // perform the request
79 | do {
80 | try self.visionSequenceHandler.perform([request], on: pixelBuffer)
81 | } catch {
82 | print("Throws: \(error)")
83 | }
84 | }
85 |
86 | private func handleVisionRequestUpdate(_ request: VNRequest, error: Error?) {
87 | // Dispatch to the main queue because we are touching non-atomic, non-thread safe properties of the view controller
88 | DispatchQueue.main.async {
89 | // make sure we have an actual result
90 | guard let newObservation = request.results?.first as? VNDetectedObjectObservation else { return }
91 |
92 | // prepare for next loop
93 | self.lastObservation = newObservation
94 |
95 | // check the confidence level before updating the UI
96 | guard newObservation.confidence >= 0.3 else {
97 | // hide the rectangle when we lose accuracy so the user knows something is wrong
98 | self.highlightView?.frame = .zero
99 | return
100 | }
101 |
102 | // calculate view rect
103 | var transformedRect = newObservation.boundingBox
104 | transformedRect.origin.y = 1 - transformedRect.origin.y
105 | let convertedRect = self.cameraLayer.layerRectConverted(fromMetadataOutputRect: transformedRect)
106 |
107 | // move the highlight view
108 | self.highlightView?.frame = convertedRect
109 | }
110 | }
111 |
112 | @IBAction private func userTapped(_ sender: UITapGestureRecognizer) {
113 | // get the center of the tap
114 | self.highlightView?.frame.size = CGSize(width: 120, height: 120)
115 | self.highlightView?.center = sender.location(in: self.view)
116 |
117 | // // convert the rect for the initial observation
118 | let originalRect = self.highlightView?.frame ?? .zero
119 | var convertedRect = self.cameraLayer.metadataOutputRectConverted(fromLayerRect: originalRect)
120 | convertedRect.origin.y = 1 - convertedRect.origin.y
121 |
122 | // set the observation
123 | let newObservation = VNDetectedObjectObservation(boundingBox: convertedRect)
124 | self.lastObservation = newObservation
125 | }
126 |
127 | @IBAction private func resetTapped(_ sender: UIBarButtonItem) {
128 | self.lastObservation = nil
129 | self.highlightView?.frame = .zero
130 | }
131 | }
132 |
133 |
--------------------------------------------------------------------------------
/ObjectTracker/Base.lproj/Main.storyboard:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
At 1 a.m. Beijing time on June 6, 2017, this year's WWDC opened and Apple released the first beta of iOS 11. Along with iOS 11 came a batch of new APIs, such as ARKit, Core ML, FileProvider, IdentityLookup, Core NFC, and Vision.

This article is a brief introduction to using the Vision API. (Vision becomes far more powerful when combined with Core ML models, but that topic is beyond the scope of this article.)

## How Vision relates to Core ML
Vision is the image-analysis framework Apple introduced at WWDC 2017.

Core ML is the machine-learning framework Apple introduced at WWDC 2017.

### Core ML

As Apple's diagram shows, Core ML's job is to take a Core ML model and turn it into objects that our app project can use directly; in other words, it can be viewed as a model converter.

Vision's role here is essentially that of a consumer of Core ML models: it uses them to do the actual recognition.

### Vision

* According to the official documentation, Vision itself already offers Face Detection and Recognition, Machine Learning Image Analysis, Barcode Detection, Text Detection, and so on.

* So you can think of it this way: the Vision framework already ships with a number of trained models for the capabilities listed above (face detection, barcode detection, and so on). If the feature you need is one Vision can provide out of the box, you can simply use Vision's built-in classes and methods; if you need something more powerful, you still have to bring in another Core ML model.

### Vision and Core ML in summary

* Core ML can be viewed as a model converter: from a model file in the ML Model format it automatically generates classes and methods, and you can use those classes directly to run the analysis, which makes it much simpler to use a trained model in an app.

* Vision can analyze images by itself; it has many detection capabilities built in, which effectively means it comes with some models of its own. In addition, Vision can also drive another Core ML model that you supply to analyze an image (see the sketch below).

* Vision is built on top of Core ML. Using Vision still means using Core ML underneath; you just never write the Core ML code explicitly.

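As a rough illustration of that last point, here is a minimal Swift sketch of driving your own Core ML model through Vision. The `MobileNet` class is a hypothetical stand-in for whichever .mlmodel you have added to the project; the rest is the standard Vision API.

```
import Vision
import CoreML
import CoreGraphics

func classify(_ image: CGImage) throws {
    // Wrap the Core ML model (hypothetical Xcode-generated `MobileNet` class) for Vision.
    let model = try VNCoreMLModel(for: MobileNet().model)

    // The request runs the model; classification models yield VNClassificationObservation.
    let request = VNCoreMLRequest(model: model) { request, _ in
        guard let best = request.results?.first as? VNClassificationObservation else { return }
        print(best.identifier, best.confidence)
    }

    // The handler owns the image and performs the request.
    let handler = VNImageRequestHandler(cgImage: image, options: [:])
    try handler.perform([request])
}
```
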
## Vision's application scenarios

* Image registration

* Rectangle detection

* QR code / barcode detection

* Object tracking: faces, rectangles, and general templates

* Text detection: finding the bounding boxes of text, and text recognition

* Face detection: handles smiling faces, profile views, partially occluded faces, glasses, hats, and so on, and marks the rectangular region of each face

* Face landmarks: can mark the outline of the face, eyes, eyebrows, nose, mouth, and teeth, as well as the face's median line (each scenario maps to its own request class; see the sketch after this list)

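To make those scenarios concrete, here is a rough Swift mapping from each one to its Vision request class in iOS 11. This is a sketch for orientation only; `referenceImage` and `seedObservation` are placeholders the caller would supply.

```
import Vision
import CoreGraphics

func allScenarioRequests(referenceImage: CGImage,
                         seedObservation: VNDetectedObjectObservation) -> [VNRequest] {
    return [
        // Image registration (translation-only alignment against a reference image)
        VNTranslationalImageRegistrationRequest(targetedCGImage: referenceImage, options: [:]),
        VNDetectRectanglesRequest(),                                        // rectangle detection
        VNDetectBarcodesRequest(),                                          // QR / barcode detection
        VNTrackObjectRequest(detectedObjectObservation: seedObservation),   // object tracking
        VNDetectTextRectanglesRequest(),                                    // text bounding boxes
        VNDetectFaceRectanglesRequest(),                                    // face detection
        VNDetectFaceLandmarksRequest()                                      // face landmarks
    ]
}
```
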
## Vision's design philosophy

What Apple does best is making complicated things simple, and Vision's design philosophy is exactly that.
Abstractly, from the user's point of view, all we do is: ask a question --> run it through the machine --> get a result.

Developers don't need to be computer-vision experts; they only need the result. Everything complicated is left to Vision.

## How Vision's performance compares

A comparison of Vision with the other iOS frameworks that offer face detection:

Judging from the material Apple provided, the three options trade off against one another: Vision delivers the best accuracy, while Core Image and AV Capture come out ahead on processing time and energy use.

### Image types Vision accepts

Vision accepts several image types, for example (each corresponds to a VNImageRequestHandler initializer; see the sketch after this list):

* CIImage

* NSURL

* NSData

* CGImageRef

* CVPixelBufferRef

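A quick Swift sketch of those initializers, with placeholder inputs supplied by the caller:

```
import Vision
import CoreImage
import CoreGraphics
import CoreVideo
import Foundation

func handlers(ciImage: CIImage,
              fileURL: URL,
              imageData: Data,
              cgImage: CGImage,
              pixelBuffer: CVPixelBuffer) -> [VNImageRequestHandler] {
    return [
        VNImageRequestHandler(ciImage: ciImage, options: [:]),
        VNImageRequestHandler(url: fileURL, options: [:]),
        VNImageRequestHandler(data: imageData, options: [:]),
        VNImageRequestHandler(cgImage: cgImage, options: [:]),
        VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:])
    ]
}
```
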
## Using Vision, and its structure

The players involved in using Vision are:
the Request, the RequestHandler, the results, and the array of Observations inside the results.

There are many Request types, such as the ones listed in the diagram: face detection, landmark detection, text detection, barcode detection, and so on.

### Structure diagram

**Usage in a nutshell:**
You hand the Requests for the capabilities you want to a RequestHandler. The handler holds the image to be analyzed and dispatches the processing results to each Request's completion block, where the array of Observations can be read from the request's results property.

Which observations you get back depends on the request: for example VNFaceObservation, VNTextObservation, VNBarcodeObservation, or VNHorizonObservation. Most of these inherit, directly or indirectly, from VNDetectedObjectObservation, which in turn inherits from VNObservation. Depending on its type, an observation exposes properties such as boundingBox or landmarks that hold the coordinates and points of what was recognized; once we have those coordinates we can do the corresponding UI drawing.

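A small Swift sketch of reading the different observation types out of a finished request (the UI drawing step is left out):

```
import Vision

func handleResults(of request: VNRequest) {
    for observation in request.results ?? [] {
        switch observation {
        case let face as VNFaceObservation:
            print("face at", face.boundingBox)                 // normalized rect
        case let barcode as VNBarcodeObservation:
            print("barcode:", barcode.payloadStringValue ?? "?")
        case let text as VNTextObservation:
            print("text at", text.boundingBox)
        case let object as VNDetectedObjectObservation:
            print("object at", object.boundingBox)
        default:
            break
        }
    }
}
```
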
### A concrete face-detection example:

1. Create the RequestHandler for the image that needs to be processed.

```
// Convert to CIImage
CIImage *convertImage = [[CIImage alloc] initWithImage:image];

// Create the request handler
VNImageRequestHandler *detectRequestHandler = [[VNImageRequestHandler alloc] initWithCIImage:convertImage options:@{}];
```

2. Create the completion handler (the block that is called back once recognition has finished).

```
// Set up the completion block
VNRequestCompletionHandler completionHandler = ^(VNRequest *request, NSError * _Nullable error) {
    NSArray *observations = request.results;
};
```

3. Create the recognition Request and give it the completion handler.

```
VNImageBasedRequest *detectRequest = [[VNDetectFaceRectanglesRequest alloc] initWithCompletionHandler:completionHandler];
```

4. Perform the recognition request and process the data received in the completion block.

```
[detectRequestHandler performRequests:@[detectRequest] error:nil];
```
### Putting it all together:

After these few steps, basic face detection is in place.

```
// Convert to CIImage
CIImage *convertImage = [[CIImage alloc] initWithImage:image];

// Create the request handler
VNImageRequestHandler *detectRequestHandler = [[VNImageRequestHandler alloc] initWithCIImage:convertImage options:@{}];

// Set up the completion block
VNRequestCompletionHandler completionHandler = ^(VNRequest *request, NSError * _Nullable error) {
    NSArray *observations = request.results;
    [self handleImageWithType:type image:image observations:observations complete:complete];
};

// Create the face-rectangles request
VNImageBasedRequest *detectRequest = [[VNDetectFaceRectanglesRequest alloc] initWithCompletionHandler:completionHandler];

// Perform the recognition request
[detectRequestHandler performRequests:@[detectRequest] error:nil];
```

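For comparison, here is roughly the same flow in Swift, the language the ObjectTracker project itself uses. It is only a sketch; `handleFaces` is a hypothetical stand-in for whatever you do with the results.

```
import Vision
import UIKit

func detectFaces(in image: UIImage) {
    // Convert to CIImage
    guard let ciImage = CIImage(image: image) else { return }

    // Create the request handler that owns the image
    let handler = VNImageRequestHandler(ciImage: ciImage, options: [:])

    // Create the face-rectangles request with its completion block
    let request = VNDetectFaceRectanglesRequest { request, _ in
        let observations = request.results as? [VNFaceObservation] ?? []
        handleFaces(observations, in: image)   // hypothetical follow-up (e.g. drawing boxes)
    }

    // Perform the recognition request
    try? handler.perform([request])
}

func handleFaces(_ observations: [VNFaceObservation], in image: UIImage) {
    for face in observations {
        print("face bounding box (normalized):", face.boundingBox)
    }
}
```
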
### About VNFaceObservation:

The useful piece of information we get out of a VNFaceObservation is its boundingBox.

```
/// Handle the face-detection results
+ (void)faceRectangles:(NSArray *)observations image:(UIImage *_Nullable)image complete:(detectImageHandler _Nullable)complete {

    NSMutableArray *tempArray = @[].mutableCopy;

    for (VNFaceObservation *observation in observations) {
        CGRect faceRect = [self convertRect:observation.boundingBox imageSize:image.size];
    }
}
```

boundingBox is already a CGRect, but the x, y, width, and height it returns are ratios normalized to the image, so it has to be converted.

```
/// Convert a normalized rect into image coordinates
+ (CGRect)convertRect:(CGRect)oldRect imageSize:(CGSize)imageSize {

    CGFloat w = oldRect.size.width * imageSize.width;
    CGFloat h = oldRect.size.height * imageSize.height;
    CGFloat x = oldRect.origin.x * imageSize.width;
    CGFloat y = imageSize.height - (oldRect.origin.y * imageSize.height) - h;
    return CGRectMake(x, y, w, h);
}
```

As for why y is not simply oldRect.origin.y * imageSize.height: Vision's origin is the bottom-left corner while UIKit's is the top-left, so the value computed that way corresponds to the face's maximum y rather than its minimum y, and it has to be flipped.

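A quick numeric sanity check of the conversion, assuming a 1000×800 image and a normalized boundingBox of (x: 0.2, y: 0.1, w: 0.3, h: 0.5); the values are made up purely for illustration:

```
import CoreGraphics

let imageSize  = CGSize(width: 1000, height: 800)
let normalized = CGRect(x: 0.2, y: 0.1, width: 0.3, height: 0.5) // Vision: origin at bottom-left

let w = normalized.width  * imageSize.width                       // 300
let h = normalized.height * imageSize.height                      // 400
let x = normalized.minX   * imageSize.width                       // 200
let y = imageSize.height - normalized.minY * imageSize.height - h // 800 - 80 - 400 = 320

// UIKit rect: (200, 320, 300, 400). Using 0.1 * 800 = 80 directly as the UIKit y
// would put the box in the upper part of the image, even though in Vision's
// bottom-left coordinates the face actually sits in the lower part.
let converted = CGRect(x: x, y: y, width: w, height: h)
```
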
### About landmark detection:

A VNDetectFaceLandmarksRequest also returns VNFaceObservation objects, but this time the observation's landmarks property is populated; it stores the points of the person's facial features, for example:

```
// Face contour
@property (nonatomic, strong) VNFaceLandmarkRegion2D * _Nonnull faceContour;

// Left eye, right eye
@property (nonatomic, strong) VNFaceLandmarkRegion2D * _Nullable leftEye;
@property (nonatomic, strong) VNFaceLandmarkRegion2D * _Nullable rightEye;

// Nose, nose crest, median line
@property (nonatomic, strong) VNFaceLandmarkRegion2D * _Nullable nose;
@property (nonatomic, strong) VNFaceLandmarkRegion2D * _Nullable noseCrest;
@property (nonatomic, strong) VNFaceLandmarkRegion2D * _Nullable medianLine;

// Outer lips, inner lips
@property (nonatomic, strong) VNFaceLandmarkRegion2D * _Nullable outerLips;
@property (nonatomic, strong) VNFaceLandmarkRegion2D * _Nullable innerLips;

// Left eyebrow, right eyebrow
@property (nonatomic, strong) VNFaceLandmarkRegion2D * _Nullable leftEyebrow;
@property (nonatomic, strong) VNFaceLandmarkRegion2D * _Nullable rightEyebrow;

// Left pupil, right pupil
@property (nonatomic, strong) VNFaceLandmarkRegion2D * _Nullable leftPupil;
@property (nonatomic, strong) VNFaceLandmarkRegion2D * _Nullable rightPupil;
```

Each landmark region has a pointCount property, and through its normalizedPoints you can read out every point in the region; once those points are converted, we can do the corresponding UI drawing or other work.
For example:

```

UIImage *sourceImage = image;

// Iterate over all landmark regions
for (VNFaceLandmarkRegion2D *landmarks2D in pointArray) {

    CGPoint points[landmarks2D.pointCount];
    // Convert every point in the region
    for (int i=0; i