├── .github
│   └── workflows
│       └── feed.yml
├── .gitignore
├── CoreML.playgroundbook
│   └── Contents
│       ├── Chapters
│       │   └── CoreML.playgroundchapter
│       │       ├── Manifest.plist
│       │       └── Pages
│       │           ├── ObjectDetectionCamera.playgroundpage
│       │           │   ├── Manifest.plist
│       │           │   └── main.swift
│       │           ├── ObjectDetectionCameraVision.playgroundpage
│       │           │   ├── Manifest.plist
│       │           │   └── main.swift
│       │           ├── ObjectRecognitionCamera.playgroundpage
│       │           │   ├── Manifest.plist
│       │           │   └── main.swift
│       │           ├── ObjectRecognitionCameraVision.playgroundpage
│       │           │   ├── Manifest.plist
│       │           │   └── main.swift
│       │           ├── ObjectRecognitionImage.playgroundpage
│       │           │   ├── LiveView.swift
│       │           │   ├── Manifest.plist
│       │           │   └── main.swift
│       │           ├── ObjectRecognitionImageVision.playgroundpage
│       │           │   ├── LiveView.swift
│       │           │   ├── Manifest.plist
│       │           │   └── main.swift
│       │           └── Template.playgroundpage
│       │               ├── Manifest.plist
│       │               └── main.swift
│       ├── Manifest.plist
│       ├── PrivateResources
│       │   └── cover.png
│       ├── PublicResources
│       │   └── IMG_0032.JPG
│       └── UserModules
│           ├── PreviewViewController.playgroundmodule
│           │   └── Sources
│           │       └── PreviewViewController.swift
│           └── UserCode.playgroundmodule
│               └── Sources
│                   └── UserSource.swift
├── LICENSE
├── README.md
└── images
    ├── banner.png
    ├── capture.gif
    ├── how-to-import-model.jpg
    ├── preview.png
    └── thumbnail.png
/.github/workflows/feed.yml:
--------------------------------------------------------------------------------
 1 | name: Publish feed.json
 2 |
 3 | on:
 4 |   release:
 5 |     types: [published]
 6 |
 7 | jobs:
 8 |   deploy:
 9 |     runs-on: ubuntu-18.04
10 |     steps:
11 |       - uses: actions/checkout@v2
12 |
13 |       - name: Install SSH Client
14 |         uses: webfactory/ssh-agent@v0.2.0
15 |         with:
16 |           ssh-private-key: ${{ secrets.DEPLOY_KEY }}
17 |
18 |       - name: Checkout submodules
19 |         shell: bash
20 |         run: |
21 |           auth_header="$(git config --local --get http.https://github.com/.extraheader)"
22 |           git submodule sync --recursive
23 |           git -c "http.extraheader=$auth_header" -c protocol.version=2 submodule update --init --force --recursive --depth=1
24 |
25 |       - name: Build
26 |         env:
27 |           TITLE: Core ML Playground Book
28 |           SUBTITLE: Machine Learning Examples
29 |           PUBLISHER_NAME: Kenta Kubo
30 |           FEED_IDENTIFIER: xyz.kebo.playgroundbook.coreml
31 |           CONTACT_URL: https://kebo.xyz
32 |           DOCUMENT_TITLE: CoreML
33 |           DOCUMENT_OVERVIEW_SUBTITLE: Machine Learning Examples
34 |           DOCUMENT_DESCRIPTION: Machine Learning Examples
35 |           DOCUMENT_CONTENT_IDENTIFIER: xyz.kebo.playgroundbook.coreml.coreml
36 |           DOCUMENT_URL: https://kebo.xyz/coreml-playground/CoreML.playgroundbook.zip
37 |           PLAYGROUNDBOOK_FILENAME: CoreML.playgroundbook
38 |         run: |
39 |           zip -r "${PLAYGROUNDBOOK_FILENAME}.zip" "${PLAYGROUNDBOOK_FILENAME}"
40 |           DOCUMENT_PUBLISHED_DATE=2019-05-06T02:40:00+0900
41 |           DOCUMENT_LAST_UPDATED_DATE=$(date -Iseconds)
42 |           TAG_NAME=$(echo $GITHUB_REF | sed -e 's/refs\/tags\///g')
43 |           DOCUMENT_CONTENT_VERSION=$(echo $TAG_NAME | cut -c 2-)
44 |           cat > feed.json << EOF
45 |           {
46 |             "title": "${TITLE}",
47 |             "subtitle": "${SUBTITLE}",
48 |             "publisherName": "${PUBLISHER_NAME}",
49 |             "feedIdentifier": "${FEED_IDENTIFIER}",
50 |             "contactURL": "${CONTACT_URL}",
51 |             "formatVersion": "1.2",
52 |             "documents": [
53 |               {
54 |                 "title": "${DOCUMENT_TITLE}",
55 |                 "overviewSubtitle": "${DOCUMENT_OVERVIEW_SUBTITLE}",
56 |                 "description": "${DOCUMENT_DESCRIPTION}",
57 |                 "contentIdentifier": "${DOCUMENT_CONTENT_IDENTIFIER}",
58 |                 "contentVersion": "${DOCUMENT_CONTENT_VERSION}",
59 |                 "url": "${DOCUMENT_URL}",
60 |                 "publishedDate": "${DOCUMENT_PUBLISHED_DATE}",
61 |                 "lastUpdatedDate": "${DOCUMENT_LAST_UPDATED_DATE}",
62 |                 "thumbnailURL": "https://raw.githubusercontent.com/kkk669/coreml-playground/${TAG_NAME}/images/thumbnail.png",
63 |                 "bannerImageURL": "https://raw.githubusercontent.com/kkk669/coreml-playground/${TAG_NAME}/images/banner.png",
64 |                 "previewImageURLs": [
65 |                   "https://raw.githubusercontent.com/kkk669/coreml-playground/${TAG_NAME}/images/preview.png"
66 |                 ],
67 |                 "additionalInformation": [
68 |                   {
69 |                     "name": "Languages",
70 |                     "value": "English"
71 |                   }
72 |                 ]
73 |               }
74 |             ]
75 |           }
76 |           EOF
77 |           mkdir public && mv "${PLAYGROUNDBOOK_FILENAME}.zip" feed.json public/
78 |
79 |       - name: Deploy to GitHub Pages
80 |         uses: JamesIves/github-pages-deploy-action@releases/v3
81 |         with:
82 |           SSH: true
83 |           BRANCH: gh-pages
84 |           FOLDER: public
85 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.mlmodel
2 | CoreML.playgroundbook/Edits
--------------------------------------------------------------------------------
/CoreML.playgroundbook/Contents/Chapters/CoreML.playgroundchapter/Manifest.plist:
--------------------------------------------------------------------------------
 1 | <?xml version="1.0" encoding="UTF-8"?>
 2 | <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
 3 | <plist version="1.0">
 4 | <dict>
 5 |     <key>Name</key>
 6 |     <string>CoreML</string>
 7 |     <key>TemplatePageFilename</key>
 8 |     <string>Template.playgroundpage</string>
 9 |     <key>InitialUserPages</key>
10 |     <array>
11 |         <string>ObjectRecognitionImage.playgroundpage</string>
12 |         <string>ObjectRecognitionImageVision.playgroundpage</string>
13 |         <string>ObjectRecognitionCamera.playgroundpage</string>
14 |         <string>ObjectRecognitionCameraVision.playgroundpage</string>
15 |         <string>ObjectDetectionCamera.playgroundpage</string>
16 |         <string>ObjectDetectionCameraVision.playgroundpage</string>
17 |     </array>
18 | </dict>
19 | </plist>
20 |
--------------------------------------------------------------------------------
/CoreML.playgroundbook/Contents/Chapters/CoreML.playgroundchapter/Pages/ObjectDetectionCamera.playgroundpage/Manifest.plist:
--------------------------------------------------------------------------------
 1 | <?xml version="1.0" encoding="UTF-8"?>
 2 | <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
 3 | <plist version="1.0">
 4 | <dict>
 5 |     <key>Name</key>
 6 |     <string>ObjectDetectionCamera</string>
 7 |     <key>LiveViewEdgeToEdge</key>
 8 |     <true/>
 9 |     <key>LiveViewMode</key>
10 |     <string>VisibleByDefault</string>
11 |     <key>PlaygroundLoggingMode</key>
12 |     <string>Off</string>
13 |     <key>UserModuleSourceFilesToOpen</key>
14 |     <array>
15 |         <string>UserModules/PreviewViewController.playgroundmodule/Sources/PreviewViewController.swift</string>
16 |     </array>
17 | </dict>
18 | </plist>
19 |
--------------------------------------------------------------------------------
/CoreML.playgroundbook/Contents/Chapters/CoreML.playgroundchapter/Pages/ObjectDetectionCamera.playgroundpage/main.swift:
--------------------------------------------------------------------------------
1 | import ARKit
2 | import PlaygroundSupport
3 | import UIKit
4 | import Vision
5 |
6 | // Parameters
7 | // The model is from here: https://docs-assets.developer.apple.com/coreml/models/Image/ObjectDetection/YOLOv3Tiny/YOLOv3TinyInt8LUT.mlmodel
8 | let config = MLModelConfiguration()
9 | config.allowLowPrecisionAccumulationOnGPU = true
10 | config.computeUnits = .all
11 | let model = try MLModel(contentsOf: try MLModel.compileModel(at: #fileLiteral(resourceName: "YOLOv3TinyInt8LUT.mlmodel")), configuration: config)
12 | let inputName = "image"
13 | let iouThresholdName = "iouThreshold"
14 | let confidenceThresholdName = "confidenceThreshold"
15 | let outputName = "coordinates"
16 | let iouThreshold = 0.5
17 | let confidenceThreshold = 0.3
18 | let imageConstraint = model.modelDescription
19 | .inputDescriptionsByName[inputName]!
20 | .imageConstraint!
21 | let imageOptions: [MLFeatureValue.ImageOption: Any] = [
22 | .cropAndScale: VNImageCropAndScaleOption.scaleFill.rawValue
23 | ]
24 |
25 | // ViewControllers
26 | final class ViewController: PreviewViewController {
27 | let fpsLabel: UILabel = {
28 | let label = UILabel()
29 | label.translatesAutoresizingMaskIntoConstraints = false
30 | label.textColor = #colorLiteral(red: 1.0, green: 0.0, blue: 0.0, alpha: 1.0)
31 | label.text = "fps: -"
32 | return label
33 | }()
34 | let bboxLayer = CALayer()
35 |
36 | override func viewDidLoad() {
37 | super.viewDidLoad()
38 |
39 | self.arView.session.delegateQueue = .global(qos: .userInteractive)
40 | self.arView.session.delegate = self
41 |
42 | self.view.layer.addSublayer(self.bboxLayer)
43 | self.view.addSubview(self.fpsLabel)
44 |
45 | NSLayoutConstraint.activate([
46 | self.fpsLabel.bottomAnchor.constraint(equalTo: self.liveViewSafeAreaGuide.bottomAnchor),
47 | ])
48 | }
49 |
50 | override func viewWillLayoutSubviews() {
51 | super.viewWillLayoutSubviews()
52 | self.bboxLayer.position = CGPoint(x: self.view.bounds.midX, y: self.view.bounds.midY)
53 | }
54 |
55 | func detect(input: MLFeatureProvider) -> MLFeatureProvider {
56 | let start = Date()
57 | let result = try! model.prediction(from: input)
58 | let fps = 1 / Date().timeIntervalSince(start)
59 | DispatchQueue.main.async {
60 | self.fpsLabel.text = "fps: \(fps)"
61 | }
62 | return result
63 | }
64 |
65 | func drawResult(result: MLFeatureProvider) {
66 | CATransaction.begin()
67 | CATransaction.setValue(kCFBooleanTrue, forKey: kCATransactionDisableActions)
68 |
69 | // Remove all bboxes
70 | self.bboxLayer.sublayers = nil
71 |
72 | let coordinates = result.featureValue(for: "coordinates")!
73 | .multiArrayValue!
74 | let confidence = result.featureValue(for: "confidence")!
75 | .multiArrayValue!
76 | let num_det = coordinates.shape[0].uintValue
77 |
78 | let imgSize = self.bboxLayer.bounds.size
79 |
80 | for i in 0..<num_det {
81 | // Read the normalized [x, y, w, h] of the i-th box and scale it to the image size
82 | let cx = CGFloat(coordinates[[i, 0] as [NSNumber]].doubleValue) * imgSize.width
83 | let cy = CGFloat(coordinates[[i, 1] as [NSNumber]].doubleValue) * imgSize.height
84 | let w = CGFloat(coordinates[[i, 2] as [NSNumber]].doubleValue) * imgSize.width
85 | let h = CGFloat(coordinates[[i, 3] as [NSNumber]].doubleValue) * imgSize.height
86 | let bbox = CGRect(x: cx - w / 2, y: cy - h / 2, width: w, height: h)
87 |
88 | // Find the most confident class for this box
89 | let num_cls = confidence.shape[1].uintValue
90 | let featurePointer = UnsafePointer<Double>(OpaquePointer(confidence.dataPointer.advanced(by: Int(i) * Int(num_cls) * MemoryLayout<Double>.stride)))
91 | let (id, conf) = argmax(featurePointer, count: num_cls)
92 | let cls = coco_classes[id]
93 |
94 | // Render a bounding box
95 | let shapeLayer = CALayer()
96 | shapeLayer.borderColor = #colorLiteral(red: 1.0, green: 0.0, blue: 0.0, alpha: 1.0)
97 | shapeLayer.borderWidth = 2
98 | shapeLayer.bounds = bbox
99 | shapeLayer.position = CGPoint(x: bbox.midX, y: bbox.midY)
100 |
101 | // Render a description
102 | let textLayer = CATextLayer()
103 | textLayer.string = "\(cls): \(conf)"
104 | textLayer.font = UIFont.preferredFont(forTextStyle: .body)
105 | textLayer.bounds = CGRect(x: 0, y: 0, width: bbox.width - 10, height: bbox.height - 10)
106 | textLayer.position = CGPoint(x: bbox.midX, y: bbox.midY)
107 | textLayer.foregroundColor = #colorLiteral(red: 1.0, green: 0.0, blue: 0.0, alpha: 1.0)
108 | textLayer.contentsScale = 2.0 // Retina Display
109 | textLayer.setAffineTransform(CGAffineTransform(scaleX: 1, y: -1))
110 |
111 | shapeLayer.addSublayer(textLayer)
112 | self.bboxLayer.addSublayer(shapeLayer)
113 | }
114 |
115 | CATransaction.commit()
116 | }
117 | }
118 |
119 | extension ViewController: ARSessionDelegate {
120 | func session(_ session: ARSession, didUpdate frame: ARFrame) {
121 | let imageBuffer = frame.capturedImage
122 |
123 | let orientation = CGImagePropertyOrientation(interfaceOrientation: UIScreen.main.orientation)
124 | let ciImage = CIImage(cvPixelBuffer: imageBuffer).oriented(orientation)
125 | let context = CIContext(options: [.useSoftwareRenderer: false])
126 | let cgImage = context.createCGImage(ciImage, from: ciImage.extent)!
127 |
128 | let size = CGSize(width: cgImage.width, height: cgImage.height)
129 | let scale = self.view.bounds.size / size
130 | let maxScale = fmax(scale.width, scale.height)
131 | CATransaction.begin()
132 | CATransaction.setValue(kCFBooleanTrue, forKey: kCATransactionDisableActions)
133 | self.bboxLayer.setAffineTransform(CGAffineTransform(scaleX: maxScale, y: -maxScale))
134 | self.bboxLayer.bounds = CGRect(origin: .zero, size: size)
135 | self.bboxLayer.position = CGPoint(x: self.view.bounds.midX, y: self.view.bounds.midY)
136 | CATransaction.commit()
137 |
138 | // var cgImage: CGImage!
139 | // VTCreateCGImageFromCVPixelBuffer(imageBuffer, options: nil, imageOut: &cgImage)
140 | let featureValue = try! MLFeatureValue(cgImage: cgImage, constraint: imageConstraint, options: imageOptions)
141 | let input = try! MLDictionaryFeatureProvider(dictionary: [
142 | inputName: featureValue,
143 | iouThresholdName: iouThreshold,
144 | confidenceThresholdName: confidenceThreshold,
145 | ])
146 |
147 | let output = self.detect(input: input)
148 | self.drawResult(result: output)
149 | }
150 | }
151 |
152 | PlaygroundPage.current.wantsFullScreenLiveView = true
153 | PlaygroundPage.current.liveView = ViewController()
154 |
--------------------------------------------------------------------------------
/CoreML.playgroundbook/Contents/Chapters/CoreML.playgroundchapter/Pages/ObjectDetectionCameraVision.playgroundpage/Manifest.plist:
--------------------------------------------------------------------------------
 1 | <?xml version="1.0" encoding="UTF-8"?>
 2 | <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
 3 | <plist version="1.0">
 4 | <dict>
 5 |     <key>Name</key>
 6 |     <string>ObjectDetectionCamera w/ Vision</string>
 7 |     <key>LiveViewEdgeToEdge</key>
 8 |     <true/>
 9 |     <key>LiveViewMode</key>
10 |     <string>VisibleByDefault</string>
11 |     <key>PlaygroundLoggingMode</key>
12 |     <string>Off</string>
13 |     <key>UserModuleSourceFilesToOpen</key>
14 |     <array>
15 |         <string>UserModules/PreviewViewController.playgroundmodule/Sources/PreviewViewController.swift</string>
16 |     </array>
17 | </dict>
18 | </plist>
19 |
--------------------------------------------------------------------------------
/CoreML.playgroundbook/Contents/Chapters/CoreML.playgroundchapter/Pages/ObjectDetectionCameraVision.playgroundpage/main.swift:
--------------------------------------------------------------------------------
1 | import ARKit
2 | import PlaygroundSupport
3 | import UIKit
4 | import Vision
5 |
6 | // Parameters
7 | // The model is from here: https://docs-assets.developer.apple.com/coreml/models/Image/ObjectDetection/YOLOv3Tiny/YOLOv3TinyInt8LUT.mlmodel
8 | let config = MLModelConfiguration()
9 | config.allowLowPrecisionAccumulationOnGPU = true
10 | config.computeUnits = .all
11 | let model = try compileModel(at: #fileLiteral(resourceName: "YOLOv3TinyInt8LUT.mlmodel"), configuration: config)
12 | model.featureProvider = try MLDictionaryFeatureProvider(dictionary: [
13 | "iouThreshold": 0.5,
14 | "confidenceThreshold": 0.3,
15 | ])
16 |
17 | // ViewControllers
18 | final class ViewController: PreviewViewController {
19 | let bboxLayer = CALayer()
20 |
21 | lazy var request: VNCoreMLRequest = {
22 | let request = VNCoreMLRequest(model: model, completionHandler: self.processDetections)
23 | request.imageCropAndScaleOption = .scaleFill
24 | return request
25 | }()
26 |
27 | override func viewDidLoad() {
28 | super.viewDidLoad()
29 |
30 | self.arView.session.delegate = self
31 |
32 | self.view.layer.addSublayer(self.bboxLayer)
33 | }
34 |
35 | override func viewWillLayoutSubviews() {
36 | super.viewWillLayoutSubviews()
37 | self.bboxLayer.position = CGPoint(x: self.view.bounds.midX, y: self.view.bounds.midY)
38 | }
39 |
40 | func detect(imageBuffer: CVImageBuffer, orientation: CGImagePropertyOrientation) {
41 | try! VNImageRequestHandler(cvPixelBuffer: imageBuffer, orientation: orientation)
42 | .perform([self.request])
43 | }
44 |
45 | func processDetections(for request: VNRequest, error: Error?) {
46 | CATransaction.begin()
47 | CATransaction.setValue(kCFBooleanTrue, forKey: kCATransactionDisableActions)
48 |
49 | // Remove all bboxes
50 | self.bboxLayer.sublayers = nil
51 |
52 | request.results?
53 | .lazy
54 | .compactMap { $0 as? VNRecognizedObjectObservation }
55 | .forEach {
56 | let imgSize = self.bboxLayer.bounds.size
57 | let bbox = VNImageRectForNormalizedRect($0.boundingBox, Int(imgSize.width), Int(imgSize.height))
58 | let cls = $0.labels[0]
59 |
60 | // Render a bounding box
61 | let shapeLayer = CALayer()
62 | shapeLayer.borderColor = #colorLiteral(red: 1.0, green: 0.0, blue: 0.0, alpha: 1.0)
63 | shapeLayer.borderWidth = 2
64 | shapeLayer.bounds = bbox
65 | shapeLayer.position = CGPoint(x: bbox.midX, y: bbox.midY)
66 |
67 | // Render a description
68 | let textLayer = CATextLayer()
69 | textLayer.string = "\(cls.identifier): \(cls.confidence)"
70 | textLayer.font = UIFont.preferredFont(forTextStyle: .body)
71 | textLayer.bounds = CGRect(x: 0, y: 0, width: bbox.width - 10, height: bbox.height - 10)
72 | textLayer.position = CGPoint(x: bbox.midX, y: bbox.midY)
73 | textLayer.foregroundColor = #colorLiteral(red: 1.0, green: 0.0, blue: 0.0, alpha: 1.0)
74 | textLayer.contentsScale = 2.0 // Retina Display
75 | textLayer.setAffineTransform(CGAffineTransform(scaleX: 1, y: -1))
76 |
77 | shapeLayer.addSublayer(textLayer)
78 | self.bboxLayer.addSublayer(shapeLayer)
79 | }
80 |
81 | CATransaction.commit()
82 | }
83 | }
84 |
85 | extension ViewController: ARSessionDelegate {
86 | func session(_ session: ARSession, didUpdate frame: ARFrame) {
87 | let imageBuffer = frame.capturedImage
88 |
89 | let orientation = CGImagePropertyOrientation(interfaceOrientation: UIScreen.main.orientation)
90 |
91 | var size = CVImageBufferGetDisplaySize(imageBuffer)
92 | if orientation == .right || orientation == .left {
93 | size = CGSize(width: size.height, height: size.width)
94 | }
95 | let scale = self.view.bounds.size / size
96 | let maxScale = fmax(scale.width, scale.height)
97 | CATransaction.begin()
98 | CATransaction.setValue(kCFBooleanTrue, forKey: kCATransactionDisableActions)
99 | self.bboxLayer.setAffineTransform(CGAffineTransform(scaleX: maxScale, y: -maxScale))
100 | self.bboxLayer.bounds = CGRect(origin: .zero, size: size)
101 | self.bboxLayer.position = CGPoint(x: self.view.bounds.midX, y: self.view.bounds.midY)
102 | CATransaction.commit()
103 |
104 | self.detect(imageBuffer: imageBuffer, orientation: orientation)
105 | }
106 | }
107 |
108 | PlaygroundPage.current.wantsFullScreenLiveView = true
109 | PlaygroundPage.current.liveView = ViewController()
110 |
--------------------------------------------------------------------------------
/CoreML.playgroundbook/Contents/Chapters/CoreML.playgroundchapter/Pages/ObjectRecognitionCamera.playgroundpage/Manifest.plist:
--------------------------------------------------------------------------------
 1 | <?xml version="1.0" encoding="UTF-8"?>
 2 | <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
 3 | <plist version="1.0">
 4 | <dict>
 5 |     <key>Name</key>
 6 |     <string>ObjectRecognitionCamera</string>
 7 |     <key>LiveViewEdgeToEdge</key>
 8 |     <true/>
 9 |     <key>LiveViewMode</key>
10 |     <string>VisibleByDefault</string>
11 |     <key>PlaygroundLoggingMode</key>
12 |     <string>Off</string>
13 |     <key>UserModuleSourceFilesToOpen</key>
14 |     <array>
15 |         <string>UserModules/PreviewViewController.playgroundmodule/Sources/PreviewViewController.swift</string>
16 |     </array>
17 | </dict>
18 | </plist>
19 |
--------------------------------------------------------------------------------
/CoreML.playgroundbook/Contents/Chapters/CoreML.playgroundchapter/Pages/ObjectRecognitionCamera.playgroundpage/main.swift:
--------------------------------------------------------------------------------
1 | import ARKit
2 | import PlaygroundSupport
3 | import UIKit
4 | import Vision
5 |
6 | // Parameters
7 | // The model is from here: https://docs-assets.developer.apple.com/coreml/models/Image/ImageClassification/MobileNetV2/MobileNetV2Int8LUT.mlmodel
8 | let config = MLModelConfiguration()
9 | config.allowLowPrecisionAccumulationOnGPU = true
10 | config.computeUnits = .all
11 | let model = try MLModel(contentsOf: try MLModel.compileModel(at: #fileLiteral(resourceName: "MobileNetV2Int8LUT.mlmodel")), configuration: config)
12 | let inputName = "image"
13 | let outputName = "classLabelProbs"
14 | let threshold: Float = 0.5
15 | let imageConstraint = model.modelDescription
16 | .inputDescriptionsByName[inputName]!
17 | .imageConstraint!
18 | let imageOptions: [MLFeatureValue.ImageOption: Any] = [
19 | .cropAndScale: VNImageCropAndScaleOption.scaleFill.rawValue
20 | ]
21 |
22 | // ViewControllers
23 | final class ViewController: PreviewViewController {
24 | let fpsLabel: UILabel = {
25 | let label = UILabel()
26 | label.translatesAutoresizingMaskIntoConstraints = false
27 | label.textColor = #colorLiteral(red: 1.0, green: 0.0, blue: 0.0, alpha: 1.0)
28 | label.text = "fps: -"
29 | return label
30 | }()
31 | let classesLabel: UILabel = {
32 | let label = UILabel()
33 | label.translatesAutoresizingMaskIntoConstraints = false
34 | label.textAlignment = .center
35 | label.backgroundColor = #colorLiteral(red: 0.258823543787003, green: 0.756862759590149, blue: 0.968627452850342, alpha: 0.5)
36 | label.text = "Nothing is detected."
37 | return label
38 | }()
39 |
40 | override func viewDidLoad() {
41 | super.viewDidLoad()
42 |
43 | self.arView.session.delegateQueue = .global(qos: .userInteractive)
44 | self.arView.session.delegate = self
45 |
46 | self.view.addSubview(self.classesLabel)
47 | self.view.addSubview(self.fpsLabel)
48 |
49 | NSLayoutConstraint.activate([
50 | self.classesLabel.bottomAnchor.constraint(equalTo: self.liveViewSafeAreaGuide.bottomAnchor),
51 | self.classesLabel.leadingAnchor.constraint(equalTo: self.liveViewSafeAreaGuide.leadingAnchor),
52 | self.classesLabel.trailingAnchor.constraint(equalTo: self.liveViewSafeAreaGuide.trailingAnchor),
53 | self.fpsLabel.bottomAnchor.constraint(equalTo: self.liveViewSafeAreaGuide.bottomAnchor),
54 | ])
55 | }
56 |
57 | func detect(input: MLFeatureProvider) -> MLFeatureProvider {
58 | let start = Date()
59 | let result = try! model.prediction(from: input)
60 | let fps = 1 / Date().timeIntervalSince(start)
61 | DispatchQueue.main.async {
62 | self.fpsLabel.text = "fps: \(fps)"
63 | }
64 | return result
65 | }
66 |
67 | func drawResult(result: MLFeatureProvider) {
68 | DispatchQueue.main.async {
69 | self.classesLabel.text = ""
70 | }
71 |
72 | result.featureValue(for: outputName)?
73 | .dictionaryValue
74 | .lazy
75 | .filter { $0.1.floatValue >= threshold }
76 | .sorted { $0.1.floatValue > $1.1.floatValue }
77 | .forEach { name, confidence in
78 | DispatchQueue.main.async {
79 | self.classesLabel.text?.append("\(name): \(confidence)\n")
80 | }
81 | }
82 | }
83 | }
84 |
85 | extension ViewController: ARSessionDelegate {
86 | func session(_ session: ARSession, didUpdate frame: ARFrame) {
87 | let imageBuffer = frame.capturedImage
88 |
89 | let orientation = CGImagePropertyOrientation(interfaceOrientation: UIScreen.main.orientation)
90 | let ciImage = CIImage(cvPixelBuffer: imageBuffer).oriented(orientation)
91 | let context = CIContext(options: [.useSoftwareRenderer: false])
92 | let cgImage = context.createCGImage(ciImage, from: ciImage.extent)!
93 |
94 | let featureValue = try! MLFeatureValue(cgImage: cgImage, constraint: imageConstraint, options: imageOptions)
95 | let input = try! MLDictionaryFeatureProvider(dictionary: [inputName: featureValue])
96 |
97 | let output = self.detect(input: input)
98 | self.drawResult(result: output)
99 | }
100 | }
101 |
102 | PlaygroundPage.current.wantsFullScreenLiveView = true
103 | PlaygroundPage.current.liveView = ViewController()
104 |
--------------------------------------------------------------------------------
/CoreML.playgroundbook/Contents/Chapters/CoreML.playgroundchapter/Pages/ObjectRecognitionCameraVision.playgroundpage/Manifest.plist:
--------------------------------------------------------------------------------
 1 | <?xml version="1.0" encoding="UTF-8"?>
 2 | <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
 3 | <plist version="1.0">
 4 | <dict>
 5 |     <key>Name</key>
 6 |     <string>ObjectRecognitionCamera w/ Vision</string>
 7 |     <key>LiveViewEdgeToEdge</key>
 8 |     <true/>
 9 |     <key>LiveViewMode</key>
10 |     <string>VisibleByDefault</string>
11 |     <key>PlaygroundLoggingMode</key>
12 |     <string>Off</string>
13 |     <key>UserModuleSourceFilesToOpen</key>
14 |     <array>
15 |         <string>UserModules/PreviewViewController.playgroundmodule/Sources/PreviewViewController.swift</string>
16 |     </array>
17 | </dict>
18 | </plist>
19 |
--------------------------------------------------------------------------------
/CoreML.playgroundbook/Contents/Chapters/CoreML.playgroundchapter/Pages/ObjectRecognitionCameraVision.playgroundpage/main.swift:
--------------------------------------------------------------------------------
1 | import ARKit
2 | import PlaygroundSupport
3 | import UIKit
4 | import Vision
5 |
6 | // Parameters
7 | // The model is from here: https://docs-assets.developer.apple.com/coreml/models/Image/ImageClassification/MobileNetV2/MobileNetV2Int8LUT.mlmodel
8 | let config = MLModelConfiguration()
9 | config.allowLowPrecisionAccumulationOnGPU = true
10 | config.computeUnits = .all
11 | let model = try! compileModel(at: #fileLiteral(resourceName: "MobileNetV2Int8LUT.mlmodel"), configuration: config)
12 | let threshold: Float = 0.5
13 |
14 | // ViewControllers
15 | final class ViewController: PreviewViewController {
16 | let classesLabel: UILabel = {
17 | let label = UILabel()
18 | label.translatesAutoresizingMaskIntoConstraints = false
19 | label.textAlignment = .center
20 | label.backgroundColor = #colorLiteral(red: 0.258823543787003, green: 0.756862759590149, blue: 0.968627452850342, alpha: 0.5)
21 | label.text = "Nothing is detected."
22 | return label
23 | }()
24 |
25 | lazy var request: VNCoreMLRequest = {
26 | let request = VNCoreMLRequest(model: model, completionHandler: self.processClassifications)
27 | request.imageCropAndScaleOption = .scaleFill
28 | return request
29 | }()
30 |
31 | override func viewDidLoad() {
32 | super.viewDidLoad()
33 |
34 | self.arView.session.delegate = self
35 |
36 | self.view.addSubview(self.classesLabel)
37 |
38 | NSLayoutConstraint.activate([
39 | self.classesLabel.bottomAnchor.constraint(equalTo: self.liveViewSafeAreaGuide.bottomAnchor),
40 | self.classesLabel.leadingAnchor.constraint(equalTo: self.liveViewSafeAreaGuide.leadingAnchor),
41 | self.classesLabel.trailingAnchor.constraint(equalTo: self.liveViewSafeAreaGuide.trailingAnchor),
42 | ])
43 | }
44 |
45 | func detect(imageBuffer: CVImageBuffer, orientation: CGImagePropertyOrientation) {
46 | try! VNImageRequestHandler(cvPixelBuffer: imageBuffer, orientation: orientation)
47 | .perform([self.request])
48 | }
49 |
50 | func processClassifications(for request: VNRequest, error: Error?) {
51 | self.classesLabel.text = ""
52 |
53 | request.results?
54 | .lazy
55 | .compactMap { $0 as? VNClassificationObservation }
56 | .filter { $0.confidence >= threshold }
57 | .forEach { cls in
58 | self.classesLabel.text?.append("\(cls.identifier): \(cls.confidence)\n")
59 | }
60 | }
61 | }
62 |
63 | extension ViewController: ARSessionDelegate {
64 | func session(_ session: ARSession, didUpdate frame: ARFrame) {
65 | self.detect(
66 | imageBuffer: frame.capturedImage,
67 | orientation: CGImagePropertyOrientation(interfaceOrientation: UIScreen.main.orientation)
68 | )
69 | }
70 | }
71 |
72 | PlaygroundPage.current.wantsFullScreenLiveView = true
73 | PlaygroundPage.current.liveView = ViewController()
74 |
--------------------------------------------------------------------------------
/CoreML.playgroundbook/Contents/Chapters/CoreML.playgroundchapter/Pages/ObjectRecognitionImage.playgroundpage/LiveView.swift:
--------------------------------------------------------------------------------
1 | import UIKit
2 | import PlaygroundSupport
3 |
4 | let imageView = UIImageView(image: #imageLiteral(resourceName: "IMG_0032.JPG"))
5 | imageView.contentMode = .scaleAspectFit
6 | let label = UILabel()
7 | label.textAlignment = .center
8 | label.backgroundColor = #colorLiteral(red: 0.258823543787003, green: 0.756862759590149, blue: 0.968627452850342, alpha: 1.0)
9 | label.text = "class: confidence"
10 | let stackView = UIStackView(arrangedSubviews: [imageView, label])
11 | stackView.axis = .vertical
12 | PlaygroundPage.current.liveView = stackView
13 |
--------------------------------------------------------------------------------
/CoreML.playgroundbook/Contents/Chapters/CoreML.playgroundchapter/Pages/ObjectRecognitionImage.playgroundpage/Manifest.plist:
--------------------------------------------------------------------------------
 1 | <?xml version="1.0" encoding="UTF-8"?>
 2 | <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
 3 | <plist version="1.0">
 4 | <dict>
 5 |     <key>Name</key>
 6 |     <string>ObjectRecognitionImage</string>
 7 |     <key>LiveViewEdgeToEdge</key>
 8 |     <true/>
 9 |     <key>LiveViewMode</key>
10 |     <string>VisibleByDefault</string>
11 |     <key>PlaygroundLoggingMode</key>
12 |     <string>Off</string>
13 |     <key>UserModuleSourceFilesToOpen</key>
14 |     <array/>
15 | </dict>
16 | </plist>
17 |
--------------------------------------------------------------------------------
/CoreML.playgroundbook/Contents/Chapters/CoreML.playgroundchapter/Pages/ObjectRecognitionImage.playgroundpage/main.swift:
--------------------------------------------------------------------------------
1 | import Vision
2 | import UIKit
3 | import PlaygroundSupport
4 |
5 | // Parameters
6 | // The model is from here: https://docs-assets.developer.apple.com/coreml/models/Image/ImageClassification/MobileNetV2/MobileNetV2Int8LUT.mlmodel
7 | let model = try MLModel(contentsOf: try MLModel.compileModel(at: #fileLiteral(resourceName: "MobileNetV2Int8LUT.mlmodel")))
8 | let inputName = "image"
9 | let outputName = "classLabelProbs"
10 | let uiImage = #imageLiteral(resourceName: "IMG_0032.JPG")
11 | let threshold: Float = 0.5
12 |
13 | // Views
14 | let imageView = UIImageView(image: uiImage)
15 | imageView.contentMode = .scaleAspectFit
16 | let stackView = UIStackView(arrangedSubviews: [imageView])
17 | stackView.axis = .vertical
18 | PlaygroundPage.current.liveView = stackView
19 |
20 | // Object Recognition
21 | let imageConstraint = model.modelDescription
22 | .inputDescriptionsByName[inputName]!
23 | .imageConstraint!
24 | let imageOptions: [MLFeatureValue.ImageOption: Any] = [
25 | .cropAndScale: VNImageCropAndScaleOption.scaleFill.rawValue
26 | ]
27 | let featureValue = try MLFeatureValue(cgImage: uiImage.cgImage!, constraint: imageConstraint, options: imageOptions)
28 | let featureProvider = try MLDictionaryFeatureProvider(dictionary: [inputName: featureValue])
29 | let result = try model.prediction(from: featureProvider)
30 | result.featureValue(for: outputName)?
31 | .dictionaryValue
32 | .lazy
33 | .filter { $0.1.floatValue >= threshold }
34 | .sorted { $0.1.floatValue > $1.1.floatValue }
35 | .map { name, confidence in
36 | let label = UILabel()
37 | label.textAlignment = .center
38 | label.backgroundColor = #colorLiteral(red: 0.258823543787003, green: 0.756862759590149, blue: 0.968627452850342, alpha: 1.0)
39 | label.text = "\(name) \(confidence)"
40 | return label
41 | }
42 | .forEach { label in
43 | DispatchQueue.main.async {
44 | stackView.addArrangedSubview(label)
45 | }
46 | }
--------------------------------------------------------------------------------
/CoreML.playgroundbook/Contents/Chapters/CoreML.playgroundchapter/Pages/ObjectRecognitionImageVision.playgroundpage/LiveView.swift:
--------------------------------------------------------------------------------
1 | import UIKit
2 | import PlaygroundSupport
3 |
4 | let imageView = UIImageView(image: #imageLiteral(resourceName: "IMG_0032.JPG"))
5 | imageView.contentMode = .scaleAspectFit
6 | let label = UILabel()
7 | label.textAlignment = .center
8 | label.backgroundColor = #colorLiteral(red: 0.258823543787003, green: 0.756862759590149, blue: 0.968627452850342, alpha: 1.0)
9 | label.text = "class: confidence"
10 | let stackView = UIStackView(arrangedSubviews: [imageView, label])
11 | stackView.axis = .vertical
12 | PlaygroundPage.current.liveView = stackView
13 |
--------------------------------------------------------------------------------
/CoreML.playgroundbook/Contents/Chapters/CoreML.playgroundchapter/Pages/ObjectRecognitionImageVision.playgroundpage/Manifest.plist:
--------------------------------------------------------------------------------
 1 | <?xml version="1.0" encoding="UTF-8"?>
 2 | <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
 3 | <plist version="1.0">
 4 | <dict>
 5 |     <key>Name</key>
 6 |     <string>ObjectRecognitionImage w/ Vision</string>
 7 |     <key>LiveViewEdgeToEdge</key>
 8 |     <true/>
 9 |     <key>LiveViewMode</key>
10 |     <string>VisibleByDefault</string>
11 |     <key>PlaygroundLoggingMode</key>
12 |     <string>Off</string>
13 |     <key>UserModuleSourceFilesToOpen</key>
14 |     <array/>
15 | </dict>
16 | </plist>
17 |
--------------------------------------------------------------------------------
/CoreML.playgroundbook/Contents/Chapters/CoreML.playgroundchapter/Pages/ObjectRecognitionImageVision.playgroundpage/main.swift:
--------------------------------------------------------------------------------
1 | import Vision
2 | import UIKit
3 | import PlaygroundSupport
4 |
5 | // Parameters
6 | // The model is from here: https://docs-assets.developer.apple.com/coreml/models/Image/ImageClassification/MobileNetV2/MobileNetV2Int8LUT.mlmodel
7 | let model = try compileModel(at: #fileLiteral(resourceName: "MobileNetV2Int8LUT.mlmodel"))
8 | let uiImage = #imageLiteral(resourceName: "IMG_0032.JPG")
9 | let threshold: Float = 0.5
10 |
11 | // Views
12 | let imageView = UIImageView(image: uiImage)
13 | imageView.contentMode = .scaleAspectFit
14 | let stackView = UIStackView(arrangedSubviews: [imageView])
15 | stackView.axis = .vertical
16 | PlaygroundPage.current.liveView = stackView
17 |
18 | // Object Recognition
19 | let request = VNCoreMLRequest(model: model) { request, error in
20 | request.results?
21 | .lazy
22 | .compactMap { $0 as? VNClassificationObservation }
23 | .filter { $0.confidence >= threshold }
24 | .map {
25 | let label = UILabel()
26 | label.textAlignment = .center
27 | label.backgroundColor = #colorLiteral(red: 0.258823543787003, green: 0.756862759590149, blue: 0.968627452850342, alpha: 1.0)
28 | label.text = "\($0.identifier): \($0.confidence)"
29 | return label
30 | }
31 | .forEach { label in
32 | DispatchQueue.main.async {
33 | stackView.addArrangedSubview(label)
34 | }
35 | }
36 | }
37 | request.imageCropAndScaleOption = .scaleFill
38 |
39 | guard let ciImage = CIImage(image: uiImage) else { fatalError() }
40 | try VNImageRequestHandler(ciImage: ciImage).perform([request])
41 |
--------------------------------------------------------------------------------
/CoreML.playgroundbook/Contents/Chapters/CoreML.playgroundchapter/Pages/Template.playgroundpage/Manifest.plist:
--------------------------------------------------------------------------------
 1 | <?xml version="1.0" encoding="UTF-8"?>
 2 | <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
 3 | <plist version="1.0">
 4 | <dict>
 5 |     <key>Name</key>
 6 |     <string>Template Page</string>
 7 |     <key>LiveViewEdgeToEdge</key>
 8 |     <true/>
 9 |     <key>LiveViewMode</key>
10 |     <string>HiddenByDefault</string>
11 | </dict>
12 | </plist>
13 |
--------------------------------------------------------------------------------
/CoreML.playgroundbook/Contents/Chapters/CoreML.playgroundchapter/Pages/Template.playgroundpage/main.swift:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkebo/coreml-playground/3e148a88909b6eea27c482076bf45296ffb3f0fd/CoreML.playgroundbook/Contents/Chapters/CoreML.playgroundchapter/Pages/Template.playgroundpage/main.swift
--------------------------------------------------------------------------------
/CoreML.playgroundbook/Contents/Manifest.plist:
--------------------------------------------------------------------------------
 1 | <?xml version="1.0" encoding="UTF-8"?>
 2 | <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
 3 | <plist version="1.0">
 4 | <dict>
 5 |     <key>Chapters</key>
 6 |     <array>
 7 |         <string>CoreML.playgroundchapter</string>
 8 |     </array>
 9 |     <key>ContentIdentifier</key>
10 |     <string>xyz.kebo.playgroundbook.coreml.coreml</string>
11 |     <key>ContentVersion</key>
12 |     <string>1.6.0</string>
13 |     <key>DeploymentTarget</key>
14 |     <string>ios-current</string>
15 |     <key>DevelopmentRegion</key>
16 |     <string>en-US</string>
17 |     <key>ImageReference</key>
18 |     <string>cover.png</string>
19 |     <key>Name</key>
20 |     <string>CoreML</string>
21 |     <key>Version</key>
22 |     <string>7.0</string>
23 |     <key>SwiftVersion</key>
24 |     <string>5.1</string>
25 |     <key>MinimumSwiftPlaygroundsVersion</key>
26 |     <string>3.1</string>
27 |     <key>UserModuleMode</key>
28 |     <string>Full</string>
29 |     <key>UserAutoImportedAuxiliaryModules</key>
30 |     <array>
31 |     </array>
32 | </dict>
33 | </plist>
34 |
--------------------------------------------------------------------------------
/CoreML.playgroundbook/Contents/PrivateResources/cover.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkebo/coreml-playground/3e148a88909b6eea27c482076bf45296ffb3f0fd/CoreML.playgroundbook/Contents/PrivateResources/cover.png
--------------------------------------------------------------------------------
/CoreML.playgroundbook/Contents/PublicResources/IMG_0032.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkebo/coreml-playground/3e148a88909b6eea27c482076bf45296ffb3f0fd/CoreML.playgroundbook/Contents/PublicResources/IMG_0032.JPG
--------------------------------------------------------------------------------
/CoreML.playgroundbook/Contents/UserModules/PreviewViewController.playgroundmodule/Sources/PreviewViewController.swift:
--------------------------------------------------------------------------------
1 | import ARKit
2 | import PlaygroundSupport
3 | import RealityKit
4 | import UIKit
5 |
6 | open class PreviewViewController: UIViewController {
7 | public lazy var arView: ARView = {
8 | let view = ARView(frame: .zero, cameraMode: .ar, automaticallyConfigureSession: false)
9 | view.session.run(AROrientationTrackingConfiguration())
10 | view.addSubview(self.flipCameraButton)
11 | return view
12 | }()
13 | lazy var flipCameraButton: UIButton = {
14 | let button = UIButton()
15 | button.translatesAutoresizingMaskIntoConstraints = false
16 | button.addTarget(self, action: #selector(self.flipCamera), for: .touchUpInside)
17 | button.setAttributedTitle(
18 | NSAttributedString(
19 | string: "Flip",
20 | attributes: [
21 | NSAttributedString.Key.font: UIFont.preferredFont(forTextStyle: .headline),
22 | NSAttributedString.Key.foregroundColor: UIColor.orange
23 | ]
24 | ),
25 | for: .normal
26 | )
27 | return button
28 | }()
29 | var usingFrontCamera = false
30 |
31 | override open func viewDidLoad() {
32 | super.viewDidLoad()
33 |
34 | self.view = self.arView
35 |
36 | NSLayoutConstraint.activate([
37 | self.flipCameraButton.bottomAnchor.constraint(equalTo: self.liveViewSafeAreaGuide.bottomAnchor),
38 | self.flipCameraButton.rightAnchor.constraint(equalTo: self.liveViewSafeAreaGuide.rightAnchor),
39 | ])
40 | }
41 |
42 | @objc func flipCamera(_ sender: UIButton) {
43 | UIView.transition(with: self.view, duration: 0.4, options: .transitionFlipFromLeft, animations: {
44 | let config = self.usingFrontCamera ? AROrientationTrackingConfiguration() : ARFaceTrackingConfiguration() // face tracking drives the front camera, orientation tracking the rear camera
45 | self.arView.session.run(config)
46 |
47 | self.usingFrontCamera = !self.usingFrontCamera
48 | })
49 | }
50 | }
51 |
52 | extension PreviewViewController: PlaygroundLiveViewSafeAreaContainer {}
53 |
--------------------------------------------------------------------------------
/CoreML.playgroundbook/Contents/UserModules/UserCode.playgroundmodule/Sources/UserSource.swift:
--------------------------------------------------------------------------------
1 | import Accelerate
2 | import CoreML
3 | import UIKit
4 | import Vision
5 |
6 | public func compileModel(at url: URL) throws -> VNCoreMLModel {
7 | try compileModel(at: url, configuration: MLModelConfiguration())
8 | }
9 |
10 | public func compileModel(at url: URL, configuration: MLModelConfiguration) throws -> VNCoreMLModel {
11 | let compiledUrl = try MLModel.compileModel(at: url)
12 | let mlModel = try MLModel(contentsOf: compiledUrl, configuration: configuration)
13 | return try VNCoreMLModel(for: mlModel)
14 | }
15 |
16 | extension CGSize {
17 | public static func / (_ lhs: Self, _ rhs: Self) -> Self {
18 | Self(
19 | width: lhs.width / rhs.width,
20 | height: lhs.height / rhs.height
21 | )
22 | }
23 | }
24 |
25 | public func argmax(_ array: UnsafePointer<Double>, count: UInt) -> (Int, Double) {
26 | var maxValue: Double = 0
27 | var maxIndex: vDSP_Length = 0
28 | vDSP_maxviD(array, 1, &maxValue, &maxIndex, vDSP_Length(count))
29 | return (Int(maxIndex), maxValue)
30 | }
31 |
32 | extension CGImagePropertyOrientation {
33 | public init(interfaceOrientation: UIInterfaceOrientation) {
34 | switch interfaceOrientation {
35 | case .portrait:
36 | self = .right
37 | case .portraitUpsideDown:
38 | self = .left
39 | case .landscapeLeft:
40 | self = .down
41 | case .landscapeRight:
42 | self = .up
43 | default:
44 | self = .right
45 | }
46 | }
47 | }
48 |
49 | extension UIScreen {
50 | public var orientation: UIInterfaceOrientation {
51 | let point = self.coordinateSpace.convert(CGPoint.zero, to: self.fixedCoordinateSpace) // where the screen origin lands in the fixed (portrait) coordinate space reveals the current rotation
52 | switch (point.x, point.y) {
53 | case (0, 0):
54 | return .portrait
55 | case let (x, y) where x != 0 && y != 0:
56 | return .portraitUpsideDown
57 | case let (0, y) where y != 0:
58 | return .landscapeLeft
59 | case let (x, 0) where x != 0:
60 | return .landscapeRight
61 | default:
62 | return .unknown
63 | }
64 | }
65 | }
66 |
67 | public let coco_classes = [
68 | "person",
69 | "bicycle",
70 | "car",
71 | "motorbike",
72 | "aeroplane",
73 | "bus",
74 | "train",
75 | "truck",
76 | "boat",
77 | "traffic light",
78 | "fire hydrant",
79 | "stop sign",
80 | "parking meter",
81 | "bench",
82 | "bird",
83 | "cat",
84 | "dog",
85 | "horse",
86 | "sheep",
87 | "cow",
88 | "elephant",
89 | "bear",
90 | "zebra",
91 | "giraffe",
92 | "backpack",
93 | "umbrella",
94 | "handbag",
95 | "tie",
96 | "suitcase",
97 | "frisbee",
98 | "skis",
99 | "snowboard",
100 | "sports ball",
101 | "kite",
102 | "baseball bat",
103 | "baseball glove",
104 | "skateboard",
105 | "surfboard",
106 | "tennis racket",
107 | "bottle",
108 | "wine glass",
109 | "cup",
110 | "fork",
111 | "knife",
112 | "spoon",
113 | "bowl",
114 | "banana",
115 | "apple",
116 | "sandwich",
117 | "orange",
118 | "broccoli",
119 | "carrot",
120 | "hot dog",
121 | "pizza",
122 | "donut",
123 | "cake",
124 | "chair",
125 | "sofa",
126 | "pottedplant",
127 | "bed",
128 | "diningtable",
129 | "toilet",
130 | "tvmonitor",
131 | "laptop",
132 | "mouse",
133 | "remote",
134 | "keyboard",
135 | "cell phone",
136 | "microwave",
137 | "oven",
138 | "toaster",
139 | "sink",
140 | "refrigerator",
141 | "book",
142 | "clock",
143 | "vase",
144 | "scissors",
145 | "teddy bear",
146 | "hair drier",
147 | "toothbrush",
148 | ]
149 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2019 Kenta Kubo
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## This repository is no longer maintained, but I'm currently developing [the new alternative](https://github.com/kkk669/CoreMLDemos.swiftpm).
2 |
3 | 
4 |
5 | # coreml-playground
6 |
7 | [](https://www.swift.org)
8 | [](https://itunes.apple.com/jp/app/swift-playgrounds/id908519492)
9 | 
10 | [](LICENSE)
11 |
12 | CoreML examples for Swift Playgrounds 3.1 on iPadOS 13.1 or later
13 |
14 | 
15 |
16 | ## Get Started
17 |
18 | [Subscribe to the feed on your iPad](https://developer.apple.com/ul/sp0?url=https://kebo.xyz/coreml-playground/feed.json)
19 |
20 | Before you run the code, make sure that your model file is imported.
21 |
22 | 
23 |
24 | ## Supported models
25 |
26 | 1. Object Recognition
27 |    - [MobileNetV2](https://developer.apple.com/machine-learning/models/)
28 |    - [Resnet50](https://developer.apple.com/machine-learning/models/)
29 |    - [SqueezeNet](https://developer.apple.com/machine-learning/models/)
30 | 1. Object Detection
31 |    - [YOLOv3-Tiny](https://developer.apple.com/machine-learning/models/)
32 |    - [MobileNetV2+SSDLite](https://machinethink.net/blog/mobilenet-ssdlite-coreml/)
33 |    - ~~[YOLOv3](https://developer.apple.com/machine-learning/models/)~~ (insufficient memory)
34 |
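To try one of the models above, import its `.mlmodel` file and change the `#fileLiteral(resourceName:)` in the page's code to match. The Vision-based pages need nothing else; the plain Core ML pages also hard-code the model's feature names, so those have to match the new model too. A sketch of the names assumed by the bundled object-detection page (adjust them for your model):

```swift
// Input/output feature names used by the plain Core ML object-detection page.
let inputName = "image"
let iouThresholdName = "iouThreshold"
let confidenceThresholdName = "confidenceThreshold"
let outputName = "coordinates"
```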
--------------------------------------------------------------------------------
/images/banner.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkebo/coreml-playground/3e148a88909b6eea27c482076bf45296ffb3f0fd/images/banner.png
--------------------------------------------------------------------------------
/images/capture.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkebo/coreml-playground/3e148a88909b6eea27c482076bf45296ffb3f0fd/images/capture.gif
--------------------------------------------------------------------------------
/images/how-to-import-model.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkebo/coreml-playground/3e148a88909b6eea27c482076bf45296ffb3f0fd/images/how-to-import-model.jpg
--------------------------------------------------------------------------------
/images/preview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkebo/coreml-playground/3e148a88909b6eea27c482076bf45296ffb3f0fd/images/preview.png
--------------------------------------------------------------------------------
/images/thumbnail.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkebo/coreml-playground/3e148a88909b6eea27c482076bf45296ffb3f0fd/images/thumbnail.png
--------------------------------------------------------------------------------