├── .DS_Store
├── .gitignore
├── LICENSE
├── README.md
├── assets
│   ├── ios.gif
│   └── live.png
├── iOS
│   ├── .DS_Store
│   ├── AppML.xcodeproj
│   │   ├── .xcodesamplecode.plist
│   │   ├── project.pbxproj
│   │   ├── project.xcworkspace
│   │   │   ├── contents.xcworkspacedata
│   │   │   ├── xcshareddata
│   │   │   │   ├── IDEWorkspaceChecks.plist
│   │   │   │   └── swiftpm
│   │   │   │       └── Package.resolved
│   │   │   └── xcuserdata
│   │   │       └── filippoaleotti.xcuserdatad
│   │   │           └── UserInterfaceState.xcuserstate
│   │   ├── xcshareddata
│   │   │   └── xcschemes
│   │   │       └── AppML.xcscheme
│   │   └── xcuserdata
│   │       └── filippoaleotti.xcuserdatad
│   │           └── xcdebugger
│   │               └── Breakpoints_v2.xcbkptlist
│   ├── AppML.xcworkspace
│   │   ├── contents.xcworkspacedata
│   │   └── xcshareddata
│   │       ├── IDEWorkspaceChecks.plist
│   │       └── swiftpm
│   │           └── Package.resolved
│   └── AppML
│       ├── .DS_Store
│       ├── AppDelegate.swift
│       ├── Assets.xcassets
│       │   ├── .DS_Store
│       │   ├── AppIcon.appiconset
│       │   │   ├── Contents.json
│       │   │   ├── Icon-App-20x20@1x.png
│       │   │   ├── Icon-App-20x20@2x.png
│       │   │   ├── Icon-App-20x20@3x.png
│       │   │   ├── Icon-App-29x29@1x.png
│       │   │   ├── Icon-App-29x29@2x.png
│       │   │   ├── Icon-App-29x29@3x.png
│       │   │   ├── Icon-App-40x40@1x.png
│       │   │   ├── Icon-App-40x40@2x.png
│       │   │   ├── Icon-App-40x40@3x.png
│       │   │   ├── Icon-App-60x60@2x.png
│       │   │   ├── Icon-App-60x60@3x.png
│       │   │   ├── Icon-App-76x76@1x.png
│       │   │   ├── Icon-App-76x76@2x.png
│       │   │   ├── Icon-App-83.5x83.5@2x.png
│       │   │   └── ItunesArtwork@2x.png
│       │   ├── ColorFilterOff.imageset
│       │   │   ├── ColorFilterOff@2x.png
│       │   │   └── Contents.json
│       │   ├── ColorFilterOn.imageset
│       │   │   ├── ColorFilterOn@2x.png
│       │   │   └── Contents.json
│       │   ├── Contents.json
│       │   ├── SettingsIcon.imageset
│       │   │   ├── Contents.json
│       │   │   └── icons8-services-2.pdf
│       │   └── Trash.imageset
│       │       ├── Contents.json
│       │       └── Medium-S.pdf
│       ├── CameraLayer
│       │   ├── CameraStream.swift
│       │   └── Extensions
│       │       └── CGImage+CVPixelBuffer.swift
│       ├── Extensions
│       │   ├── CV
│       │   │   └── CVPixelBuffer+createCGImage.swift
│       │   ├── RxSwift
│       │   │   ├── RxSwiftBidirectionalBinding.swift
│       │   │   └── UITextView+textColor.swift
│       │   └── UI
│       │       └── UIAlertController+Ext.swift
│       ├── GraphicLayer
│       │   ├── ColorMapApplier.swift
│       │   ├── DepthToColorMap.metal
│       │   └── MetalColorMapApplier.swift
│       ├── Info.plist
│       ├── Models
│       │   ├── .DS_Store
│       │   └── Pydnet.mlmodel
│       ├── Mods
│       │   ├── CameraOutput.swift
│       │   ├── MonoCameraOutput.swift
│       │   ├── NeuralNetwork+Ext.swift
│       │   ├── NeuralNetwork.swift
│       │   └── StereoCameraOutput.swift
│       ├── NeuralNetworkRepository
│       │   ├── FileSystemNeuralNetworkRepository.swift
│       │   └── NeuralNetworkRepository.swift
│       ├── NeuralNetworks
│       │   ├── .DS_Store
│       │   ├── Helpers
│       │   │   ├── CVPixelBuffer+createCGImage.swift
│       │   │   ├── MonoInputFeatureProvider.swift
│       │   │   └── StereoInputFeatureProvider.swift
│       │   ├── Pydnet
│       │   │   └── OptimizedPydnet.mlmodel
│       │   └── QuantizedPydnet
│       │       └── PydnetQuantized.mlmodel
│       ├── Storyboards
│       │   ├── .DS_Store
│       │   └── Base.lproj
│       │       ├── .DS_Store
│       │       ├── LaunchScreen.storyboard
│       │       └── Main.storyboard
│       └── View
│           └── Main
│               ├── MainViewController.swift
│               ├── MainViewModel.swift
│               └── PreviewMode.swift
└── single_inference
    ├── .gitignore
    ├── LICENSE
    ├── README.md
    ├── eval_utils.py
    ├── export.py
    ├── inference.py
    ├── modules.py
    ├── network.py
    ├── requirements.txt
    ├── run.sh
    ├── test
    │   ├── 0.png
    │   ├── 3.png
    │   ├── 4.png
    │   └── 6.png
    ├── test_kitti.py
    ├── test_kitti.txt
    ├── test_nyu.py
    ├── test_tum.py
    ├── test_tum.txt
    └── train_files.txt
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/.DS_Store
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 |
50 | # Translations
51 | *.mo
52 | *.pot
53 |
54 | # Django stuff:
55 | *.log
56 | local_settings.py
57 | db.sqlite3
58 |
59 | # Flask stuff:
60 | instance/
61 | .webassets-cache
62 |
63 | # Scrapy stuff:
64 | .scrapy
65 |
66 | # Sphinx documentation
67 | docs/_build/
68 |
69 | # PyBuilder
70 | target/
71 |
72 | # Jupyter Notebook
73 | .ipynb_checkpoints
74 |
75 | # pyenv
76 | .python-version
77 |
78 | # celery beat schedule file
79 | celerybeat-schedule
80 |
81 | # SageMath parsed files
82 | *.sage.py
83 |
84 | # Environments
85 | .env
86 | .venv
87 | env/
88 | venv/
89 | ENV/
90 | env.bak/
91 | venv.bak/
92 |
93 | # Spyder project settings
94 | .spyderproject
95 | .spyproject
96 |
97 | # Rope project settings
98 | .ropeproject
99 |
100 | # mkdocs documentation
101 | /site
102 |
103 | # mypy
104 | .mypy_cache/
105 | */Android/app/build/*
106 | ckpt/*
107 | frozen_models/*
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 | APPENDIX: How to apply the Apache License to your work.
180 |
181 | To apply the Apache License to your work, attach the following
182 | boilerplate notice, with the fields enclosed by brackets "[]"
183 | replaced with your own identifying information. (Don't include
184 | the brackets!) The text should be enclosed in the appropriate
185 | comment syntax for the file format. We also recommend that a
186 | file or class name and description of purpose be included on the
187 | same "printed page" as the copyright notice for easier
188 | identification within third-party archives.
189 |
190 | Copyright [yyyy] [name of copyright owner]
191 |
192 | Licensed under the Apache License, Version 2.0 (the "License");
193 | you may not use this file except in compliance with the License.
194 | You may obtain a copy of the License at
195 |
196 | http://www.apache.org/licenses/LICENSE-2.0
197 |
198 | Unless required by applicable law or agreed to in writing, software
199 | distributed under the License is distributed on an "AS IS" BASIS,
200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 | See the License for the specific language governing permissions and
202 | limitations under the License.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # PyDNet on mobile devices v2.0
2 | This repository contains the source code to run PyDNet on mobile devices.
3 |
4 | # What's new?
5 | In v2.0, we changed the procedure and the data used for training. More information will be provided soon...
6 |
7 | Moreover, we have also built a web-based demonstration of the same network! You can try it now [here](https://filippoaleotti.github.io/demo_live/).
8 | The model runs directly in your browser, so there is nothing to install!
9 |
10 |
11 |
12 |
13 |
14 | ## iOS
15 | The iOS demo has been developed by [Giulio Zaccaroni](https://github.com/GZaccaroni).
16 |
17 | XCode is required to build the app, moreover you need to sign in with your AppleID and trust yourself as certified developer.
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
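As a quick reference, the sketch below shows one way the bundled Core ML model could be queried from Swift. It is illustrative only: the `"image"` and `"depth"` feature names are hypothetical placeholders, so check the actual input/output names of `Pydnet.mlmodel` in Xcode before relying on them.

```swift
import CoreML
import CoreVideo

/// Illustrative sketch: load the compiled Pydnet model from the app bundle and run
/// one prediction on a camera frame. The "image"/"depth" feature names are
/// placeholders; inspect the model description for the real ones.
func estimateDepth(from frame: CVPixelBuffer) throws -> MLMultiArray? {
    guard let url = Bundle.main.url(forResource: "Pydnet", withExtension: "mlmodelc") else {
        return nil
    }
    let model = try MLModel(contentsOf: url)
    let input = try MLDictionaryFeatureProvider(
        dictionary: ["image": MLFeatureValue(pixelBuffer: frame)])
    let output = try model.prediction(from: input)
    return output.featureValue(for: "depth")?.multiArrayValue
}
```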
26 | ## Android
27 | The code will be released soon.
28 |
29 | # License
30 | The code is licensed under the Apache 2.0 license.
31 | Weights of the network can be used for research purposes only.
32 |
33 | # Contacts and links
34 | If you use this code in your projects, please cite our paper:
35 |
36 | ```
37 | @article{aleotti2020real,
38 | title={Real-time single image depth perception in the wild with handheld devices},
39 | author={Aleotti, Filippo and Zaccaroni, Giulio and Bartolomei, Luca and Poggi, Matteo and Tosi, Fabio and Mattoccia, Stefano},
40 | journal={Sensors},
41 | volume={21},
42 | year={2021}
43 | }
44 |
45 | @inproceedings{pydnet18,
46 | title = {Towards real-time unsupervised monocular depth estimation on CPU},
47 | author = {Poggi, Matteo and
48 | Aleotti, Filippo and
49 | Tosi, Fabio and
50 | Mattoccia, Stefano},
51 |   booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
52 | year = {2018}
53 | }
54 | ```
55 |
56 | More info about the work can be found at these links:
57 | * [Real-time single image depth perception in the wild with handheld devices, arXiv](https://arxiv.org/pdf/2006.05724.pdf)
58 | * [PyDNet paper](https://arxiv.org/pdf/1806.11430.pdf)
59 | * [PyDNet code](https://github.com/mattpoggi/pydnet)
60 |
61 | For questions, please send an email to filippo.aleotti2@unibo.it
62 |
--------------------------------------------------------------------------------
/assets/ios.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/assets/ios.gif
--------------------------------------------------------------------------------
/assets/live.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/assets/live.png
--------------------------------------------------------------------------------
/iOS/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/.DS_Store
--------------------------------------------------------------------------------
/iOS/AppML.xcodeproj/.xcodesamplecode.plist:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/iOS/AppML.xcodeproj/project.pbxproj:
--------------------------------------------------------------------------------
1 | // !$*UTF8*$!
2 | {
3 | archiveVersion = 1;
4 | classes = {
5 | };
6 | objectVersion = 52;
7 | objects = {
8 |
9 | /* Begin PBXBuildFile section */
10 | 3C04E71122EC943000EA471B /* CoreImage.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 3C04E71022EC943000EA471B /* CoreImage.framework */; };
11 | 3C04E71322EC943800EA471B /* CoreVideo.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 3C04E71222EC943800EA471B /* CoreVideo.framework */; };
12 | 3C04E71522EC943C00EA471B /* Accelerate.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 3C04E71422EC943C00EA471B /* Accelerate.framework */; };
13 | 3C140CA32322CD8F00D27DFD /* RxRelay in Frameworks */ = {isa = PBXBuildFile; productRef = 3C140CA22322CD8F00D27DFD /* RxRelay */; };
14 | 3C140CA52322CD8F00D27DFD /* RxSwift in Frameworks */ = {isa = PBXBuildFile; productRef = 3C140CA42322CD8F00D27DFD /* RxSwift */; };
15 | 3C140CA72322CD8F00D27DFD /* RxCocoa in Frameworks */ = {isa = PBXBuildFile; productRef = 3C140CA62322CD8F00D27DFD /* RxCocoa */; };
16 | 3C31317F2321499A006F9963 /* PreviewMode.swift in Sources */ = {isa = PBXBuildFile; fileRef = 3C31317E2321499A006F9963 /* PreviewMode.swift */; };
17 | 3C44D82B22FEBBE100F57013 /* UIAlertController+Ext.swift in Sources */ = {isa = PBXBuildFile; fileRef = 3C44D82A22FEBBE100F57013 /* UIAlertController+Ext.swift */; };
18 | 3C5C90332299A75800C2E814 /* DepthToColorMap.metal in Sources */ = {isa = PBXBuildFile; fileRef = 3C5C90322299A75800C2E814 /* DepthToColorMap.metal */; };
19 | 3C5EFCCE2320680E004F6F7A /* RxSwiftBidirectionalBinding.swift in Sources */ = {isa = PBXBuildFile; fileRef = 3C5EFCCD2320680E004F6F7A /* RxSwiftBidirectionalBinding.swift */; };
20 | 3C5EFCD02320F208004F6F7A /* UITextView+textColor.swift in Sources */ = {isa = PBXBuildFile; fileRef = 3C5EFCCF2320F208004F6F7A /* UITextView+textColor.swift */; };
21 | 3C6BB34B2322E7E70041D581 /* CVPixelBuffer+createCGImage.swift in Sources */ = {isa = PBXBuildFile; fileRef = 3C6BB34A2322E7E70041D581 /* CVPixelBuffer+createCGImage.swift */; };
22 | 3C6D91E6232132BF008D987C /* MainViewModel.swift in Sources */ = {isa = PBXBuildFile; fileRef = 3C6D91E5232132BF008D987C /* MainViewModel.swift */; };
23 | 3CB56B5C2479566A00143CD8 /* Pydnet.mlmodel in Sources */ = {isa = PBXBuildFile; fileRef = 3CB56B5B2479566A00143CD8 /* Pydnet.mlmodel */; };
24 | 3CC9262322991C22001C75CE /* MetalColorMapApplier.swift in Sources */ = {isa = PBXBuildFile; fileRef = 3CC9262222991C22001C75CE /* MetalColorMapApplier.swift */; };
25 | 3CE77C082325061A00DBAAD5 /* ColorMapApplier.swift in Sources */ = {isa = PBXBuildFile; fileRef = 3CE77C072325061A00DBAAD5 /* ColorMapApplier.swift */; };
26 | 3CEECB0E2486A7FF00535292 /* CameraStream.swift in Sources */ = {isa = PBXBuildFile; fileRef = 3CEECB0A2486A7FF00535292 /* CameraStream.swift */; };
27 | 3CEECB102486A7FF00535292 /* CGImage+CVPixelBuffer.swift in Sources */ = {isa = PBXBuildFile; fileRef = 3CEECB0D2486A7FF00535292 /* CGImage+CVPixelBuffer.swift */; };
28 | 7AA677151CFF765600B353FB /* AppDelegate.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7AA677141CFF765600B353FB /* AppDelegate.swift */; };
29 | 7AA677171CFF765600B353FB /* MainViewController.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7AA677161CFF765600B353FB /* MainViewController.swift */; };
30 | 7AA6771A1CFF765600B353FB /* Main.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 7AA677181CFF765600B353FB /* Main.storyboard */; };
31 | 7AA6771C1CFF765600B353FB /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 7AA6771B1CFF765600B353FB /* Assets.xcassets */; };
32 | 7AA6771F1CFF765600B353FB /* LaunchScreen.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 7AA6771D1CFF765600B353FB /* LaunchScreen.storyboard */; };
33 | /* End PBXBuildFile section */
34 |
35 | /* Begin PBXFileReference section */
36 | 3C04E71022EC943000EA471B /* CoreImage.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreImage.framework; path = System/Library/Frameworks/CoreImage.framework; sourceTree = SDKROOT; };
37 | 3C04E71222EC943800EA471B /* CoreVideo.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreVideo.framework; path = System/Library/Frameworks/CoreVideo.framework; sourceTree = SDKROOT; };
38 | 3C04E71422EC943C00EA471B /* Accelerate.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Accelerate.framework; path = System/Library/Frameworks/Accelerate.framework; sourceTree = SDKROOT; };
39 | 3C31317E2321499A006F9963 /* PreviewMode.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = PreviewMode.swift; sourceTree = "<group>"; };
40 | 3C44D82A22FEBBE100F57013 /* UIAlertController+Ext.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "UIAlertController+Ext.swift"; sourceTree = "<group>"; };
41 | 3C5C90322299A75800C2E814 /* DepthToColorMap.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; path = DepthToColorMap.metal; sourceTree = "<group>"; };
42 | 3C5EFCCD2320680E004F6F7A /* RxSwiftBidirectionalBinding.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = RxSwiftBidirectionalBinding.swift; sourceTree = "<group>"; };
43 | 3C5EFCCF2320F208004F6F7A /* UITextView+textColor.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "UITextView+textColor.swift"; sourceTree = "<group>"; };
44 | 3C6BB34A2322E7E70041D581 /* CVPixelBuffer+createCGImage.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "CVPixelBuffer+createCGImage.swift"; sourceTree = "<group>"; };
45 | 3C6D91E5232132BF008D987C /* MainViewModel.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = MainViewModel.swift; sourceTree = "<group>"; };
46 | 3CB56B5B2479566A00143CD8 /* Pydnet.mlmodel */ = {isa = PBXFileReference; lastKnownFileType = file.mlmodel; name = Pydnet.mlmodel; path = AppML/Models/Pydnet.mlmodel; sourceTree = SOURCE_ROOT; };
47 | 3CC9262222991C22001C75CE /* MetalColorMapApplier.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = MetalColorMapApplier.swift; sourceTree = "<group>"; };
48 | 3CE77C072325061A00DBAAD5 /* ColorMapApplier.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ColorMapApplier.swift; sourceTree = "<group>"; };
49 | 3CEECB0A2486A7FF00535292 /* CameraStream.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = CameraStream.swift; sourceTree = "<group>"; };
50 | 3CEECB0D2486A7FF00535292 /* CGImage+CVPixelBuffer.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "CGImage+CVPixelBuffer.swift"; sourceTree = "<group>"; };
51 | 7AA677111CFF765600B353FB /* MobilePydnet.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = MobilePydnet.app; sourceTree = BUILT_PRODUCTS_DIR; };
52 | 7AA677141CFF765600B353FB /* AppDelegate.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AppDelegate.swift; sourceTree = "<group>"; };
53 | 7AA677161CFF765600B353FB /* MainViewController.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = MainViewController.swift; sourceTree = "<group>"; };
54 | 7AA677191CFF765600B353FB /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/Main.storyboard; sourceTree = "<group>"; };
55 | 7AA6771B1CFF765600B353FB /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = "<group>"; };
56 | 7AA6771E1CFF765600B353FB /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/LaunchScreen.storyboard; sourceTree = "<group>"; };
57 | 7AA677201CFF765600B353FB /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = "<group>"; };
58 | /* End PBXFileReference section */
59 |
60 | /* Begin PBXFrameworksBuildPhase section */
61 | 7AA6770E1CFF765500B353FB /* Frameworks */ = {
62 | isa = PBXFrameworksBuildPhase;
63 | buildActionMask = 2147483647;
64 | files = (
65 | 3C140CA52322CD8F00D27DFD /* RxSwift in Frameworks */,
66 | 3C140CA32322CD8F00D27DFD /* RxRelay in Frameworks */,
67 | 3C04E71522EC943C00EA471B /* Accelerate.framework in Frameworks */,
68 | 3C140CA72322CD8F00D27DFD /* RxCocoa in Frameworks */,
69 | 3C04E71122EC943000EA471B /* CoreImage.framework in Frameworks */,
70 | 3C04E71322EC943800EA471B /* CoreVideo.framework in Frameworks */,
71 | );
72 | runOnlyForDeploymentPostprocessing = 0;
73 | };
74 | /* End PBXFrameworksBuildPhase section */
75 |
76 | /* Begin PBXGroup section */
77 | 2563FCD02563FC9000000001 /* Configuration */ = {
78 | isa = PBXGroup;
79 | children = (
80 | );
81 | name = Configuration;
82 | sourceTree = "<group>";
83 | };
84 | 3C3FBB7F23225E8800DA8B32 /* UI */ = {
85 | isa = PBXGroup;
86 | children = (
87 | 3C44D82A22FEBBE100F57013 /* UIAlertController+Ext.swift */,
88 | );
89 | path = UI;
90 | sourceTree = "<group>";
91 | };
92 | 3C5EFCC823205F50004F6F7A /* GraphicLayer */ = {
93 | isa = PBXGroup;
94 | children = (
95 | 3CE77C072325061A00DBAAD5 /* ColorMapApplier.swift */,
96 | 3CC9262222991C22001C75CE /* MetalColorMapApplier.swift */,
97 | 3C5C90322299A75800C2E814 /* DepthToColorMap.metal */,
98 | );
99 | path = GraphicLayer;
100 | sourceTree = "<group>";
101 | };
102 | 3C5EFCCB232067F7004F6F7A /* Extensions */ = {
103 | isa = PBXGroup;
104 | children = (
105 | 3C6BB3472322E7BD0041D581 /* CV */,
106 | 3C3FBB7F23225E8800DA8B32 /* UI */,
107 | 3C5EFCCC232067FF004F6F7A /* RxSwift */,
108 | );
109 | path = Extensions;
110 | sourceTree = "<group>";
111 | };
112 | 3C5EFCCC232067FF004F6F7A /* RxSwift */ = {
113 | isa = PBXGroup;
114 | children = (
115 | 3C5EFCCD2320680E004F6F7A /* RxSwiftBidirectionalBinding.swift */,
116 | 3C5EFCCF2320F208004F6F7A /* UITextView+textColor.swift */,
117 | );
118 | path = RxSwift;
119 | sourceTree = "<group>";
120 | };
121 | 3C6BB3442322E7680041D581 /* DefaultNeuralNetworks */ = {
122 | isa = PBXGroup;
123 | children = (
124 | 3CB56B5B2479566A00143CD8 /* Pydnet.mlmodel */,
125 | );
126 | path = DefaultNeuralNetworks;
127 | sourceTree = "<group>";
128 | };
129 | 3C6BB3472322E7BD0041D581 /* CV */ = {
130 | isa = PBXGroup;
131 | children = (
132 | 3C6BB34A2322E7E70041D581 /* CVPixelBuffer+createCGImage.swift */,
133 | );
134 | path = CV;
135 | sourceTree = "<group>";
136 | };
137 | 3CEECB092486A7F100535292 /* CameraLayer */ = {
138 | isa = PBXGroup;
139 | children = (
140 | 3CEECB0C2486A7FF00535292 /* Extensions */,
141 | 3CEECB0A2486A7FF00535292 /* CameraStream.swift */,
142 | );
143 | path = CameraLayer;
144 | sourceTree = "<group>";
145 | };
146 | 3CEECB0C2486A7FF00535292 /* Extensions */ = {
147 | isa = PBXGroup;
148 | children = (
149 | 3CEECB0D2486A7FF00535292 /* CGImage+CVPixelBuffer.swift */,
150 | );
151 | path = Extensions;
152 | sourceTree = "<group>";
153 | };
154 | 3CF695B8229FC4CE00E4115E /* View */ = {
155 | isa = PBXGroup;
156 | children = (
157 | 3CF695B9229FC4DE00E4115E /* Main */,
158 | );
159 | path = View;
160 | sourceTree = "<group>";
161 | };
162 | 3CF695B9229FC4DE00E4115E /* Main */ = {
163 | isa = PBXGroup;
164 | children = (
165 | 7AA677161CFF765600B353FB /* MainViewController.swift */,
166 | 3C6D91E5232132BF008D987C /* MainViewModel.swift */,
167 | 3C31317E2321499A006F9963 /* PreviewMode.swift */,
168 | );
169 | path = Main;
170 | sourceTree = "<group>";
171 | };
172 | 3CF695BA229FC4F700E4115E /* Storyboards */ = {
173 | isa = PBXGroup;
174 | children = (
175 | 7AA677181CFF765600B353FB /* Main.storyboard */,
176 | 7AA6771D1CFF765600B353FB /* LaunchScreen.storyboard */,
177 | );
178 | path = Storyboards;
179 | sourceTree = "<group>";
180 | };
181 | 7AA677081CFF765500B353FB = {
182 | isa = PBXGroup;
183 | children = (
184 | 7AA677131CFF765600B353FB /* AppML */,
185 | 7AA677121CFF765600B353FB /* Products */,
186 | 2563FCD02563FC9000000001 /* Configuration */,
187 | EF473959B157DAEEB0DB064A /* Frameworks */,
188 | );
189 | sourceTree = "<group>";
190 | };
191 | 7AA677121CFF765600B353FB /* Products */ = {
192 | isa = PBXGroup;
193 | children = (
194 | 7AA677111CFF765600B353FB /* MobilePydnet.app */,
195 | );
196 | name = Products;
197 | sourceTree = "<group>";
198 | };
199 | 7AA677131CFF765600B353FB /* AppML */ = {
200 | isa = PBXGroup;
201 | children = (
202 | 7AA677141CFF765600B353FB /* AppDelegate.swift */,
203 | 3C6BB3442322E7680041D581 /* DefaultNeuralNetworks */,
204 | 3CEECB092486A7F100535292 /* CameraLayer */,
205 | 3C5EFCC823205F50004F6F7A /* GraphicLayer */,
206 | 3CF695B8229FC4CE00E4115E /* View */,
207 | 3CF695BA229FC4F700E4115E /* Storyboards */,
208 | 3C5EFCCB232067F7004F6F7A /* Extensions */,
209 | 7AA6771B1CFF765600B353FB /* Assets.xcassets */,
210 | 7AA677201CFF765600B353FB /* Info.plist */,
211 | );
212 | path = AppML;
213 | sourceTree = "<group>";
214 | };
215 | EF473959B157DAEEB0DB064A /* Frameworks */ = {
216 | isa = PBXGroup;
217 | children = (
218 | 3C04E71422EC943C00EA471B /* Accelerate.framework */,
219 | 3C04E71222EC943800EA471B /* CoreVideo.framework */,
220 | 3C04E71022EC943000EA471B /* CoreImage.framework */,
221 | );
222 | name = Frameworks;
223 | sourceTree = "<group>";
224 | };
225 | /* End PBXGroup section */
226 |
227 | /* Begin PBXNativeTarget section */
228 | 7AA677101CFF765500B353FB /* AppML */ = {
229 | isa = PBXNativeTarget;
230 | buildConfigurationList = 7AA677231CFF765600B353FB /* Build configuration list for PBXNativeTarget "AppML" */;
231 | buildPhases = (
232 | 7AA6770D1CFF765500B353FB /* Sources */,
233 | 7AA6770E1CFF765500B353FB /* Frameworks */,
234 | 7AA6770F1CFF765500B353FB /* Resources */,
235 | );
236 | buildRules = (
237 | );
238 | dependencies = (
239 | );
240 | name = AppML;
241 | packageProductDependencies = (
242 | 3C140CA22322CD8F00D27DFD /* RxRelay */,
243 | 3C140CA42322CD8F00D27DFD /* RxSwift */,
244 | 3C140CA62322CD8F00D27DFD /* RxCocoa */,
245 | );
246 | productName = AVCam;
247 | productReference = 7AA677111CFF765600B353FB /* MobilePydnet.app */;
248 | productType = "com.apple.product-type.application";
249 | };
250 | /* End PBXNativeTarget section */
251 |
252 | /* Begin PBXProject section */
253 | 7AA677091CFF765500B353FB /* Project object */ = {
254 | isa = PBXProject;
255 | attributes = {
256 | LastSwiftUpdateCheck = 0800;
257 | LastUpgradeCheck = 1020;
258 | ORGANIZATIONNAME = Apple;
259 | TargetAttributes = {
260 | 7AA677101CFF765500B353FB = {
261 | CreatedOnToolsVersion = 8.0;
262 | LastSwiftMigration = 1020;
263 | ProvisioningStyle = Automatic;
264 | };
265 | };
266 | };
267 | buildConfigurationList = 7AA6770C1CFF765500B353FB /* Build configuration list for PBXProject "AppML" */;
268 | compatibilityVersion = "Xcode 3.2";
269 | developmentRegion = en;
270 | hasScannedForEncodings = 0;
271 | knownRegions = (
272 | en,
273 | Base,
274 | );
275 | mainGroup = 7AA677081CFF765500B353FB;
276 | packageReferences = (
277 | 3C140CA12322CD8F00D27DFD /* XCRemoteSwiftPackageReference "RxSwift" */,
278 | );
279 | productRefGroup = 7AA677121CFF765600B353FB /* Products */;
280 | projectDirPath = "";
281 | projectRoot = "";
282 | targets = (
283 | 7AA677101CFF765500B353FB /* AppML */,
284 | );
285 | };
286 | /* End PBXProject section */
287 |
288 | /* Begin PBXResourcesBuildPhase section */
289 | 7AA6770F1CFF765500B353FB /* Resources */ = {
290 | isa = PBXResourcesBuildPhase;
291 | buildActionMask = 2147483647;
292 | files = (
293 | 7AA6771F1CFF765600B353FB /* LaunchScreen.storyboard in Resources */,
294 | 7AA6771C1CFF765600B353FB /* Assets.xcassets in Resources */,
295 | 7AA6771A1CFF765600B353FB /* Main.storyboard in Resources */,
296 | );
297 | runOnlyForDeploymentPostprocessing = 0;
298 | };
299 | /* End PBXResourcesBuildPhase section */
300 |
301 | /* Begin PBXSourcesBuildPhase section */
302 | 7AA6770D1CFF765500B353FB /* Sources */ = {
303 | isa = PBXSourcesBuildPhase;
304 | buildActionMask = 2147483647;
305 | files = (
306 | 3CEECB102486A7FF00535292 /* CGImage+CVPixelBuffer.swift in Sources */,
307 | 3CC9262322991C22001C75CE /* MetalColorMapApplier.swift in Sources */,
308 | 3C5EFCCE2320680E004F6F7A /* RxSwiftBidirectionalBinding.swift in Sources */,
309 | 3CB56B5C2479566A00143CD8 /* Pydnet.mlmodel in Sources */,
310 | 3CEECB0E2486A7FF00535292 /* CameraStream.swift in Sources */,
311 | 3C5EFCD02320F208004F6F7A /* UITextView+textColor.swift in Sources */,
312 | 3C6BB34B2322E7E70041D581 /* CVPixelBuffer+createCGImage.swift in Sources */,
313 | 3C5C90332299A75800C2E814 /* DepthToColorMap.metal in Sources */,
314 | 7AA677171CFF765600B353FB /* MainViewController.swift in Sources */,
315 | 3C31317F2321499A006F9963 /* PreviewMode.swift in Sources */,
316 | 3CE77C082325061A00DBAAD5 /* ColorMapApplier.swift in Sources */,
317 | 7AA677151CFF765600B353FB /* AppDelegate.swift in Sources */,
318 | 3C6D91E6232132BF008D987C /* MainViewModel.swift in Sources */,
319 | 3C44D82B22FEBBE100F57013 /* UIAlertController+Ext.swift in Sources */,
320 | );
321 | runOnlyForDeploymentPostprocessing = 0;
322 | };
323 | /* End PBXSourcesBuildPhase section */
324 |
325 | /* Begin PBXVariantGroup section */
326 | 7AA677181CFF765600B353FB /* Main.storyboard */ = {
327 | isa = PBXVariantGroup;
328 | children = (
329 | 7AA677191CFF765600B353FB /* Base */,
330 | );
331 | name = Main.storyboard;
332 | sourceTree = "<group>";
333 | };
334 | 7AA6771D1CFF765600B353FB /* LaunchScreen.storyboard */ = {
335 | isa = PBXVariantGroup;
336 | children = (
337 | 7AA6771E1CFF765600B353FB /* Base */,
338 | );
339 | name = LaunchScreen.storyboard;
340 | sourceTree = "<group>";
341 | };
342 | /* End PBXVariantGroup section */
343 |
344 | /* Begin XCBuildConfiguration section */
345 | 7AA677211CFF765600B353FB /* Debug */ = {
346 | isa = XCBuildConfiguration;
347 | buildSettings = {
348 | ALWAYS_SEARCH_USER_PATHS = NO;
349 | CLANG_ANALYZER_LOCALIZABILITY_NONLOCALIZED = YES;
350 | CLANG_ANALYZER_NONNULL = YES;
351 | CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x";
352 | CLANG_CXX_LIBRARY = "libc++";
353 | CLANG_ENABLE_MODULES = YES;
354 | CLANG_ENABLE_OBJC_ARC = YES;
355 | CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;
356 | CLANG_WARN_BOOL_CONVERSION = YES;
357 | CLANG_WARN_COMMA = YES;
358 | CLANG_WARN_CONSTANT_CONVERSION = YES;
359 | CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;
360 | CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
361 | CLANG_WARN_DOCUMENTATION_COMMENTS = YES;
362 | CLANG_WARN_EMPTY_BODY = YES;
363 | CLANG_WARN_ENUM_CONVERSION = YES;
364 | CLANG_WARN_INFINITE_RECURSION = YES;
365 | CLANG_WARN_INT_CONVERSION = YES;
366 | CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;
367 | CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES;
368 | CLANG_WARN_OBJC_LITERAL_CONVERSION = YES;
369 | CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
370 | CLANG_WARN_RANGE_LOOP_ANALYSIS = YES;
371 | CLANG_WARN_STRICT_PROTOTYPES = YES;
372 | CLANG_WARN_SUSPICIOUS_MOVE = YES;
373 | CLANG_WARN_UNREACHABLE_CODE = YES;
374 | CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
375 | "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer";
376 | COPY_PHASE_STRIP = NO;
377 | DEBUG_INFORMATION_FORMAT = dwarf;
378 | ENABLE_STRICT_OBJC_MSGSEND = YES;
379 | ENABLE_TESTABILITY = YES;
380 | GCC_C_LANGUAGE_STANDARD = gnu99;
381 | GCC_DYNAMIC_NO_PIC = NO;
382 | GCC_NO_COMMON_BLOCKS = YES;
383 | GCC_OPTIMIZATION_LEVEL = 0;
384 | GCC_PREPROCESSOR_DEFINITIONS = (
385 | "DEBUG=1",
386 | "$(inherited)",
387 | );
388 | GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
389 | GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
390 | GCC_WARN_UNDECLARED_SELECTOR = YES;
391 | GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
392 | GCC_WARN_UNUSED_FUNCTION = YES;
393 | GCC_WARN_UNUSED_VARIABLE = YES;
394 | IPHONEOS_DEPLOYMENT_TARGET = 12.0;
395 | MTL_ENABLE_DEBUG_INFO = YES;
396 | ONLY_ACTIVE_ARCH = YES;
397 | SDKROOT = iphoneos;
398 | SWIFT_ACTIVE_COMPILATION_CONDITIONS = DEBUG;
399 | SWIFT_OPTIMIZATION_LEVEL = "-Onone";
400 | SWIFT_VERSION = 4.2;
401 | TARGETED_DEVICE_FAMILY = "1,2";
402 | };
403 | name = Debug;
404 | };
405 | 7AA677221CFF765600B353FB /* Release */ = {
406 | isa = XCBuildConfiguration;
407 | buildSettings = {
408 | ALWAYS_SEARCH_USER_PATHS = NO;
409 | CLANG_ANALYZER_LOCALIZABILITY_NONLOCALIZED = YES;
410 | CLANG_ANALYZER_NONNULL = YES;
411 | CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x";
412 | CLANG_CXX_LIBRARY = "libc++";
413 | CLANG_ENABLE_MODULES = YES;
414 | CLANG_ENABLE_OBJC_ARC = YES;
415 | CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;
416 | CLANG_WARN_BOOL_CONVERSION = YES;
417 | CLANG_WARN_COMMA = YES;
418 | CLANG_WARN_CONSTANT_CONVERSION = YES;
419 | CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;
420 | CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
421 | CLANG_WARN_DOCUMENTATION_COMMENTS = YES;
422 | CLANG_WARN_EMPTY_BODY = YES;
423 | CLANG_WARN_ENUM_CONVERSION = YES;
424 | CLANG_WARN_INFINITE_RECURSION = YES;
425 | CLANG_WARN_INT_CONVERSION = YES;
426 | CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;
427 | CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES;
428 | CLANG_WARN_OBJC_LITERAL_CONVERSION = YES;
429 | CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
430 | CLANG_WARN_RANGE_LOOP_ANALYSIS = YES;
431 | CLANG_WARN_STRICT_PROTOTYPES = YES;
432 | CLANG_WARN_SUSPICIOUS_MOVE = YES;
433 | CLANG_WARN_UNREACHABLE_CODE = YES;
434 | CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
435 | "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer";
436 | COPY_PHASE_STRIP = NO;
437 | DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
438 | ENABLE_NS_ASSERTIONS = NO;
439 | ENABLE_STRICT_OBJC_MSGSEND = YES;
440 | GCC_C_LANGUAGE_STANDARD = gnu99;
441 | GCC_NO_COMMON_BLOCKS = YES;
442 | GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
443 | GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
444 | GCC_WARN_UNDECLARED_SELECTOR = YES;
445 | GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
446 | GCC_WARN_UNUSED_FUNCTION = YES;
447 | GCC_WARN_UNUSED_VARIABLE = YES;
448 | IPHONEOS_DEPLOYMENT_TARGET = 12.0;
449 | MTL_ENABLE_DEBUG_INFO = NO;
450 | SDKROOT = iphoneos;
451 | SWIFT_COMPILATION_MODE = wholemodule;
452 | SWIFT_OPTIMIZATION_LEVEL = "-O";
453 | SWIFT_VERSION = 4.2;
454 | TARGETED_DEVICE_FAMILY = "1,2";
455 | VALIDATE_PRODUCT = YES;
456 | };
457 | name = Release;
458 | };
459 | 7AA677241CFF765600B353FB /* Debug */ = {
460 | isa = XCBuildConfiguration;
461 | buildSettings = {
462 | ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
463 | CLANG_ENABLE_MODULES = YES;
464 | CODE_SIGN_IDENTITY = "iPhone Developer";
465 | CURRENT_PROJECT_VERSION = 1;
466 | DEAD_CODE_STRIPPING = NO;
467 | DEVELOPMENT_TEAM = TD7S8QYFSY;
468 | INFOPLIST_FILE = AppML/Info.plist;
469 | IPHONEOS_DEPLOYMENT_TARGET = 13.0;
470 | LD_RUNPATH_SEARCH_PATHS = (
471 | "$(inherited)",
472 | "@executable_path/Frameworks",
473 | );
474 | LD_VERIFY_BITCODE = NO;
475 | MARKETING_VERSION = 1.3;
476 | PRODUCT_BUNDLE_IDENTIFIER = it.filippoaleotti;
477 | PRODUCT_NAME = MobilePydnet;
478 | PROVISIONING_PROFILE_SPECIFIER = "";
479 | SDKROOT = iphoneos;
480 | SWIFT_OBJC_BRIDGING_HEADER = "";
481 | SWIFT_OPTIMIZATION_LEVEL = "-Onone";
482 | SWIFT_VERSION = 5.0;
483 | TARGETED_DEVICE_FAMILY = "1,2";
484 | };
485 | name = Debug;
486 | };
487 | 7AA677251CFF765600B353FB /* Release */ = {
488 | isa = XCBuildConfiguration;
489 | buildSettings = {
490 | ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
491 | CLANG_ENABLE_MODULES = YES;
492 | CODE_SIGN_IDENTITY = "iPhone Developer";
493 | CURRENT_PROJECT_VERSION = 1;
494 | DEAD_CODE_STRIPPING = NO;
495 | DEVELOPMENT_TEAM = ZF4U32B6GT;
496 | INFOPLIST_FILE = AppML/Info.plist;
497 | IPHONEOS_DEPLOYMENT_TARGET = 13.0;
498 | LD_RUNPATH_SEARCH_PATHS = (
499 | "$(inherited)",
500 | "@executable_path/Frameworks",
501 | );
502 | LD_VERIFY_BITCODE = NO;
503 | MARKETING_VERSION = 1.3;
504 | PRODUCT_BUNDLE_IDENTIFIER = it.gzaccaroni.mlapp;
505 | PRODUCT_NAME = MobilePydnet;
506 | PROVISIONING_PROFILE_SPECIFIER = "";
507 | SDKROOT = iphoneos;
508 | SWIFT_OBJC_BRIDGING_HEADER = "";
509 | SWIFT_VERSION = 5.0;
510 | TARGETED_DEVICE_FAMILY = "1,2";
511 | };
512 | name = Release;
513 | };
514 | /* End XCBuildConfiguration section */
515 |
516 | /* Begin XCConfigurationList section */
517 | 7AA6770C1CFF765500B353FB /* Build configuration list for PBXProject "AppML" */ = {
518 | isa = XCConfigurationList;
519 | buildConfigurations = (
520 | 7AA677211CFF765600B353FB /* Debug */,
521 | 7AA677221CFF765600B353FB /* Release */,
522 | );
523 | defaultConfigurationIsVisible = 0;
524 | defaultConfigurationName = Release;
525 | };
526 | 7AA677231CFF765600B353FB /* Build configuration list for PBXNativeTarget "AppML" */ = {
527 | isa = XCConfigurationList;
528 | buildConfigurations = (
529 | 7AA677241CFF765600B353FB /* Debug */,
530 | 7AA677251CFF765600B353FB /* Release */,
531 | );
532 | defaultConfigurationIsVisible = 0;
533 | defaultConfigurationName = Release;
534 | };
535 | /* End XCConfigurationList section */
536 |
537 | /* Begin XCRemoteSwiftPackageReference section */
538 | 3C140CA12322CD8F00D27DFD /* XCRemoteSwiftPackageReference "RxSwift" */ = {
539 | isa = XCRemoteSwiftPackageReference;
540 | repositoryURL = "git@github.com:ReactiveX/RxSwift.git";
541 | requirement = {
542 | kind = upToNextMajorVersion;
543 | minimumVersion = 5.0.1;
544 | };
545 | };
546 | /* End XCRemoteSwiftPackageReference section */
547 |
548 | /* Begin XCSwiftPackageProductDependency section */
549 | 3C140CA22322CD8F00D27DFD /* RxRelay */ = {
550 | isa = XCSwiftPackageProductDependency;
551 | package = 3C140CA12322CD8F00D27DFD /* XCRemoteSwiftPackageReference "RxSwift" */;
552 | productName = RxRelay;
553 | };
554 | 3C140CA42322CD8F00D27DFD /* RxSwift */ = {
555 | isa = XCSwiftPackageProductDependency;
556 | package = 3C140CA12322CD8F00D27DFD /* XCRemoteSwiftPackageReference "RxSwift" */;
557 | productName = RxSwift;
558 | };
559 | 3C140CA62322CD8F00D27DFD /* RxCocoa */ = {
560 | isa = XCSwiftPackageProductDependency;
561 | package = 3C140CA12322CD8F00D27DFD /* XCRemoteSwiftPackageReference "RxSwift" */;
562 | productName = RxCocoa;
563 | };
564 | /* End XCSwiftPackageProductDependency section */
565 | };
566 | rootObject = 7AA677091CFF765500B353FB /* Project object */;
567 | }
568 |
--------------------------------------------------------------------------------
/iOS/AppML.xcodeproj/project.xcworkspace/contents.xcworkspacedata:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <Workspace
3 |    version = "1.0">
4 |    <FileRef
5 |       location = "self:">
6 |    </FileRef>
7 | </Workspace>
8 |
--------------------------------------------------------------------------------
/iOS/AppML.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
3 | <plist version="1.0">
4 | <dict>
5 | 	<key>IDEDidComputeMac32BitWarning</key>
6 | 	<true/>
7 | </dict>
8 | </plist>
9 |
--------------------------------------------------------------------------------
/iOS/AppML.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved:
--------------------------------------------------------------------------------
1 | {
2 | "object": {
3 | "pins": [
4 | {
5 | "package": "RxSwift",
6 | "repositoryURL": "git@github.com:ReactiveX/RxSwift.git",
7 | "state": {
8 | "branch": null,
9 | "revision": "b3e888b4972d9bc76495dd74d30a8c7fad4b9395",
10 | "version": "5.0.1"
11 | }
12 | }
13 | ]
14 | },
15 | "version": 1
16 | }
17 |
--------------------------------------------------------------------------------
/iOS/AppML.xcodeproj/project.xcworkspace/xcuserdata/filippoaleotti.xcuserdatad/UserInterfaceState.xcuserstate:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/AppML.xcodeproj/project.xcworkspace/xcuserdata/filippoaleotti.xcuserdatad/UserInterfaceState.xcuserstate
--------------------------------------------------------------------------------
/iOS/AppML.xcodeproj/xcshareddata/xcschemes/AppML.xcscheme:
--------------------------------------------------------------------------------
1 |
2 |
5 |
8 |
9 |
15 |
21 |
22 |
23 |
24 |
25 |
30 |
31 |
37 |
38 |
39 |
40 |
41 |
42 |
53 |
55 |
61 |
62 |
63 |
64 |
70 |
72 |
78 |
79 |
80 |
81 |
83 |
84 |
87 |
88 |
89 |
--------------------------------------------------------------------------------
/iOS/AppML.xcodeproj/xcuserdata/filippoaleotti.xcuserdatad/xcdebugger/Breakpoints_v2.xcbkptlist:
--------------------------------------------------------------------------------
1 |
2 |
6 |
7 |
--------------------------------------------------------------------------------
/iOS/AppML.xcworkspace/contents.xcworkspacedata:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <Workspace
3 |    version = "1.0">
4 |    <FileRef
5 |       location = "group:AppML.xcodeproj">
6 |    </FileRef>
7 | </Workspace>
8 |
--------------------------------------------------------------------------------
/iOS/AppML.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
3 | <plist version="1.0">
4 | <dict>
5 | 	<key>IDEDidComputeMac32BitWarning</key>
6 | 	<true/>
7 | </dict>
8 | </plist>
9 |
--------------------------------------------------------------------------------
/iOS/AppML.xcworkspace/xcshareddata/swiftpm/Package.resolved:
--------------------------------------------------------------------------------
1 | {
2 | "object": {
3 | "pins": [
4 | {
5 | "package": "RxSwift",
6 | "repositoryURL": "git@github.com:ReactiveX/RxSwift.git",
7 | "state": {
8 | "branch": null,
9 | "revision": "002d325b0bdee94e7882e1114af5ff4fe1e96afa",
10 | "version": "5.1.1"
11 | }
12 | }
13 | ]
14 | },
15 | "version": 1
16 | }
17 |
--------------------------------------------------------------------------------
/iOS/AppML/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/AppML/.DS_Store
--------------------------------------------------------------------------------
/iOS/AppML/AppDelegate.swift:
--------------------------------------------------------------------------------
1 | //
2 | // AppDelegate.swift
3 | // AppML
4 | //
5 | // Created by Giulio Zaccaroni on 21/04/2019.
6 | // Copyright © 2019 Apple. All rights reserved.
7 | //
8 | import UIKit
9 | @UIApplicationMain
10 | class AppDelegate: UIResponder, UIApplicationDelegate {
11 | var window: UIWindow?
12 | func application(_ application: UIApplication, didFinishLaunchingWithOptions launchOptions: [UIApplication.LaunchOptionsKey : Any]? = nil) -> Bool {
13 | UIApplication.shared.isIdleTimerDisabled = true
14 |
15 | self.window = UIWindow(frame: UIScreen.main.bounds)
16 |
17 | let storyboard = UIStoryboard(name: "Main", bundle: nil)
18 |
19 | let initialViewController = storyboard.instantiateInitialViewController()!
20 |
21 | self.window?.rootViewController = initialViewController
22 | self.window?.makeKeyAndVisible()
23 |
24 | return true
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/iOS/AppML/Assets.xcassets/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/AppML/Assets.xcassets/.DS_Store
--------------------------------------------------------------------------------
/iOS/AppML/Assets.xcassets/AppIcon.appiconset/Contents.json:
--------------------------------------------------------------------------------
1 | {
2 | "images" : [
3 | {
4 | "size" : "20x20",
5 | "idiom" : "iphone",
6 | "filename" : "Icon-App-20x20@2x.png",
7 | "scale" : "2x"
8 | },
9 | {
10 | "size" : "20x20",
11 | "idiom" : "iphone",
12 | "filename" : "Icon-App-20x20@3x.png",
13 | "scale" : "3x"
14 | },
15 | {
16 | "size" : "29x29",
17 | "idiom" : "iphone",
18 | "filename" : "Icon-App-29x29@1x.png",
19 | "scale" : "1x"
20 | },
21 | {
22 | "size" : "29x29",
23 | "idiom" : "iphone",
24 | "filename" : "Icon-App-29x29@2x.png",
25 | "scale" : "2x"
26 | },
27 | {
28 | "size" : "29x29",
29 | "idiom" : "iphone",
30 | "filename" : "Icon-App-29x29@3x.png",
31 | "scale" : "3x"
32 | },
33 | {
34 | "size" : "40x40",
35 | "idiom" : "iphone",
36 | "filename" : "Icon-App-40x40@2x.png",
37 | "scale" : "2x"
38 | },
39 | {
40 | "size" : "40x40",
41 | "idiom" : "iphone",
42 | "filename" : "Icon-App-40x40@3x.png",
43 | "scale" : "3x"
44 | },
45 | {
46 | "size" : "60x60",
47 | "idiom" : "iphone",
48 | "filename" : "Icon-App-60x60@2x.png",
49 | "scale" : "2x"
50 | },
51 | {
52 | "size" : "60x60",
53 | "idiom" : "iphone",
54 | "filename" : "Icon-App-60x60@3x.png",
55 | "scale" : "3x"
56 | },
57 | {
58 | "size" : "20x20",
59 | "idiom" : "ipad",
60 | "filename" : "Icon-App-20x20@1x.png",
61 | "scale" : "1x"
62 | },
63 | {
64 | "size" : "20x20",
65 | "idiom" : "ipad",
66 | "filename" : "Icon-App-20x20@2x.png",
67 | "scale" : "2x"
68 | },
69 | {
70 | "size" : "29x29",
71 | "idiom" : "ipad",
72 | "filename" : "Icon-App-29x29@1x.png",
73 | "scale" : "1x"
74 | },
75 | {
76 | "size" : "29x29",
77 | "idiom" : "ipad",
78 | "filename" : "Icon-App-29x29@2x.png",
79 | "scale" : "2x"
80 | },
81 | {
82 | "size" : "40x40",
83 | "idiom" : "ipad",
84 | "filename" : "Icon-App-40x40@1x.png",
85 | "scale" : "1x"
86 | },
87 | {
88 | "size" : "40x40",
89 | "idiom" : "ipad",
90 | "filename" : "Icon-App-40x40@2x.png",
91 | "scale" : "2x"
92 | },
93 | {
94 | "size" : "76x76",
95 | "idiom" : "ipad",
96 | "filename" : "Icon-App-76x76@1x.png",
97 | "scale" : "1x"
98 | },
99 | {
100 | "size" : "76x76",
101 | "idiom" : "ipad",
102 | "filename" : "Icon-App-76x76@2x.png",
103 | "scale" : "2x"
104 | },
105 | {
106 | "size" : "83.5x83.5",
107 | "idiom" : "ipad",
108 | "filename" : "Icon-App-83.5x83.5@2x.png",
109 | "scale" : "2x"
110 | },
111 | {
112 | "size" : "1024x1024",
113 | "idiom" : "ios-marketing",
114 | "filename" : "ItunesArtwork@2x.png",
115 | "scale" : "1x"
116 | }
117 | ],
118 | "info" : {
119 | "version" : 1,
120 | "author" : "xcode"
121 | }
122 | }
--------------------------------------------------------------------------------
/iOS/AppML/Assets.xcassets/AppIcon.appiconset/Icon-App-20x20@1x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/AppML/Assets.xcassets/AppIcon.appiconset/Icon-App-20x20@1x.png
--------------------------------------------------------------------------------
/iOS/AppML/Assets.xcassets/AppIcon.appiconset/Icon-App-20x20@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/AppML/Assets.xcassets/AppIcon.appiconset/Icon-App-20x20@2x.png
--------------------------------------------------------------------------------
/iOS/AppML/Assets.xcassets/AppIcon.appiconset/Icon-App-20x20@3x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/AppML/Assets.xcassets/AppIcon.appiconset/Icon-App-20x20@3x.png
--------------------------------------------------------------------------------
/iOS/AppML/Assets.xcassets/AppIcon.appiconset/Icon-App-29x29@1x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/AppML/Assets.xcassets/AppIcon.appiconset/Icon-App-29x29@1x.png
--------------------------------------------------------------------------------
/iOS/AppML/Assets.xcassets/AppIcon.appiconset/Icon-App-29x29@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/AppML/Assets.xcassets/AppIcon.appiconset/Icon-App-29x29@2x.png
--------------------------------------------------------------------------------
/iOS/AppML/Assets.xcassets/AppIcon.appiconset/Icon-App-29x29@3x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/AppML/Assets.xcassets/AppIcon.appiconset/Icon-App-29x29@3x.png
--------------------------------------------------------------------------------
/iOS/AppML/Assets.xcassets/AppIcon.appiconset/Icon-App-40x40@1x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/AppML/Assets.xcassets/AppIcon.appiconset/Icon-App-40x40@1x.png
--------------------------------------------------------------------------------
/iOS/AppML/Assets.xcassets/AppIcon.appiconset/Icon-App-40x40@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/AppML/Assets.xcassets/AppIcon.appiconset/Icon-App-40x40@2x.png
--------------------------------------------------------------------------------
/iOS/AppML/Assets.xcassets/AppIcon.appiconset/Icon-App-40x40@3x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/AppML/Assets.xcassets/AppIcon.appiconset/Icon-App-40x40@3x.png
--------------------------------------------------------------------------------
/iOS/AppML/Assets.xcassets/AppIcon.appiconset/Icon-App-60x60@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/AppML/Assets.xcassets/AppIcon.appiconset/Icon-App-60x60@2x.png
--------------------------------------------------------------------------------
/iOS/AppML/Assets.xcassets/AppIcon.appiconset/Icon-App-60x60@3x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/AppML/Assets.xcassets/AppIcon.appiconset/Icon-App-60x60@3x.png
--------------------------------------------------------------------------------
/iOS/AppML/Assets.xcassets/AppIcon.appiconset/Icon-App-76x76@1x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/AppML/Assets.xcassets/AppIcon.appiconset/Icon-App-76x76@1x.png
--------------------------------------------------------------------------------
/iOS/AppML/Assets.xcassets/AppIcon.appiconset/Icon-App-76x76@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/AppML/Assets.xcassets/AppIcon.appiconset/Icon-App-76x76@2x.png
--------------------------------------------------------------------------------
/iOS/AppML/Assets.xcassets/AppIcon.appiconset/Icon-App-83.5x83.5@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/AppML/Assets.xcassets/AppIcon.appiconset/Icon-App-83.5x83.5@2x.png
--------------------------------------------------------------------------------
/iOS/AppML/Assets.xcassets/AppIcon.appiconset/ItunesArtwork@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/AppML/Assets.xcassets/AppIcon.appiconset/ItunesArtwork@2x.png
--------------------------------------------------------------------------------
/iOS/AppML/Assets.xcassets/ColorFilterOff.imageset/ColorFilterOff@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/AppML/Assets.xcassets/ColorFilterOff.imageset/ColorFilterOff@2x.png
--------------------------------------------------------------------------------
/iOS/AppML/Assets.xcassets/ColorFilterOff.imageset/Contents.json:
--------------------------------------------------------------------------------
1 | {
2 | "images" : [
3 | {
4 | "idiom" : "universal",
5 | "scale" : "1x"
6 | },
7 | {
8 | "idiom" : "universal",
9 | "filename" : "ColorFilterOff@2x.png",
10 | "scale" : "2x"
11 | },
12 | {
13 | "idiom" : "universal",
14 | "scale" : "3x"
15 | }
16 | ],
17 | "info" : {
18 | "version" : 1,
19 | "author" : "xcode"
20 | },
21 | "properties" : {
22 | "template-rendering-intent" : "template"
23 | }
24 | }
--------------------------------------------------------------------------------
/iOS/AppML/Assets.xcassets/ColorFilterOn.imageset/ColorFilterOn@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/AppML/Assets.xcassets/ColorFilterOn.imageset/ColorFilterOn@2x.png
--------------------------------------------------------------------------------
/iOS/AppML/Assets.xcassets/ColorFilterOn.imageset/Contents.json:
--------------------------------------------------------------------------------
1 | {
2 | "images" : [
3 | {
4 | "idiom" : "universal",
5 | "scale" : "1x"
6 | },
7 | {
8 | "idiom" : "universal",
9 | "filename" : "ColorFilterOn@2x.png",
10 | "scale" : "2x"
11 | },
12 | {
13 | "idiom" : "universal",
14 | "scale" : "3x"
15 | }
16 | ],
17 | "info" : {
18 | "version" : 1,
19 | "author" : "xcode"
20 | },
21 | "properties" : {
22 | "template-rendering-intent" : "template"
23 | }
24 | }
--------------------------------------------------------------------------------
/iOS/AppML/Assets.xcassets/Contents.json:
--------------------------------------------------------------------------------
1 | {
2 | "info" : {
3 | "version" : 1,
4 | "author" : "xcode"
5 | }
6 | }
--------------------------------------------------------------------------------
/iOS/AppML/Assets.xcassets/SettingsIcon.imageset/Contents.json:
--------------------------------------------------------------------------------
1 | {
2 | "images" : [
3 | {
4 | "idiom" : "universal",
5 | "filename" : "icons8-services-2.pdf"
6 | }
7 | ],
8 | "info" : {
9 | "version" : 1,
10 | "author" : "xcode"
11 | }
12 | }
--------------------------------------------------------------------------------
/iOS/AppML/Assets.xcassets/SettingsIcon.imageset/icons8-services-2.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/AppML/Assets.xcassets/SettingsIcon.imageset/icons8-services-2.pdf
--------------------------------------------------------------------------------
/iOS/AppML/Assets.xcassets/Trash.imageset/Contents.json:
--------------------------------------------------------------------------------
1 | {
2 | "images" : [
3 | {
4 | "idiom" : "universal",
5 | "filename" : "Medium-S.pdf"
6 | }
7 | ],
8 | "info" : {
9 | "version" : 1,
10 | "author" : "xcode"
11 | },
12 | "properties" : {
13 | "template-rendering-intent" : "template"
14 | }
15 | }
--------------------------------------------------------------------------------
/iOS/AppML/Assets.xcassets/Trash.imageset/Medium-S.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/AppML/Assets.xcassets/Trash.imageset/Medium-S.pdf
--------------------------------------------------------------------------------
/iOS/AppML/CameraLayer/CameraStream.swift:
--------------------------------------------------------------------------------
1 | //
2 | // CameraStream.swift
3 | // AppML
4 | //
5 | // Created by Giulio Zaccaroni on 27/07/2019.
6 | // Copyright © 2019 Apple. All rights reserved.
7 | //
8 |
9 | import AVFoundation
10 | import Photos
11 | import Accelerate
12 | import CoreML
13 | import RxSwift
14 | public class CameraStream: NSObject {
15 | private let session = AVCaptureSession()
16 | private var isSessionRunning = false
17 | private let dataOutputQueue = DispatchQueue(label: "data output queue")
18 | private let sessionQueue = DispatchQueue(label: "session queue") // Communicate with the session and other session objects on this queue.
19 | private var subject: PublishSubject<CVPixelBuffer>?
20 |
21 | public func configure() -> Completable{
22 | return Completable.create { completable in
23 | return self.sessionQueue.sync {
24 | return self.configureSession(completable: completable)
25 | }
26 | }
27 | }
28 | public func start() -> Observable<CVPixelBuffer>{
29 | let subject = PublishSubject<CVPixelBuffer>()
30 | sessionQueue.sync {
31 | self.subject = subject
32 | session.startRunning()
33 | self.isSessionRunning = self.session.isRunning
34 | }
35 | return subject
36 | }
37 | public func stop(){
38 | sessionQueue.sync {
39 | self.session.stopRunning()
40 | self.isSessionRunning = self.session.isRunning
41 | subject?.dispose()
42 | self.subject = nil
43 | }
44 | }
45 |
46 | private let videoOutput = AVCaptureVideoDataOutput()
47 | @objc private dynamic var videoDeviceInput: AVCaptureDeviceInput!
48 | private func configureSession(completable: ((CompletableEvent) -> ())) -> Cancelable{
49 |
50 | session.beginConfiguration()
51 |
52 | session.sessionPreset = .hd1920x1080
53 |
54 | do {
55 |
56 | // default to a wide angle camera.
57 |
58 | guard let videoDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back) else{
59 | print("Camera video device is unavailable.")
60 | session.commitConfiguration()
61 | completable(.error(SessionSetupError.configurationFailed))
62 | return Disposables.create {}
63 | }
64 | let videoDeviceInput = try AVCaptureDeviceInput(device: videoDevice)
65 |
66 | if session.canAddInput(videoDeviceInput) {
67 | session.addInput(videoDeviceInput)
68 | self.videoDeviceInput = videoDeviceInput
69 |
70 | try videoDeviceInput.device.lockForConfiguration()
71 | videoDeviceInput.device.focusMode = .continuousAutoFocus
72 | videoDeviceInput.device.unlockForConfiguration()
73 | } else {
74 | print("Couldn't add video device input to the session.")
75 | session.commitConfiguration()
76 | completable(.error(SessionSetupError.configurationFailed))
77 | return Disposables.create {} }
78 |
79 | } catch {
80 | print("Couldn't create video device input: \(error)")
81 | session.commitConfiguration()
82 | completable(.error(SessionSetupError.configurationFailed))
83 | return Disposables.create {}
84 | }
85 | videoOutput.setSampleBufferDelegate(self, queue: dataOutputQueue)
86 | videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
87 |
88 | session.addOutput(videoOutput)
89 |
90 | let videoConnection = videoOutput.connection(with: .video)
91 | videoConnection?.videoOrientation = .landscapeLeft
92 |
93 | session.commitConfiguration()
94 |
95 | completable(.completed)
96 | return Disposables.create {}
97 | }
98 |
99 | }
100 | extension CameraStream: AVCaptureVideoDataOutputSampleBufferDelegate {
101 | public func captureOutput(_ output: AVCaptureOutput,
102 | didOutput sampleBuffer: CMSampleBuffer,
103 | from connection: AVCaptureConnection) {
104 | guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
105 | subject?.onNext(imageBuffer)
106 | }
107 | }
108 |
109 | public enum SessionSetupError: Error {
110 | case needAuthorization
111 | case authorizationDenied
112 | case configurationFailed
113 | case multiCamNotSupported
114 | }
115 |
--------------------------------------------------------------------------------
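A minimal, hypothetical usage sketch for CameraStream (not part of the repository; the dispose bag and the frame handler are placeholders): configure the capture session first, then subscribe to the resulting stream of CVPixelBuffers.

    import RxSwift

    let disposeBag = DisposeBag()
    let camera = CameraStream()

    camera.configure()
        // defer start() so the session only starts after configuration completes
        .andThen(Observable.deferred { camera.start() })
        .subscribe(onNext: { pixelBuffer in
            // hand the CVPixelBuffer to the neural network / preview here
        }, onError: { error in
            print("Camera setup failed: \(error)")
        })
        .disposed(by: disposeBag)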
/iOS/AppML/CameraLayer/Extensions/CGImage+CVPixelBuffer.swift:
--------------------------------------------------------------------------------
1 | //
2 | // CGImage+CVPixelBuffer.swift
3 | // AppML
4 | //
5 | // Created by Giulio Zaccaroni on 07/08/2019.
6 | // Copyright © 2019 Apple. All rights reserved.
7 | //
8 |
9 |
10 | import CoreGraphics
11 | import CoreImage
12 | import VideoToolbox
13 | extension CGImage {
14 | /**
15 | Resizes the image to width x height and converts it to an RGB CVPixelBuffer.
16 | */
17 | public var pixelBuffer: CVPixelBuffer? {
18 | return pixelBuffer(width: width, height: height,
19 | pixelFormatType: kCVPixelFormatType_32ARGB,
20 | colorSpace: CGColorSpaceCreateDeviceRGB(),
21 | alphaInfo: .noneSkipFirst)
22 | }
23 | public func pixelBuffer(width: Int, height: Int,
24 | orientation: CGImagePropertyOrientation) -> CVPixelBuffer? {
25 | return pixelBuffer(width: width, height: height,
26 | pixelFormatType: kCVPixelFormatType_32ARGB,
27 | colorSpace: CGColorSpaceCreateDeviceRGB(),
28 | alphaInfo: .noneSkipFirst)
29 | }
30 |
31 | func pixelBuffer(width: Int, height: Int, pixelFormatType: OSType,
32 | colorSpace: CGColorSpace, alphaInfo: CGImageAlphaInfo) -> CVPixelBuffer? {
33 |
34 | var maybePixelBuffer: CVPixelBuffer?
35 | let attrs = [kCVPixelBufferCGImageCompatibilityKey: kCFBooleanTrue,
36 | kCVPixelBufferCGBitmapContextCompatibilityKey: kCFBooleanTrue]
37 | let status = CVPixelBufferCreate(kCFAllocatorDefault,
38 | width,
39 | height,
40 | pixelFormatType,
41 | attrs as CFDictionary,
42 | &maybePixelBuffer)
43 |
44 | guard status == kCVReturnSuccess, let pixelBuffer = maybePixelBuffer else {
45 | return nil
46 | }
47 |
48 | let flags = CVPixelBufferLockFlags(rawValue: 0)
49 | guard kCVReturnSuccess == CVPixelBufferLockBaseAddress(pixelBuffer, flags) else {
50 | return nil
51 | }
52 | defer { CVPixelBufferUnlockBaseAddress(pixelBuffer, flags) }
53 |
54 | guard let context = CGContext(data: CVPixelBufferGetBaseAddress(pixelBuffer),
55 | width: width,
56 | height: height,
57 | bitsPerComponent: 8,
58 | bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer),
59 | space: colorSpace,
60 | bitmapInfo: alphaInfo.rawValue)
61 | else {
62 | return nil
63 | }
64 |
65 | context.draw(self, in: CGRect(x: 0, y: 0, width: width, height: height))
66 | return pixelBuffer
67 | }
68 | }
69 |
--------------------------------------------------------------------------------
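A sketch of how this extension can be used inside the app module (the `photo` image is a placeholder, and the 640x384 target size is an assumption mirroring the input size used in MainViewModel, not a value taken from this file): scale an image to the network resolution and wrap it in a 32ARGB pixel buffer.

    import CoreGraphics
    import CoreVideo

    // `photo` is a placeholder CGImage
    let inputBuffer: CVPixelBuffer? = photo.pixelBuffer(
        width: 640, height: 384,
        pixelFormatType: kCVPixelFormatType_32ARGB,
        colorSpace: CGColorSpaceCreateDeviceRGB(),
        alphaInfo: .noneSkipFirst)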
/iOS/AppML/Extensions/CV/CVPixelBuffer+createCGImage.swift:
--------------------------------------------------------------------------------
1 | //
2 | // CGImage+createPixelBuffer.swift
3 | // AppML
4 | //
5 | // Created by Giulio Zaccaroni on 27/07/2019.
6 | // Copyright © 2019 Apple. All rights reserved.
7 | //
8 |
9 | import Foundation
10 | import CoreGraphics
11 | import CoreVideo
12 | import VideoToolbox
13 | extension CVPixelBuffer{
14 | public func createCGImage() -> CGImage? {
15 | var cgImage: CGImage?
16 | VTCreateCGImageFromCVPixelBuffer(self, options: nil, imageOut: &cgImage)
17 | return cgImage
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/iOS/AppML/Extensions/RxSwift/RxSwiftBidirectionalBinding.swift:
--------------------------------------------------------------------------------
1 | //
2 | // RxSwiftBidirectionalBinding.swift
3 | // AppML
4 | //
5 | // Created by Giulio Zaccaroni on 04/09/2019.
6 | // Copyright © 2019 Apple. All rights reserved.
7 | //
8 |
9 | import Foundation
10 | import RxSwift
11 | import RxCocoa
12 | infix operator <-> : DefaultPrecedence
13 |
14 | func <-> <T>(property: ControlProperty<T>, relay: BehaviorRelay<T>) -> Disposable {
15 |
16 | let bindToUIDisposable = relay.bind(to: property)
17 | let bindToRelay = property
18 | .subscribe(onNext: { n in
19 | relay.accept(n)
20 | }, onCompleted: {
21 | bindToUIDisposable.dispose()
22 | })
23 |
24 | return Disposables.create(bindToUIDisposable, bindToRelay)
25 | }
26 |
--------------------------------------------------------------------------------
/iOS/AppML/Extensions/RxSwift/UITextView+textColor.swift:
--------------------------------------------------------------------------------
1 | //
2 | // UITextView+textColor.swift
3 | // AppML
4 | //
5 | // Created by Giulio Zaccaroni on 05/09/2019.
6 | // Copyright © 2019 Apple. All rights reserved.
7 | //
8 |
9 | import UIKit
10 | import RxSwift
11 | import RxCocoa
12 | extension Reactive where Base: UITextField {
13 | public var textColor: Binder<UIColor?> {
14 | return Binder(self.base) { view, color in
15 | view.textColor = color
16 | }
17 | }
18 | public var placeholder: Binder<String?> {
19 | return Binder(self.base) { view, placeholder in
20 | view.placeholder = placeholder
21 | }
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/iOS/AppML/Extensions/UI/UIAlertController+Ext.swift:
--------------------------------------------------------------------------------
1 | //
2 | // UIAlertController+LoadingAlert.swift
3 | // DataGrabber
4 | //
5 | // Created by Giulio Zaccaroni on 24/05/2019.
6 | // Copyright © 2019 Apple. All rights reserved.
7 | //
8 |
9 | import UIKit
10 | extension UIAlertController {
11 | convenience init(loadingMessage: String) {
12 | self.init(title: nil, message: loadingMessage, preferredStyle: .alert)
13 |
14 | let loadingIndicator = UIActivityIndicatorView(frame: CGRect(x: 10, y: 5, width: 50, height: 50))
15 | loadingIndicator.hidesWhenStopped = true
16 | loadingIndicator.style = UIActivityIndicatorView.Style.medium
17 | loadingIndicator.startAnimating();
18 |
19 | self.view.addSubview(loadingIndicator)
20 | }
21 | convenience init(title: String, info: String) {
22 | self.init(title: title, message: info, preferredStyle: .alert)
23 | let OKAction = UIAlertAction(title: "Ok", style: .default) { (action:UIAlertAction!) in
24 | self.dismiss(animated: true)
25 | }
26 | self.addAction(OKAction)
27 | }
28 | }
29 |
--------------------------------------------------------------------------------
/iOS/AppML/GraphicLayer/ColorMapApplier.swift:
--------------------------------------------------------------------------------
1 | //
2 | // ColorMapApplier.swift
3 | // AppML
4 | //
5 | // Created by Giulio Zaccaroni on 08/09/2019.
6 | // Copyright © 2019 Apple. All rights reserved.
7 | //
8 |
9 | import Foundation
10 | import CoreGraphics
11 | protocol ColorMapApplier {
12 | func prepare(colorFilter: ColorFilter)
13 | func render(image: CGImage) -> CGImage?
14 | }
15 |
--------------------------------------------------------------------------------
/iOS/AppML/GraphicLayer/DepthToColorMap.metal:
--------------------------------------------------------------------------------
1 | /*
2 | See LICENSE folder for this sample’s licensing information.
3 |
4 | Abstract:
5 | Metal compute shader that translates depth values to color map RGB values.
6 | */
7 |
8 | #include <metal_stdlib>
9 | using namespace metal;
10 |
11 | struct BGRAPixel {
12 | uchar b;
13 | uchar g;
14 | uchar r;
15 | uchar a;
16 | };
17 |
18 |
19 | kernel void depthToColorMap(texture2d<float, access::read> inputTexture [[ texture(0) ]],
20 | texture2d<float, access::write> outputTexture [[ texture(1) ]],
21 | constant BGRAPixel *colorTable [[ buffer(3) ]],
22 | uint2 gid [[ thread_position_in_grid ]])
23 | {
24 |
25 | // Ensure we don't read or write outside of the texture
26 | if ((gid.x >= inputTexture.get_width()) || (gid.y >= inputTexture.get_height())) {
27 | return;
28 | }
29 |
30 | float depth = inputTexture.read(gid).x;
31 | depth = depth * 255;
32 |
33 | BGRAPixel outputColor = colorTable[(int) (depth)];
34 | outputTexture.write(float4(outputColor.b / 255.0, outputColor.g / 255.0, outputColor.r / 255.0, 1.0), gid);
35 | }
36 |
--------------------------------------------------------------------------------
/iOS/AppML/GraphicLayer/MetalColorMapApplier.swift:
--------------------------------------------------------------------------------
1 | //
2 | // DepthToColorMapConverter.swift
3 | // AppML
4 | //
5 | // Created by Giulio Zaccaroni on 21/04/2019.
6 | // Copyright © 2019 Apple. All rights reserved.
7 | //
8 |
9 | import CoreMedia
10 | import CoreVideo
11 | import MetalKit
12 |
13 | struct BGRAPixel {
14 | var blue: UInt8 = 0
15 | var green: UInt8 = 0
16 | var red: UInt8 = 0
17 | var alpha: UInt8 = 0
18 | }
19 |
20 | class ColorTable: NSObject {
21 | private var tableBuf: MTLBuffer?
22 |
23 | required init (metalDevice: MTLDevice, colors: [Int]) {
24 | self.tableBuf = metalDevice.makeBuffer(length: MemoryLayout<BGRAPixel>.size * colors.count, options: .storageModeShared)
25 | self.colors = colors
26 | super.init()
27 | self.fillTable(size: colors.count)
28 | }
29 |
30 | deinit {
31 | }
32 | private func set(table: UnsafeMutablePointer<BGRAPixel>, index: Int, rgb: Int){
33 | table[index].alpha = (UInt8)(255)
34 | table[index].red = (UInt8)((rgb >> 16) & 0xFF)
35 | table[index].green = (UInt8)((rgb >> 8) & 0xFF)
36 | table[index].blue = (UInt8)(rgb & 0xFF)
37 |
38 | }
39 | private let colors: [Int]
40 | private func fillTable(size: Int) {
41 |
42 | let table = tableBuf?.contents().bindMemory(to: BGRAPixel.self, capacity: size)
43 | // Get pixel info
44 | for idx in 0..<size {
45 | set(table: table!, index: idx, rgb: colors[idx])
46 | }
47 | }
48 |
49 |
50 | func getColorTable() -> MTLBuffer {
51 | return tableBuf!
52 | }
53 | }
54 |
55 | class MetalColorMapApplier: ColorMapApplier {
56 | var isPrepared = false
57 |
58 | private let metalDevice = MTLCreateSystemDefaultDevice()!
59 |
60 | private var computePipelineState: MTLComputePipelineState?
61 |
62 | private lazy var commandQueue: MTLCommandQueue? = {
63 | return self.metalDevice.makeCommandQueue()
64 | }()
65 | private let colors = 256
66 | private var colorBuf: MTLBuffer?
67 |
68 | private(set) var preparedColorFilter: ColorFilter? = nil
69 | private let bytesPerPixel = 8
70 |
71 |
72 | required init() {
73 | let defaultLibrary = metalDevice.makeDefaultLibrary()!
74 | let kernelFunction = defaultLibrary.makeFunction(name: "depthToColorMap")
75 | do {
76 | computePipelineState = try metalDevice.makeComputePipelineState(function: kernelFunction!)
77 | } catch {
78 | fatalError("Unable to create depth converter pipeline state. (\(error))")
79 | }
80 |
81 | }
82 |
83 | func prepare(colorFilter: ColorFilter) {
84 | guard preparedColorFilter != colorFilter else {
85 | return
86 | }
87 | reset()
88 |
89 |
90 | if colorFilter != .none {
91 | let colorTable = ColorTable(metalDevice: metalDevice, colors: colorFilter.colors)
92 | colorBuf = colorTable.getColorTable()
93 | }
94 | isPrepared = true
95 | preparedColorFilter = colorFilter
96 | }
97 |
98 | func reset() {
99 | colorBuf = nil
100 | isPrepared = false
101 | }
102 |
103 | // MARK: - Depth to colormap Conversion
104 | func render(image: CGImage) -> CGImage? {
105 | if !isPrepared {
106 | assertionFailure("Invalid state: Not prepared")
107 | return nil
108 | }
109 | let preparedColorFilter = self.preparedColorFilter!
110 | guard preparedColorFilter != .none else {
111 | return image
112 | }
113 | guard let outputTexture = texture(pixelFormat: .bgra8Unorm, width: image.width, height: image.height),
114 | let inputTexture = texture(from: image) else {
115 | return nil
116 | }
117 |
118 | // Set up command queue, buffer, and encoder
119 | guard let commandQueue = commandQueue,
120 | let commandBuffer = commandQueue.makeCommandBuffer(),
121 | let commandEncoder = commandBuffer.makeComputeCommandEncoder() else {
122 | print("Failed to create Metal command queue")
123 | return nil
124 | }
125 |
126 | commandEncoder.label = "Depth to Colormap"
127 | let computePipelineState = self.computePipelineState!
128 |
129 | commandEncoder.setComputePipelineState(computePipelineState)
130 | commandEncoder.setTexture(inputTexture, index: 0)
131 | commandEncoder.setTexture(outputTexture, index: 1)
132 |
133 | if preparedColorFilter != .none {
134 | commandEncoder.setBuffer(colorBuf, offset: 0, index: 3)
135 | }
136 |
137 | // Set up the thread groups.
138 | let width = computePipelineState.threadExecutionWidth
139 | let height = computePipelineState.maxTotalThreadsPerThreadgroup / width
140 | let threadsPerThreadgroup = MTLSizeMake(width, height, 1)
141 | let threadgroupsPerGrid = MTLSize(width: (inputTexture.width + width - 1) / width,
142 | height: (inputTexture.height + height - 1) / height,
143 | depth: 1)
144 | commandEncoder.dispatchThreadgroups(threadgroupsPerGrid, threadsPerThreadgroup: threadsPerThreadgroup)
145 |
146 | commandEncoder.endEncoding()
147 |
148 | commandBuffer.commit()
149 |
150 | return cgImage(from: outputTexture)
151 | }
152 | private func cgImage(from texture: MTLTexture) -> CGImage? {
153 |
154 | // The total number of bytes of the texture
155 | let imageByteCount = texture.width * texture.height * bytesPerPixel
156 |
157 | // The number of bytes for each image row
158 | let bytesPerRow = texture.width * bytesPerPixel
159 |
160 | // An empty buffer that will contain the image
161 | var src = [UInt8](repeating: 0, count: Int(imageByteCount))
162 |
163 | // Gets the bytes from the texture
164 | let region = MTLRegionMake2D(0, 0, texture.width, texture.height)
165 | texture.getBytes(&src, bytesPerRow: bytesPerRow, from: region, mipmapLevel: 0)
166 |
167 | // Creates an image context
168 | let bitmapInfo = CGBitmapInfo(rawValue: (CGBitmapInfo.byteOrder32Big.rawValue | CGImageAlphaInfo.premultipliedLast.rawValue))
169 | let bitsPerComponent = 8
170 | let colorSpace = CGColorSpaceCreateDeviceRGB()
171 | let context = CGContext(data: &src, width: texture.width, height: texture.height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo.rawValue)
172 |
173 | // Creates the image from the graphics context
174 | let dstImage = context?.makeImage()
175 |
176 | // Creates the final UIImage
177 | return dstImage
178 | }
179 | private func texture(from image: CGImage) -> MTLTexture? {
180 | let textureLoader = MTKTextureLoader(device: self.metalDevice)
181 | do {
182 | let textureOut = try textureLoader.newTexture(cgImage: image)
183 | return textureOut
184 | }
185 | catch {
186 | return nil
187 | }
188 | }
189 | private func texture(pixelFormat: MTLPixelFormat, width: Int, height: Int) -> MTLTexture? {
190 |
191 | let textureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: pixelFormat, width: width, height: height, mipmapped: false)
192 | textureDescriptor.usage = [.shaderRead, .shaderWrite]
193 |
194 | return self.metalDevice.makeTexture(descriptor: textureDescriptor)
195 | }
196 |
197 |
198 | }
199 | enum ColorFilter: Equatable {
200 | case none
201 | case magma
202 | var colors: [Int] {
203 | switch self {
204 | case .none:
205 | fatalError("Not applicable")
206 | case .magma:
207 | return [0x000003,0x000004,0x000006,0x010007,0x010109,0x01010B,0x02020D,0x02020F,0x030311,0x040313,0x040415,0x050417,0x060519,0x07051B,0x08061D,0x09071F,0x0A0722,0x0B0824,0x0C0926,0x0D0A28,0x0E0A2A,0x0F0B2C,0x100C2F,0x110C31,0x120D33,0x140D35,0x150E38,0x160E3A,0x170F3C,0x180F3F,0x1A1041,0x1B1044,0x1C1046,0x1E1049,0x1F114B,0x20114D,0x221150,0x231152,0x251155,0x261157,0x281159,0x2A115C,0x2B115E,0x2D1060,0x2F1062,0x301065,0x321067,0x341068,0x350F6A,0x370F6C,0x390F6E,0x3B0F6F,0x3C0F71,0x3E0F72,0x400F73,0x420F74,0x430F75,0x450F76,0x470F77,0x481078,0x4A1079,0x4B1079,0x4D117A,0x4F117B,0x50127B,0x52127C,0x53137C,0x55137D,0x57147D,0x58157E,0x5A157E,0x5B167E,0x5D177E,0x5E177F,0x60187F,0x61187F,0x63197F,0x651A80,0x661A80,0x681B80,0x691C80,0x6B1C80,0x6C1D80,0x6E1E81,0x6F1E81,0x711F81,0x731F81,0x742081,0x762181,0x772181,0x792281,0x7A2281,0x7C2381,0x7E2481,0x7F2481,0x812581,0x822581,0x842681,0x852681,0x872781,0x892881,0x8A2881,0x8C2980,0x8D2980,0x8F2A80,0x912A80,0x922B80,0x942B80,0x952C80,0x972C7F,0x992D7F,0x9A2D7F,0x9C2E7F,0x9E2E7E,0x9F2F7E,0xA12F7E,0xA3307E,0xA4307D,0xA6317D,0xA7317D,0xA9327C,0xAB337C,0xAC337B,0xAE347B,0xB0347B,0xB1357A,0xB3357A,0xB53679,0xB63679,0xB83778,0xB93778,0xBB3877,0xBD3977,0xBE3976,0xC03A75,0xC23A75,0xC33B74,0xC53C74,0xC63C73,0xC83D72,0xCA3E72,0xCB3E71,0xCD3F70,0xCE4070,0xD0416F,0xD1426E,0xD3426D,0xD4436D,0xD6446C,0xD7456B,0xD9466A,0xDA4769,0xDC4869,0xDD4968,0xDE4A67,0xE04B66,0xE14C66,0xE24D65,0xE44E64,0xE55063,0xE65162,0xE75262,0xE85461,0xEA5560,0xEB5660,0xEC585F,0xED595F,0xEE5B5E,0xEE5D5D,0xEF5E5D,0xF0605D,0xF1615C,0xF2635C,0xF3655C,0xF3675B,0xF4685B,0xF56A5B,0xF56C5B,0xF66E5B,0xF6705B,0xF7715B,0xF7735C,0xF8755C,0xF8775C,0xF9795C,0xF97B5D,0xF97D5D,0xFA7F5E,0xFA805E,0xFA825F,0xFB8460,0xFB8660,0xFB8861,0xFB8A62,0xFC8C63,0xFC8E63,0xFC9064,0xFC9265,0xFC9366,0xFD9567,0xFD9768,0xFD9969,0xFD9B6A,0xFD9D6B,0xFD9F6C,0xFDA16E,0xFDA26F,0xFDA470,0xFEA671,0xFEA873,0xFEAA74,0xFEAC75,0xFEAE76,0xFEAF78,0xFEB179,0xFEB37B,0xFEB57C,0xFEB77D,0xFEB97F,0xFEBB80,0xFEBC82,0xFEBE83,0xFEC085,0xFEC286,0xFEC488,0xFEC689,0xFEC78B,0xFEC98D,0xFECB8E,0xFDCD90,0xFDCF92,0xFDD193,0xFDD295,0xFDD497,0xFDD698,0xFDD89A,0xFDDA9C,0xFDDC9D,0xFDDD9F,0xFDDFA1,0xFDE1A3,0xFCE3A5,0xFCE5A6,0xFCE6A8,0xFCE8AA,0xFCEAAC,0xFCECAE,0xFCEEB0,0xFCF0B1,0xFCF1B3,0xFCF3B5,0xFCF5B7,0xFBF7B9,0xFBF9BB,0xFBFABD,0xFBFCBF]
208 | }
209 | }
210 | }
211 |
--------------------------------------------------------------------------------
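A short usage sketch (depthImage is a placeholder CGImage produced by the network, not part of this file): prepare the applier once for a colour filter, then render each depth frame through it.

    let applier: ColorMapApplier = MetalColorMapApplier()
    applier.prepare(colorFilter: .magma)

    if let coloured = applier.render(image: depthImage) {
        // display or save the colour-mapped depth frame
        _ = coloured
    }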
/iOS/AppML/Info.plist:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | CFBundleDevelopmentRegion
6 | en
7 | CFBundleDisplayName
8 | $(PRODUCT_NAME)
9 | CFBundleDocumentTypes
10 |
11 |
12 | CFBundleTypeIconFiles
13 |
14 | CFBundleTypeName
15 | MLModel
16 | LSHandlerRank
17 | Alternate
18 | LSItemContentTypes
19 |
20 | com.apple.coreml.model
21 |
22 |
23 |
24 | CFBundleExecutable
25 | $(EXECUTABLE_NAME)
26 | CFBundleIdentifier
27 | $(PRODUCT_BUNDLE_IDENTIFIER)
28 | CFBundleInfoDictionaryVersion
29 | 6.0
30 | CFBundleName
31 | $(PRODUCT_NAME)
32 | CFBundlePackageType
33 | APPL
34 | CFBundleShortVersionString
35 | $(MARKETING_VERSION)
36 | CFBundleSignature
37 | ????
38 | CFBundleVersion
39 | $(CURRENT_PROJECT_VERSION)
40 | LSRequiresIPhoneOS
41 |
42 | LSSupportsOpeningDocumentsInPlace
43 |
44 | NSAppTransportSecurity
45 |
46 | NSAllowsArbitraryLoads
47 |
48 |
49 | NSCameraUsageDescription
50 | AppML uses the camera to capture frames.
51 | NSPhotoLibraryUsageDescription
52 | AppML saves captured photos to your photo library.
53 | UILaunchStoryboardName
54 | Launch Screen
55 | UIRequiredDeviceCapabilities
56 |
57 | armv7
58 |
59 | UIRequiresFullScreen
60 |
61 | UIStatusBarHidden
62 |
63 | UISupportedInterfaceOrientations
64 |
65 | UIInterfaceOrientationLandscapeLeft
66 |
67 | UISupportedInterfaceOrientations~ipad
68 |
69 | UIInterfaceOrientationPortrait
70 | UIInterfaceOrientationPortraitUpsideDown
71 | UIInterfaceOrientationLandscapeLeft
72 | UIInterfaceOrientationLandscapeRight
73 |
74 | UIViewControllerBasedStatusBarAppearance
75 |
76 | UTImportedTypeDeclarations
77 |
78 |
79 | UTTypeConformsTo
80 |
81 | public.data
82 |
83 | UTTypeDescription
84 | CoreML Model
85 | UTTypeIconFiles
86 |
87 | UTTypeIdentifier
88 | com.apple.coreml.model
89 | UTTypeTagSpecification
90 |
91 | public.filename-extension
92 |
93 | MLMODEL
94 | mlmodel
95 |
96 |
97 |
98 |
99 |
100 |
101 |
102 |
--------------------------------------------------------------------------------
/iOS/AppML/Models/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/AppML/Models/.DS_Store
--------------------------------------------------------------------------------
/iOS/AppML/Models/Pydnet.mlmodel:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/AppML/Models/Pydnet.mlmodel
--------------------------------------------------------------------------------
/iOS/AppML/Mods/CameraOutput.swift:
--------------------------------------------------------------------------------
1 | //
2 | // CameraOutput.swift
3 | // AppML
4 | //
5 | // Created by Giulio Zaccaroni on 07/08/2019.
6 | // Copyright © 2019 Apple. All rights reserved.
7 | //
8 |
9 | import Foundation
10 | protocol CameraOutput {}
11 |
--------------------------------------------------------------------------------
/iOS/AppML/Mods/MonoCameraOutput.swift:
--------------------------------------------------------------------------------
1 | //
2 | // MonoCameraOutput.swift
3 | // AppML
4 | //
5 | // Created by Giulio Zaccaroni on 07/08/2019.
6 | // Copyright © 2019 Apple. All rights reserved.
7 | //
8 |
9 | import Foundation
10 | import CoreVideo
11 | struct MonoCameraOutput: CameraOutput {
12 | let frame: CVPixelBuffer
13 | }
14 |
--------------------------------------------------------------------------------
/iOS/AppML/Mods/NeuralNetwork+Ext.swift:
--------------------------------------------------------------------------------
1 | //
2 | // NeuralNetwork.swift
3 | // AppML
4 | //
5 | // Created by Giulio Zaccaroni on 30/05/2019.
6 | // Copyright © 2019 Apple. All rights reserved.
7 | //
8 |
9 | import Foundation
10 | import CoreVideo
11 | import CoreImage
12 |
13 | extension NeuralNetwork {
14 |
15 | func prediction(outputName: String, image: CVPixelBuffer) throws -> CGImage {
16 | let context = CIContext()
17 | let cvPixelBuffer: CVPixelBuffer = try prediction(outputName: outputName, image: image)
18 | let previewImage = CIImage(cvPixelBuffer: cvPixelBuffer)
19 | return context.createCGImage(previewImage, from: previewImage.extent)!
20 | }
21 | func prediction(outputName: String, left: CVPixelBuffer, right: CVPixelBuffer) throws -> CGImage {
22 | let context = CIContext()
23 | let cvPixelBuffer: CVPixelBuffer = try prediction(outputName: outputName, left: left, right: right)
24 | let previewImage = CIImage(cvPixelBuffer: cvPixelBuffer)
25 | return context.createCGImage(previewImage, from: previewImage.extent)!
26 | }
27 |
28 | enum NeuralNetworkError: Error {
29 | case tooManyInputs
30 | case differentInputsSize
31 | case differentInputsPixelFormatType
32 | case inputNotFound
33 | case outputNotFound
34 | case invalidInput
35 | case invalidOutput
36 | case unsupportedMode
37 | var localizedDescription: String {
38 | switch self {
39 | case .tooManyInputs:
40 | return "Too many inputs"
41 | case .differentInputsSize:
42 | return "Inputs have different size"
43 | case .differentInputsPixelFormatType:
44 | return "Inputs have different pixel format type"
45 | case .inputNotFound:
46 | return "Input not found"
47 | case .outputNotFound:
48 | return "Output not found"
49 | case .invalidInput:
50 | return "Input is not valid"
51 | case .invalidOutput:
52 | return "Output is not valid"
53 | case .unsupportedMode:
54 | return "This mode is not supported on this network"
55 | }
56 | }
57 | }
58 | }
59 |
--------------------------------------------------------------------------------
/iOS/AppML/Mods/NeuralNetwork.swift:
--------------------------------------------------------------------------------
1 | //
2 | // FileNeuralNetwork.swift
3 | // AppML
4 | //
5 | // Created by Giulio Zaccaroni on 06/08/2019.
6 | // Copyright © 2019 Apple. All rights reserved.
7 | //
8 |
9 | import Foundation
10 | import CoreML
11 | class NeuralNetwork: Equatable {
12 | let name: String
13 | private let model: MLModel
14 | let type: StreamType
15 | let inputSize: CGSize
16 | let outputs: [String: CGSize]
17 | var desiredOutput: String {
18 | didSet {
19 | guard oldValue != desiredOutput else{
20 | return
21 | }
22 | var networkSettings = self.networkSettings
23 | networkSettings[defaultsKeys.networkDesiredOutput] = desiredOutput
24 | self.networkSettings = networkSettings
25 | }
26 | }
27 | private(set) var url: URL? = nil
28 | var scaleFactor: Float {
29 | didSet {
30 | guard oldValue != scaleFactor else{
31 | return
32 | }
33 | var networkSettings = self.networkSettings
34 | networkSettings[defaultsKeys.networkScaleFactor] = scaleFactor
35 | self.networkSettings = networkSettings
36 | }
37 | }
38 | var disparity: Bool {
39 | didSet {
40 | guard oldValue != disparity else{
41 | return
42 | }
43 | var networkSettings = self.networkSettings
44 | networkSettings[defaultsKeys.networkIsDisparity] = disparity
45 | self.networkSettings = networkSettings
46 | }
47 | }
48 | private let input: String?
49 | private let leftInput: String?
50 | private let rightInput: String?
51 | convenience init(url: URL, disparity: Bool = true, scaleFactor: Float = 1.0) throws{
52 | let model = try MLModel(contentsOf: url)
53 | try self.init(name: (url.lastPathComponent as NSString).deletingPathExtension, model: model, disparity: disparity, scaleFactor: scaleFactor)
54 | self.url = url
55 | }
56 | init(name: String, model: MLModel, disparity: Bool = true, scaleFactor: Float = 10.5) throws{
57 | self.model = model
58 | guard !model.modelDescription.inputDescriptionsByName.isEmpty else{
59 | throw NeuralNetworkError.inputNotFound
60 | }
61 | guard !model.modelDescription.outputDescriptionsByName.isEmpty else{
62 | throw NeuralNetworkError.outputNotFound
63 | }
64 | self.name = name
65 | let inputDescriptions = model.modelDescription.inputDescriptionsByName.filter{ $0.value.type == .image}
66 |
67 | if(inputDescriptions.count == 1){
68 | type = .mono
69 | let input = inputDescriptions.first!
70 | self.leftInput = nil
71 | self.rightInput = nil
72 | self.input = input.key
73 | let inputIC = input.value.imageConstraint!
74 | inputSize = CGSize(width: inputIC.pixelsWide, height: inputIC.pixelsHigh)
75 | }else if(inputDescriptions.count == 2){
76 | type = .stereo
77 | let keys = inputDescriptions.keys.sorted()
78 | let leftInput = inputDescriptions.first(where: { $0.key.localizedCaseInsensitiveContains("left")}) ?? inputDescriptions.first(where: {$0.key == keys.first})!
79 | let rightInput = inputDescriptions.first(where: { $0.key.localizedCaseInsensitiveContains("right")}) ?? inputDescriptions.first(where: {$0.key == keys.last})!
80 | self.leftInput = leftInput.key
81 | self.rightInput = rightInput.key
82 | self.input = nil
83 | let leftInputIC = leftInput.value.imageConstraint!
84 | let rightInputIC = rightInput.value.imageConstraint!
85 | guard leftInputIC.pixelsHigh == rightInputIC.pixelsHigh,
86 | leftInputIC.pixelsWide == rightInputIC.pixelsWide else{
87 | throw NeuralNetworkError.differentInputsSize
88 | }
89 | guard leftInputIC.pixelFormatType == rightInputIC.pixelFormatType else{
90 | throw NeuralNetworkError.differentInputsPixelFormatType
91 | }
92 | inputSize = CGSize(width: leftInputIC.pixelsWide, height: leftInputIC.pixelsHigh)
93 | }else{
94 | throw NeuralNetworkError.invalidInput
95 | }
96 | let outputDescriptions = model.modelDescription.outputDescriptionsByName.filter{ $0.value.type == .image}
97 | guard !outputDescriptions.isEmpty else{
98 | throw NeuralNetworkError.invalidOutput
99 | }
100 | var outputs: [String: CGSize] = [:]
101 | for outputDescription in outputDescriptions {
102 | guard let imageConstraint = outputDescription.value.imageConstraint else{
103 | throw NeuralNetworkError.invalidOutput
104 | }
105 | outputs[outputDescription.key] = CGSize(width: imageConstraint.pixelsWide, height: imageConstraint.pixelsHigh)
106 | }
107 | self.outputs = outputs
108 | self.disparity = disparity
109 | self.scaleFactor = scaleFactor
110 | self.desiredOutput = outputs.keys.sorted().first!
111 | loadCustomSettings()
112 | }
113 | func refresh(){
114 | loadCustomSettings()
115 | }
116 | func prediction(outputName: String, image: CVPixelBuffer) throws -> CVPixelBuffer {
117 | guard outputs.keys.contains(outputName) else{
118 | throw NeuralNetworkError.outputNotFound
119 | }
120 | guard type == .mono else{
121 | throw NeuralNetworkError.unsupportedMode
122 | }
123 | let featureProvider = MonoInputFeatureProvider(inputName: input!, input: image)
124 | let outputFeatureProvider = try model.prediction(from: featureProvider)
125 | guard let output = outputFeatureProvider.featureValue(for: outputName)?.imageBufferValue else{
126 | throw NeuralNetworkError.invalidOutput
127 | }
128 | return output
129 |
130 | }
131 | func prediction(outputName: String, left: CVPixelBuffer, right: CVPixelBuffer) throws -> CVPixelBuffer {
132 | guard outputs.keys.contains(outputName) else{
133 | throw NeuralNetworkError.outputNotFound
134 | }
135 | guard type == .stereo else{
136 | throw NeuralNetworkError.unsupportedMode
137 | }
138 | let featureProvider = StereoInputFeatureProvider(leftName: leftInput!, rightName: rightInput!, left: left, right: right)
139 | let outputFeatureProvider = try model.prediction(from: featureProvider)
140 | guard let output = outputFeatureProvider.featureValue(for: outputName)?.imageBufferValue else{
141 | throw NeuralNetworkError.invalidOutput
142 | }
143 | return output
144 | }
145 | struct ImageFeature {
146 | let size: CGSize
147 | let name: String
148 | }
149 | static func ==(lhs: NeuralNetwork, rhs: NeuralNetwork) -> Bool{
150 | return lhs.url == rhs.url &&
151 | lhs.name == rhs.name
152 | && lhs.type == rhs.type
153 | && lhs.inputSize == rhs.inputSize
154 | && lhs.input == rhs.input
155 | && lhs.outputs == rhs.outputs
156 | }
157 |
158 | }
159 | // MARK: Customization options
160 | extension NeuralNetwork {
161 | private var userDefaultID: String {
162 | let id: String
163 | if let nonOptURL = url {
164 | id = nonOptURL.lastPathComponent
165 | }else{
166 | id = name
167 | }
168 | return id
169 | }
170 | private var networkSettings: [String: Any] {
171 | get {
172 | guard var settings = UserDefaults.standard.dictionary(forKey: defaultsKeys.networksSettings) else{
173 | let newSettings: [String: [String: Any]] = [userDefaultID:[:]]
174 | UserDefaults.standard.set(newSettings, forKey: defaultsKeys.networksSettings)
175 | return [:]
176 | }
177 | guard let networkSettings = settings[userDefaultID] as? [String: Any] else{
178 | settings[userDefaultID] = [:]
179 | UserDefaults.standard.set(settings, forKey: defaultsKeys.networksSettings)
180 | return [:]
181 | }
182 | return networkSettings
183 | }
184 | set {
185 | var settings = UserDefaults.standard.dictionary(forKey: defaultsKeys.networksSettings) ?? [:]
186 |
187 | settings[userDefaultID] = newValue
188 | UserDefaults.standard.set(settings, forKey: defaultsKeys.networksSettings)
189 | }
190 | }
191 | private func loadCustomSettings(){
192 | let networkSetting = self.networkSettings
193 | if let disparity = networkSetting[defaultsKeys.networkIsDisparity] as? Bool {
194 | self.disparity = disparity
195 | }
196 | if let scaleFactor = networkSetting[defaultsKeys.networkScaleFactor] as? Float {
197 | self.scaleFactor = scaleFactor
198 | }
199 | if let desiredOutput = networkSetting[defaultsKeys.networkDesiredOutput] as? String {
200 | self.desiredOutput = desiredOutput
201 | }
202 | }
203 | struct defaultsKeys {
204 | static let networksSettings = "networksSettings"
205 | static let networkIsDisparity = "isDisparity"
206 | static let networkDesiredOutput = "output"
207 | static let networkScaleFactor = "scaleFactor"
208 | }
209 | }
210 |
--------------------------------------------------------------------------------
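A hypothetical end-to-end sketch of the NeuralNetwork wrapper (the `frame` pixel buffer is a placeholder and is assumed to already match network.inputSize): wrap the bundled OptimizedPydnet model, then run a mono prediction and get the depth map back as a CGImage via the extension in NeuralNetwork+Ext.swift.

    do {
        let network = try NeuralNetwork(name: "Pydnet",
                                        model: OptimizedPydnet().model,
                                        disparity: true,
                                        scaleFactor: 10.5)
        let depthMap: CGImage = try network.prediction(outputName: network.desiredOutput,
                                                       image: frame)
        _ = depthMap
    } catch {
        print("Prediction failed: \(error)")
    }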
/iOS/AppML/Mods/StereoCameraOutput.swift:
--------------------------------------------------------------------------------
1 | //
2 | // StereoCameraOutput.swift
3 | // AppML
4 | //
5 | // Created by Giulio Zaccaroni on 07/08/2019.
6 | // Copyright © 2019 Apple. All rights reserved.
7 | //
8 |
9 | import Foundation
10 | import CoreVideo
11 | struct StereoCameraOutput: CameraOutput {
12 | let leftFrame: CVPixelBuffer
13 | let rightFrame: CVPixelBuffer
14 | }
15 |
--------------------------------------------------------------------------------
/iOS/AppML/NeuralNetworkRepository/FileSystemNeuralNetworkRepository.swift:
--------------------------------------------------------------------------------
1 | //
2 | // FileSystemNeuralNetworkRepository.swift
3 | // AppML
4 | //
5 | // Created by Giulio Zaccaroni on 06/09/2019.
6 | // Copyright © 2019 Apple. All rights reserved.
7 | //
8 |
9 | import Foundation
10 |
--------------------------------------------------------------------------------
/iOS/AppML/NeuralNetworkRepository/NeuralNetworkRepository.swift:
--------------------------------------------------------------------------------
1 | //
2 | // NeuralNetworkRepository.swift
3 | // AppML
4 | //
5 | // Created by Giulio Zaccaroni on 30/05/2019.
6 | // Copyright © 2019 Apple. All rights reserved.
7 | //
8 |
9 | import Foundation
10 | import CoreML
11 | import Model
12 | struct NeuralNetworkRepository {
13 | static var shared = NeuralNetworkRepository()
14 |
15 | private(set) var list: [NeuralNetwork]
16 | var `default`: NeuralNetwork{
17 | return list.first!
18 | }
19 |
20 | private let networksPath: URL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first!.appendingPathComponent("networks", isDirectory: true)
21 |
22 | private init() {
23 | if(!FileManager.default.fileExists(atPath: networksPath.absoluteString)){
24 | try! FileManager.default.createDirectory(at: networksPath, withIntermediateDirectories: true)
25 | }
26 | self.list = []
27 | loadNeuralNetworks()
28 | }
29 | private mutating func loadNeuralNetworks(){
30 | list.removeAll()
31 | list.append(try! NeuralNetwork(name: "Pydnet", model: OptimizedPydnet().model, disparity: true, scaleFactor: 10.5))
32 | list.append(try! NeuralNetwork(name: "Pydnet Stereo", model: PydnetS().model, disparity: false, scaleFactor: 1))
33 | list.append(try! NeuralNetwork(name: "Quantized Pydnet",model: PydnetQuantized().model, disparity: true, scaleFactor: 10.5))
34 | let files = (try? FileManager.default.contentsOfDirectory(at: networksPath, includingPropertiesForKeys: nil, options: [])) ?? []
35 | for file in files{
36 | do {
37 | list.append(try NeuralNetwork(url: file))
38 | }catch {
39 | try? FileManager.default.removeItem(at: file)
40 | }
41 | }
42 | }
43 | mutating func add(network: URL) throws{
44 | if(network.pathExtension != "mlmodel"){
45 | throw NetworkCompilationError.wrongFileFormat
46 | }
47 | let compiledURL = try MLModel.compileModel(at: network)
48 | let network = try NeuralNetwork(url: compiledURL)
49 | var file = networksPath.appendingPathComponent(network.name).appendingPathExtension("mlmodelc")
50 | if(FileManager.default.fileExists(atPath: file.absoluteString)){
51 | var fileNameFound = false
52 | var index = 1
53 | repeat{
54 | file = networksPath.appendingPathComponent(network.name + "_\(index)").appendingPathExtension("mlmodelc")
55 | if(!FileManager.default.fileExists(atPath: file.absoluteString)){
56 | fileNameFound = true
57 | }else{
58 | index+=1;
59 | }
60 | }while(!fileNameFound)
61 | }
62 | try FileManager.default.copyItem(at: compiledURL, to: file)
63 | list.append(try NeuralNetwork(url: file))
64 | }
65 | func get(name: String) -> NeuralNetwork? {
66 | return list.first(where: { $0.name == name})
67 | }
68 | func getAll() -> [NeuralNetwork]{
69 | return list
70 | }
71 | mutating func delete(network: NeuralNetwork) throws {
72 | guard let url = network.url else{
73 | throw NetworkDeletionError.nonDeletable
74 | }
75 | try FileManager.default.removeItem(at: url)
76 | list.removeAll(where: {$0 == network})
77 | }
78 | }
79 | enum NetworkCompilationError: Error {
80 | case wrongFileFormat
81 | var localizedDescription: String {
82 | switch self {
83 | case .wrongFileFormat:
84 | return "Wrong file format"
85 | }
86 | }
87 | }
88 | enum NetworkDeletionError: Error {
89 | case nonDeletable
90 | var localizedDescription: String {
91 | switch self {
92 | case .nonDeletable:
93 | return "This network is not deletable"
94 | }
95 | }
96 | }
97 |
--------------------------------------------------------------------------------
/iOS/AppML/NeuralNetworks/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/AppML/NeuralNetworks/.DS_Store
--------------------------------------------------------------------------------
/iOS/AppML/NeuralNetworks/Helpers/CVPixelBuffer+createCGImage.swift:
--------------------------------------------------------------------------------
1 | //
2 | // CGImage+createPixelBuffer.swift
3 | // AppML
4 | //
5 | // Created by Giulio Zaccaroni on 27/07/2019.
6 | // Copyright © 2019 Apple. All rights reserved.
7 | //
8 |
9 | import Foundation
10 | import CoreGraphics
11 | import CoreVideo
12 | import VideoToolbox
13 | extension CVPixelBuffer{
14 | public func createCGImage() -> CGImage? {
15 | var cgImage: CGImage?
16 | VTCreateCGImageFromCVPixelBuffer(self, options: nil, imageOut: &cgImage)
17 | return cgImage
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/iOS/AppML/NeuralNetworks/Helpers/MonoInputFeatureProvider.swift:
--------------------------------------------------------------------------------
1 | //
2 | // MonoInputFeatureProvider.swift
3 | // AppML
4 | //
5 | // Created by Giulio Zaccaroni on 07/08/2019.
6 | // Copyright © 2019 Apple. All rights reserved.
7 | //
8 |
9 | import Foundation
10 | import CoreML
11 | import CoreVideo
12 | class MonoInputFeatureProvider : MLFeatureProvider {
13 | private let inputName: String
14 | private let input: CVPixelBuffer
15 |
16 | var featureNames: Set<String> {
17 | get {
18 | return [inputName]
19 | }
20 | }
21 |
22 | func featureValue(for featureName: String) -> MLFeatureValue? {
23 | if (featureName == inputName) {
24 | return MLFeatureValue(pixelBuffer: input)
25 | }
26 | return nil
27 | }
28 |
29 | init(inputName: String, input: CVPixelBuffer) {
30 | self.inputName = inputName
31 | self.input = input
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/iOS/AppML/NeuralNetworks/Helpers/StereoInputFeatureProvider.swift:
--------------------------------------------------------------------------------
1 | //
2 | // StereoInputFeatureProvider.swift
3 | // AppML
4 | //
5 | // Created by Giulio Zaccaroni on 07/08/2019.
6 | // Copyright © 2019 Apple. All rights reserved.
7 | //
8 |
9 | import Foundation
10 | import CoreML
11 | import CoreVideo
12 | class StereoInputFeatureProvider : MLFeatureProvider {
13 | private let leftName: String
14 | private let rightName: String
15 | private let left: CVPixelBuffer
16 | private let right: CVPixelBuffer
17 |
18 | var featureNames: Set<String> {
19 | get {
20 | return [leftName, rightName]
21 | }
22 | }
23 |
24 | func featureValue(for featureName: String) -> MLFeatureValue? {
25 | if (featureName == rightName) {
26 | return MLFeatureValue(pixelBuffer: right)
27 | }
28 | if (featureName == leftName) {
29 | return MLFeatureValue(pixelBuffer: left)
30 | }
31 | return nil
32 | }
33 |
34 | init(leftName: String, rightName: String, left: CVPixelBuffer, right: CVPixelBuffer) {
35 | self.leftName = leftName
36 | self.rightName = rightName
37 | self.left = left
38 | self.right = right
39 | }
40 | }
41 |
--------------------------------------------------------------------------------
/iOS/AppML/NeuralNetworks/Pydnet/OptimizedPydnet.mlmodel:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/AppML/NeuralNetworks/Pydnet/OptimizedPydnet.mlmodel
--------------------------------------------------------------------------------
/iOS/AppML/NeuralNetworks/QuantizedPydnet/PydnetQuantized.mlmodel:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/AppML/NeuralNetworks/QuantizedPydnet/PydnetQuantized.mlmodel
--------------------------------------------------------------------------------
/iOS/AppML/Storyboards/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/AppML/Storyboards/.DS_Store
--------------------------------------------------------------------------------
/iOS/AppML/Storyboards/Base.lproj/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/iOS/AppML/Storyboards/Base.lproj/.DS_Store
--------------------------------------------------------------------------------
/iOS/AppML/Storyboards/Base.lproj/LaunchScreen.storyboard:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
--------------------------------------------------------------------------------
/iOS/AppML/Storyboards/Base.lproj/Main.storyboard:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 |
96 |
97 |
98 |
99 |
100 |
101 |
102 |
103 |
104 |
105 |
106 |
107 |
108 |
109 |
110 |
111 |
112 |
113 |
--------------------------------------------------------------------------------
/iOS/AppML/View/Main/MainViewController.swift:
--------------------------------------------------------------------------------
1 | //
2 | // MainViewController.swift
3 | // AppML
4 | //
5 | // Created by Giulio Zaccaroni on 21/04/2019.
6 | // Copyright © 2019 Apple. All rights reserved.
7 | //
8 |
9 |
10 | import UIKit
11 | import AVFoundation
12 | import Photos
13 | import MobileCoreServices
14 | import Accelerate
15 | import CoreML
16 | import VideoToolbox
17 | import RxSwift
18 |
19 | class MainViewController: UIViewController {
20 |
21 | @IBOutlet var typeSegmentedControl: UISegmentedControl!
22 | @IBOutlet private var stackView: UIStackView!
23 | @IBOutlet private var fpsLabel: UILabel!
24 | @IBOutlet private var previewView: UIImageView!
25 | @IBOutlet private var depthPreviewView: UIImageView!
26 | @IBOutlet private var settingsButton: UIButton!
27 | @IBOutlet private var colorFilterButton: UIButton!
28 | private var isVisible: Bool = false
29 | private let applicationViewModel = MainViewModel()
30 | private let disposeBag = DisposeBag()
31 |
32 | // MARK: View Controller Life Cycle
33 | override func viewDidLoad() {
34 | super.viewDidLoad()
35 | typeSegmentedControl.rx.selectedSegmentIndex.map{ $0 == 1 }.bind(to: applicationViewModel.showDepthPreview).disposed(by: disposeBag)
36 | applicationViewModel.showDepthPreview.map{ $0 ? 1 : 0}.bind(to: typeSegmentedControl.rx.selectedSegmentIndex).disposed(by: disposeBag)
37 | applicationViewModel.showDepthPreview.map{ !$0 }.bind(to: colorFilterButton.rx.isHidden).disposed(by: disposeBag)
38 | applicationViewModel.showDepthPreview.map{ !$0 }.bind(to: depthPreviewView.rx.isHidden).disposed(by: disposeBag)
39 | applicationViewModel.depthPreviewImage.drive(depthPreviewView.rx.image).disposed(by: disposeBag)
40 | applicationViewModel.previewImage.drive(previewView.rx.image).disposed(by: disposeBag)
41 | applicationViewModel.fps.map{ "FPS: \($0)"}.drive(fpsLabel.rx.text).disposed(by: disposeBag)
42 | applicationViewModel.colorFilter.map{ $0 != .none}.bind(to: colorFilterButton.rx.isSelected).disposed(by: disposeBag)
43 | applicationViewModel.onError.drive(onNext: { error in
44 | switch error {
45 | case SessionSetupError.needAuthorization:
46 | self.requestAccessToVideoStream()
47 | case SessionSetupError.authorizationDenied:
48 | self.askToChangePrivacySettings()
49 | case SessionSetupError.configurationFailed:
50 | self.show(error: "Unable to capture media")
51 | case SessionSetupError.multiCamNotSupported:
52 | self.show(error: "Multi cam is not supported")
53 | default:
54 | self.show(error: error.localizedDescription)
55 | }
56 | }).disposed(by: disposeBag)
57 | applicationViewModel.onShowColorFilterPicker.map{ [unowned self] in self.showColorFilterPicker() }.subscribe().disposed(by: disposeBag)
58 | colorFilterButton.rx.tap.bind(to: applicationViewModel.onShowColorFilterPicker).disposed(by: disposeBag)
59 |
60 | }
61 | override func viewWillAppear(_ animated: Bool) {
62 | super.viewWillAppear(animated)
63 | self.applicationViewModel.isRunning.accept(true)
64 | }
65 | override func viewWillDisappear(_ animated: Bool) {
66 | super.viewWillDisappear(animated)
67 | self.applicationViewModel.isRunning.accept(false)
68 | }
69 | private func show(error: String){
70 | let alertController = UIAlertController(title: "AppML", message: error, preferredStyle: .alert)
71 |
72 | alertController.addAction(UIAlertAction(title: "Ok",
73 | style: .cancel,
74 | handler: nil))
75 |
76 | self.present(alertController, animated: true, completion: nil)
77 | }
78 | private func requestAccessToVideoStream(){
79 | AVCaptureDevice.requestAccess(for: .video, completionHandler: { granted in
80 | if !granted {
81 | self.askToChangePrivacySettings()
82 | }else{
83 | self.applicationViewModel.isRunning.accept(true)
84 | }
85 | })
86 | }
87 | private func askToChangePrivacySettings(){
88 | let changePrivacySetting = "AppML doesn't have permission to use the camera, please change privacy settings"
89 | let alertController = UIAlertController(title: "AppML", message: changePrivacySetting, preferredStyle: .alert)
90 |
91 | alertController.addAction(UIAlertAction(title: "Ok",
92 | style: .cancel,
93 | handler: nil))
94 |
95 | alertController.addAction(UIAlertAction(title: "Settings",
96 | style: .`default`,
97 | handler: { _ in
98 | UIApplication.shared.open(URL(string: UIApplication.openSettingsURLString)!,
99 | options: [:],
100 | completionHandler: nil)
101 | }))
102 |
103 | self.present(alertController, animated: true, completion: nil)
104 | }
105 | override var shouldAutorotate: Bool {
106 | return false
107 | }
108 | @IBAction func showColorFilterPicker() {
109 | let alert = UIAlertController(title: "Colormap", message: "", preferredStyle: .alert)
110 | alert.addAction(UIAlertAction(title: "Magma", style: .default, handler: { [unowned self] _ in
111 | self.applicationViewModel.colorFilter.accept(.magma)
112 | }))
113 | alert.addAction(UIAlertAction(title: "None", style: .cancel, handler: { [unowned self] _ in
114 | self.applicationViewModel.colorFilter.accept(.none)
115 | }))
116 |
117 | self.present(alert, animated: true)
118 |
119 | }
120 | }
121 |
--------------------------------------------------------------------------------
/iOS/AppML/View/Main/MainViewModel.swift:
--------------------------------------------------------------------------------
1 | //
2 | // MainViewModel.swift
3 | // AppML
4 | //
5 | // Created by Giulio Zaccaroni on 05/09/2019.
6 | // Copyright © 2019 Apple. All rights reserved.
7 | //
8 |
9 | import UIKit
10 | import AVFoundation
11 | import RxSwift
12 | import RxRelay
13 | import RxCocoa
14 | import Accelerate
15 |
16 | class MainViewModel {
17 | let fps: Driver<Int>
18 | let previewImage: Driver<UIImage>
19 | private(set) var depthPreviewImage: Driver<UIImage> = Driver.empty()
20 | let isRunning = BehaviorRelay(value: false)
21 | let showDepthPreview = BehaviorRelay(value: false)
22 | let colorFilter = BehaviorRelay<ColorFilter>(value: .none)
23 | let onShowColorFilterPicker = PublishSubject<Void>()
24 | let onError: Driver<Error>
25 | private var cameraStream: CameraStream?
26 |
27 | private let privateOnError = PublishSubject<Error>()
28 | private let privateFPS = PublishSubject<Int>()
29 | private let privatePreviewImage = PublishSubject<UIImage>()
30 | private let privateDepthPreviewImage = PublishSubject<CGImage>()
31 | private let selectedNeuralNetwork = Pydnet()
32 | private let inputSize: CGSize = CGSize(width: 640, height: 384)
33 | private let disposeBag = DisposeBag()
34 |
35 | init() {
36 | fps = privateFPS.skip(1).asDriver(onErrorDriveWith: Driver.empty())
37 | previewImage = privatePreviewImage.asDriver(onErrorDriveWith: Driver.empty())
38 | onError = privateOnError.asDriver(onErrorDriveWith: Driver.empty())
39 | depthPreviewImage = privateDepthPreviewImage
40 | .map{ [unowned self] in UIImage(cgImage: self.applyColorMap(toImage: $0)) }
41 | .asDriver(onErrorDriveWith: Driver.empty())
42 |
43 |
44 | self.setupCameraController()
45 | self.configureCameraController()
46 |
47 | isRunning.map({ [unowned self] running in
48 | if running {
49 | if let error = self.checkPermission() {
50 | self.privateOnError.onNext(error)
51 | }else{
52 | self.startCameraController()
53 | }
54 | }else{
55 | self.stopCameraController()
56 | }
57 | }).subscribe().disposed(by: disposeBag)
58 | startFPSRecording()
59 | }
60 | // MARK: Logic
61 | private func checkPermission() -> SessionSetupError?{
62 | switch AVCaptureDevice.authorizationStatus(for: .video) {
63 | case .authorized:
64 | // The user has previously granted access to the camera.
65 | return nil
66 | case .notDetermined:
67 | /*
68 | The user has not yet been presented with the option to grant
69 | video access. We suspend the session queue to delay session
70 | setup until the access request has completed.
71 |
72 | Note that audio access will be implicitly requested when we
73 | create an AVCaptureDeviceInput for audio during session setup.
74 | */
75 | return .needAuthorization
76 | default:
77 | // The user has previously denied access.
78 | return .authorizationDenied
79 | }
80 | }
81 | private func setupCameraController(){
82 | stopCameraController()
83 | cameraStream = CameraStream()
84 |
85 | }
86 | private func configureCameraController(){
87 | cameraStream?.configure().subscribe { [weak self] completable in
88 | if case .error(let error) = completable {
89 | self?.privateOnError.onNext(error)
90 | }
91 | }
92 | .disposed(by: disposeBag)
93 |
94 | }
95 | private func startCameraController() {
96 | cameraStream?
97 | .start()
98 | .subscribe(onNext: { output in
99 | self.camera(output: output)
100 | })
101 | .disposed(by: disposeBag)
102 | }
103 | private func stopCameraController() {
104 | cameraStream?.stop()
105 | }
106 |
107 | private func camera(output: CVPixelBuffer) {
108 | var depthImage: CGImage? = nil
109 | var previewimage: CGImage
110 |
111 | if showDepthPreview.value{
112 | let resizedPB = output.resize(newSize: inputSize)!
113 | let start = CFAbsoluteTimeGetCurrent()
114 | let pixelBuffer = try? selectedNeuralNetwork.prediction(im0__0: resizedPB.pixelBuffer!).mul__0
115 | let end = CFAbsoluteTimeGetCurrent()
116 | let timeInterval = end - start
117 | let fps = 1/timeInterval
118 | print(fps)
119 | depthImage = pixelBuffer?.createCGImage()
120 | previewimage = resizedPB
121 | }else{
122 | previewimage = output.createCGImage()!
123 | }
124 | privatePreviewImage.onNext(UIImage(cgImage: previewimage))
125 | if showDepthPreview.value,
126 | let depthImage = depthImage {
127 | privateDepthPreviewImage.onNext(depthImage)
128 | }
129 |
130 | self.samplesCollected+=1
131 | }
132 |
133 | // MARK: Depth Converter
134 | private let photoDepthConverter: ColorMapApplier = MetalColorMapApplier()
135 | private func applyColorMap(toImage image: CGImage) -> CGImage{
136 | self.photoDepthConverter.prepare(colorFilter: colorFilter.value)
137 | return self.photoDepthConverter.render(image: image)!
138 | }
139 | // MARK: FPS Logic
140 | private var samplesCollected: Int = 0
141 |
142 | private var fpsTimer: DispatchSourceTimer?
143 |
144 | private func startFPSRecording(){
145 | fpsTimer = DispatchSource.makeTimerSource(flags: [], queue: DispatchQueue.main)
146 | fpsTimer!.setEventHandler(handler: { [unowned self] in
147 | let samplesCollected = self.samplesCollected
148 | self.samplesCollected = 0
149 | self.privateFPS.onNext(Int(round(Double(samplesCollected)/3.0)))
150 | })
151 | fpsTimer!.schedule(deadline: .now(), repeating: 3)
152 | fpsTimer!.resume()
153 | }
154 | // MARK: Helpers
155 |
156 |
157 | }
158 | extension CVPixelBuffer {
159 | fileprivate func resize(newSize: CGSize)-> CGImage? {
160 | let ciImage = CIImage(cvPixelBuffer: self)
161 | var scale = newSize.width / ciImage.extent.width
162 | if(ciImage.extent.height*scale < newSize.height) {
163 | scale = newSize.height / ciImage.extent.height
164 | }
165 | let transform = CGAffineTransform(scaleX: scale, y: scale)
166 | let context = CIContext()
167 | let retImg = ciImage
168 | .transformed(by: transform)
169 | .cropped(to: CGRect(x: 0, y: 0, width: newSize.width, height: newSize.height))
170 | return context.createCGImage(retImg, from: CGRect(x: 0, y: 0, width: newSize.width, height: newSize.height))
171 |
172 | }
173 | }
174 |
--------------------------------------------------------------------------------
/iOS/AppML/View/Main/PreviewMode.swift:
--------------------------------------------------------------------------------
1 | //
2 | // PreviewMode.swift
3 | // AppML
4 | //
5 | // Created by Giulio Zaccaroni on 05/09/2019.
6 | // Copyright © 2019 Apple. All rights reserved.
7 | //
8 |
9 | import Foundation
10 | enum PreviewMode: Equatable {
11 | case original, depth
12 | }
13 |
--------------------------------------------------------------------------------
/single_inference/.gitignore:
--------------------------------------------------------------------------------
1 | tum/*
2 | nyu/*
3 | kitti/*
4 | *.npz
5 | *.h5
6 | *.tgz
--------------------------------------------------------------------------------
/single_inference/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2020 Filippo Aleotti
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
--------------------------------------------------------------------------------
/single_inference/README.md:
--------------------------------------------------------------------------------
1 | # Single inference
2 | Single-image inference using TensorFlow 1.15 and Python 3.x.
3 | You can install the requirements by running:
4 |
5 | ```
6 | pip install -r requirements.txt
7 | ```
8 |
9 | # Train files
10 | The file `train_files.txt` contains the images used to train the network.
11 | In particular:
12 | * `Coco`: http://images.cocodataset.org/zips/unlabeled2017.zip
13 | * `OpenImages`: https://github.com/cvdfoundation/open-images-dataset#download-images-with-bounding-boxes-annotations
14 |
15 | # Run
16 |
17 | 1. Download the pretrained TensorFlow model [here](https://drive.google.com/file/d/1Zu41tHv89q_F7N5KFigzyUY5vAc8ufQL/view?usp=sharing), and move it into the `ckpt` folder.
18 | 2. Run the `run.sh` script.
19 |
20 | # Export
21 | You can generate `.pb`, `.tflite`, and `.mlmodel` versions of the network by running:
22 |
23 | ```
24 | python export.py --ckpt ckpt/pydnet \
25 | --arch pydnet \
26 | --dest "./" \
27 | --height 384 --width 640
28 | ```
29 |
30 | # License
31 | Code is licensed under Apache v2.0.
32 | Pre-trained models can be used only for research purposes.
33 | Images inside the `test` folder are from [Pexels](https://www.pexels.com/).
--------------------------------------------------------------------------------
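As a quick sanity check on the Export step described in the README above, the `.tflite` file written by `export.py` lands at `<dest>/frozen_models/tflite_<arch>.tflite` and can be loaded with the TF 1.15 interpreter. A minimal sketch (not part of the repository), assuming the export was run with `--dest "./"`, `--arch pydnet`, and the default 384x640 resolution:

```
import numpy as np
import tensorflow as tf

# path assumes export.py was run with --dest "./" and --arch pydnet
interpreter = tf.lite.Interpreter(model_path="./frozen_models/tflite_pydnet.tflite")
interpreter.allocate_tensors()

inp = interpreter.get_input_details()[0]
out = interpreter.get_output_details()[0]
print("input shape:", inp["shape"])    # expected [1, 384, 640, 3]
print("output shape:", out["shape"])

# run one inference on a dummy image already scaled to [0, 1],
# matching the preprocessing used by inference.py
dummy = np.random.rand(*inp["shape"]).astype(np.float32)
interpreter.set_tensor(inp["index"], dummy)
interpreter.invoke()
depth = interpreter.get_tensor(out["index"])
print("depth map shape:", depth.shape)
```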
/single_inference/eval_utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def compute_errors(gt, pred):
5 | """Compute error metrics using predicted and ground truth depths.
6 | From https://github.com/mrharicot/monodepth/blob/master/utils/evaluation_utils.py
7 | """
8 | thresh = np.maximum((gt / pred), (pred / gt))
9 | a1 = (thresh < 1.25).mean()
10 | a2 = (thresh < 1.25 ** 2).mean()
11 | a3 = (thresh < 1.25 ** 3).mean()
12 |
13 | rmse = (gt - pred) ** 2
14 | rmse = np.sqrt(rmse.mean())
15 |
16 | rmse_log = (np.log(gt) - np.log(pred)) ** 2
17 | rmse_log = np.sqrt(rmse_log.mean())
18 |
19 | abs_rel = np.mean(np.abs(gt - pred) / gt)
20 | sq_rel = np.mean(((gt - pred) ** 2) / gt)
21 |
22 | return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3
23 |
24 |
25 | def compute_scale_and_shift(prediction, target, mask):
26 | """From https://gist.github.com/ranftlr/a1c7a24ebb24ce0e2f2ace5bce917022"""
27 |
28 | # system matrix: A = [[a_00, a_01], [a_10, a_11]]
29 | a_00 = np.sum(mask * prediction * prediction)
30 | a_01 = np.sum(mask * prediction)
31 | a_11 = np.sum(mask)
32 |
33 | # right hand side: b = [b_0, b_1]
34 | b_0 = np.sum(mask * prediction * target)
35 | b_1 = np.sum(mask * target)
36 | x_0 = np.zeros_like(b_0)
37 | x_1 = np.zeros_like(b_1)
38 |
39 | det = a_00 * a_11 - a_01 * a_01
40 | # A needs to be a positive definite matrix.
41 | valid = det > 0
42 |
43 | x_0[valid] = (a_11[valid] * b_0[valid] - a_01[valid] * b_1[valid]) / det[valid]
44 | x_1[valid] = (-a_01[valid] * b_0[valid] + a_00[valid] * b_1[valid]) / det[valid]
45 |
46 | return x_0, x_1
47 |
--------------------------------------------------------------------------------
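The evaluation scripts align each prediction to the ground truth with a per-image scale and shift before computing the metrics above: `compute_scale_and_shift` solves the least-squares problem min over (s, t) of sum(mask * (s*pred + t - target)^2) through its 2x2 normal equations. A minimal NumPy sketch (not from the repository, data made up) of the same closed-form solution:

```
import numpy as np

pred = np.random.rand(4, 5)        # hypothetical inverse-depth prediction
target = 2.0 * pred + 0.5          # target that is exactly an affine map of pred
mask = np.ones_like(pred)          # every pixel valid

# 2x2 normal equations for min_{s,t} sum(mask * (s*pred + t - target)**2)
a_00 = np.sum(mask * pred * pred)
a_01 = np.sum(mask * pred)
a_11 = np.sum(mask)
b_0 = np.sum(mask * pred * target)
b_1 = np.sum(mask * target)
det = a_00 * a_11 - a_01 * a_01

scale = (a_11 * b_0 - a_01 * b_1) / det
shift = (-a_01 * b_0 + a_00 * b_1) / det
print(scale, shift)                # recovers ~2.0 and ~0.5
aligned = scale * pred + shift     # the aligned prediction is what gets scored
```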
/single_inference/export.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Filippo Aleotti
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | """
16 | Export a trained TensorFlow model into various formats.
17 | Outputs:
18 | mlmodel: for iOS devices
19 | tflite: for Android devices
20 | pb: protobuf, for generic purposes
21 | """
22 | import sys
23 |
24 | sys.path.insert(0, ".")
25 | import tensorflow as tf
26 | import os
27 | import argparse
28 | from tensorflow.python.framework import graph_util
29 | from tensorflow.python.platform import gfile
30 | from tensorflow.python.tools import freeze_graph
31 | from tensorflow.python.tools import optimize_for_inference_lib
32 | from tensorflow.python.saved_model import tag_constants
33 | import network
34 | import tfcoreml
35 | import coremltools
36 | import coremltools.proto.FeatureTypes_pb2 as ft
37 |
38 | tf_version = int(tf.__version__.replace(".", ""))
39 | if tf_version < 1140:
40 | raise ValueError("For this script, tensorflow must be greater than or equal to 1.14.0")
41 |
42 | os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
43 | tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
44 | os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
45 |
46 | parser = argparse.ArgumentParser(description="Freeze your network")
47 | parser.add_argument("--ckpt", type=str, help="which checkpoint freeze?", required=True)
48 | parser.add_argument("--arch", type=str, help="network to freeze", required=True)
49 | parser.add_argument(
50 | "--dest", type=str, help="where to save frozen models", required=True
51 | )
52 | parser.add_argument("--height", type=int, default=384, help="height of image")
53 | parser.add_argument("--width", type=int, default=640, help="width of image")
54 | parser.add_argument(
55 | "--debug", action="store_true", help="active debug and visualize graph nodes"
56 | )
57 | args = parser.parse_args()
58 |
59 |
60 | def main(_):
61 | params = {
62 | "arch": args.arch,
63 | "output": os.path.join(args.dest, "frozen_models"),
64 | "protobuf": "frozen_" + args.arch + ".pb",
65 | "pbtxt": args.arch + ".pbtxt",
66 | "ckpt": args.arch + ".ckpt",
67 | "mlmodel": args.arch + ".mlmodel",
68 | "onnx": args.arch + ".onnx",
69 | "input_saver_def_path": "",
70 | "input_binary": False,
71 | "restore_op": "save/restore_all",
72 | "saving_op": "save/Const:0",
73 | "frozen_graph_name": "frozen_" + args.arch + ".pb",
74 | "optimized_graph_name": "optimized_" + args.arch + ".pb",
75 | "optimized_tflite_name": "tflite_" + args.arch + ".tflite",
76 | "clear_devices": True,
77 | }
78 |
79 | if not os.path.exists(params["output"]):
80 | os.makedirs(params["output"])
81 |
82 | with tf.Graph().as_default():
83 |
84 | network_params = {
85 | "height": args.height,
86 | "width": args.width,
87 | "is_training": False,
88 | }
89 | input_node = "im0"
90 | input_tensor = tf.placeholder(
91 | tf.float32,
92 | [1, network_params["height"], network_params["width"], 3],
93 | name="im0",
94 | )
95 | model = network.Pydnet(network_params)
96 | predictions = model.forward(input_tensor)
97 | params["output_nodes_port"] = [x.name for x in model.output_nodes]
98 | params["output_nodes"] = [
99 | out.name.replace(":0", "") for out in model.output_nodes
100 | ]
101 | print("=> output nodes port: {}".format(params["output_nodes_port"]))
102 | print("=> output nodes: {}".format(params["output_nodes"]))
103 | params["input_nodes"] = [input_node]
104 | saver = tf.train.Saver()
105 | with tf.Session() as sess:
106 | saver.restore(sess, args.ckpt)
107 |
108 | if args.debug:
109 | for tensor in [
110 | n.name for n in tf.get_default_graph().as_graph_def().node
111 | ]:
112 | print(tensor)
113 |
114 | tf.train.write_graph(sess.graph_def, params["output"], params["pbtxt"])
115 | graph_pbtxt = os.path.join(params["output"], params["pbtxt"])
116 | graph_path = os.path.join(params["output"], params["ckpt"])
117 | saver.save(sess, graph_path)
118 |
119 | outputs = params["output_nodes"][0]
120 | for name in params["output_nodes"][1:]:
121 | outputs += "," + name
122 |
123 | frozen_graph_path = os.path.join(
124 | params["output"], params["frozen_graph_name"]
125 | )
126 | freeze_graph.freeze_graph(
127 | graph_pbtxt,
128 | params["input_saver_def_path"],
129 | params["input_binary"],
130 | graph_path,
131 | outputs,
132 | params["restore_op"],
133 | params["saving_op"],
134 | frozen_graph_path,
135 | params["clear_devices"],
136 | "",
137 | )
138 |
139 | converter = tf.lite.TFLiteConverter.from_frozen_graph(
140 | frozen_graph_path, params["input_nodes"], params["output_nodes"]
141 | )
142 | tflite_model = converter.convert()
143 | optimized_tflite_path = os.path.join(
144 | params["output"], params["optimized_tflite_name"]
145 | )
146 | with open(optimized_tflite_path, "wb") as f:
147 | f.write(tflite_model)
148 |
149 | mlmodel_path = os.path.join(params["output"], params["mlmodel"])
150 | mlmodel = tfcoreml.convert(
151 | tf_model_path=frozen_graph_path,
152 | mlmodel_path=mlmodel_path,
153 | output_feature_names=params["output_nodes_port"],
154 | image_input_names=["im0:0"],
155 | input_name_shape_dict={
156 | "im0:0": [1, network_params["height"], network_params["width"], 3]
157 | },
158 | minimum_ios_deployment_target="12",
159 | image_scale=1 / 255.0,
160 | )
161 |
162 | print("=> setting up input and output of coreml model")
163 | # NOTE: at this point the outputs are MultiArray objects;
164 | # we have to convert them to GRAYSCALE images instead
165 | spec = coremltools.utils.load_spec(mlmodel_path)
166 | for output in spec.description.output:
167 | array_shape = tuple(output.type.multiArrayType.shape)
168 | channels, height, width = array_shape
169 | output.type.imageType.colorSpace = ft.ImageFeatureType.ColorSpace.Value(
170 | "GRAYSCALE"
171 | )
172 | output.type.imageType.width = width
173 | output.type.imageType.height = height
174 |
175 | updated_model = coremltools.models.MLModel(spec)
176 | updated_model.author = "Filippo Aleotti"
177 | updated_model.license = "Apache v2"
178 | updated_model.short_description = params["arch"]
179 | updated_model.save(mlmodel_path)
180 |
181 | print("Done!")
182 |
183 |
184 | if __name__ == "__main__":
185 | tf.app.run()
186 |
--------------------------------------------------------------------------------
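A small inspection sketch (not part of the repository) that loads the Core ML spec written by `export.py` and prints the declared feature types; the paths assume `--dest "./"` and `--arch pydnet`. After the post-processing step above, the outputs should report an `imageType` rather than a `multiArrayType`:

```
import coremltools

# path assumes export.py was run with --dest "./" and --arch pydnet
spec = coremltools.utils.load_spec("./frozen_models/pydnet.mlmodel")
for feature in spec.description.input:
    print("input :", feature.name, feature.type.WhichOneof("Type"))
for feature in spec.description.output:
    print("output:", feature.name, feature.type.WhichOneof("Type"))
```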
/single_inference/inference.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Filippo Aleotti
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | import tensorflow as tf
17 | import cv2
18 | import numpy as np
19 | import os
20 | import argparse
21 | import glob
22 | from tqdm import tqdm
23 | import matplotlib.pyplot as plt
24 | import network
25 | from tensorflow.python.util import deprecation
26 |
27 | # disable future warnings and info messages for this demo
28 | os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
29 | tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
30 |
31 | parser = argparse.ArgumentParser(description="Single shot depth estimator")
32 | parser.add_argument(
33 | "--img", type=str, help="path to reference RGB image", required=True
34 | )
35 | parser.add_argument("--ckpt", type=str, help="path to checkpoint", required=True)
36 | parser.add_argument("--cpu", action="store_true", help="run on cpu")
37 | parser.add_argument(
38 | "--original_size", action="store_true", help="if true, restore original image size"
39 | )
40 | parser.add_argument(
41 | "--dest",
42 | type=str,
43 | help="path to result folder. If not exists, it will be created",
44 | default="results",
45 | )
46 |
47 | opts = parser.parse_args()
48 | if opts.cpu:
49 | os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
50 |
51 |
52 | def create_dir(d):
53 | """ Create a directory if it does not exist
54 | Args:
55 | d: directory to create
56 | """
57 | if not os.path.exists(d):
58 | os.makedirs(d)
59 |
60 |
61 | def main(_):
62 | network_params = {"height": 320, "width": 640, "is_training": False}
63 |
64 | if os.path.isfile(opts.img):
65 | img_list = [opts.img]
66 | elif os.path.isdir(opts.img):
67 | img_list = glob.glob(os.path.join(opts.img, "*.{}".format("png")))
68 | img_list = sorted(img_list)
69 | if len(img_list) == 0:
70 | raise ValueError("No {} images found in folder {}".format(".png", opts.img))
71 | print("=> found {} images".format(len(img_list)))
72 | else:
73 | raise Exception("Neither an image nor a folder was provided")
74 |
75 | model = network.Pydnet(network_params)
76 | tensor_image = tf.placeholder(tf.float32, shape=(320, 640, 3))
77 | batch_img = tf.expand_dims(tensor_image, 0)
78 | tensor_depth = model.forward(batch_img)
79 | tensor_depth = tf.nn.relu(tensor_depth)
80 |
81 | # restore graph
82 | saver = tf.train.Saver()
83 | sess = tf.Session()
84 | sess.run(tf.global_variables_initializer())
85 | saver.restore(sess, opts.ckpt)
86 |
87 | # run graph
88 | for i in tqdm(range(len(img_list))):
89 |
90 | # preparing image
91 | img = cv2.imread(img_list[i])
92 | img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
93 | h, w, _ = img.shape
94 | img = cv2.resize(img, (640, 320))
95 | img = img / 255.0
96 |
97 | # inference
98 | depth = sess.run(tensor_depth, feed_dict={tensor_image: img})
99 | depth = np.squeeze(depth)
100 | min_depth = depth.min()
101 | max_depth = depth.max()
102 | depth = (depth - min_depth) / (max_depth - min_depth)
103 | depth *= 255.0
104 |
105 | # preparing final depth
106 | if opts.original_size:
107 | depth = cv2.resize(depth, (w, h))
108 | name = os.path.basename(img_list[i]).split(".")[0]
109 | dest = opts.dest
110 | create_dir(dest)
111 | dest = os.path.join(dest, name + "_depth.png")
112 | plt.imsave(dest, depth, cmap="magma")
113 |
114 |
115 | if __name__ == "__main__":
116 | tf.app.run()
117 |
--------------------------------------------------------------------------------
/single_inference/modules.py:
--------------------------------------------------------------------------------
1 | # MIT License
2 | #
3 | # Copyright (c) 2018 Matteo Poggi m.poggi@unibo.it
4 | #
5 | # Permission is hereby granted, free of charge, to any person obtaining a copy
6 | # of this software and associated documentation files (the "Software"), to deal
7 | # in the Software without restriction, including without limitation the rights
8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | # copies of the Software, and to permit persons to whom the Software is
10 | # furnished to do so, subject to the following conditions:
11 |
12 | # The above copyright notice and this permission notice shall be included in all
13 | # copies or substantial portions of the Software.
14 |
15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | # SOFTWARE.
22 |
23 | # From https://github.com/mattpoggi/pydnet/blob/master/layers.py
24 |
25 | from __future__ import division
26 | import tensorflow as tf
27 | import numpy as np
28 | import math
29 |
30 |
31 | def leaky_relu(x, alpha=0.2):
32 | return tf.nn.leaky_relu(x, alpha=alpha)
33 |
34 |
35 | ####################################################################################################################################
36 | # 2D convolution wrapper
37 | ####################################################################################################################################
38 | def conv2d_leaky(
39 | input, kernel_shape, bias_shape, strides=1, relu=True, padding="SAME", dil=1
40 | ):
41 | # Conv2D
42 | weights = tf.get_variable(
43 | "weights",
44 | kernel_shape,
45 | initializer=tf.contrib.layers.xavier_initializer(),
46 | dtype=tf.float32,
47 | )
48 | biases = tf.get_variable(
49 | "biases",
50 | bias_shape,
51 | initializer=tf.truncated_normal_initializer(),
52 | dtype=tf.float32,
53 | )
54 | output = tf.nn.conv2d(
55 | input,
56 | weights,
57 | strides=[1, strides, strides, 1],
58 | padding=padding,
59 | dilations=[1, dil, dil, 1],
60 | )
61 | output = tf.nn.bias_add(output, biases)
62 |
63 | # ReLU (if required)
64 | if relu == False:
65 | return output
66 |
67 | output = leaky_relu(output, 0.2)
68 | return output
69 |
70 |
71 | def deconv2d_leaky(
72 | input, kernel_shape, bias_shape, outputShape, strides=1, relu=True, padding="SAME"
73 | ):
74 |
75 | # Conv2D
76 | weights = tf.get_variable(
77 | "weights",
78 | kernel_shape,
79 | initializer=tf.contrib.layers.xavier_initializer(),
80 | dtype=tf.float32,
81 | )
82 | biases = tf.get_variable(
83 | "biases",
84 | bias_shape,
85 | initializer=tf.truncated_normal_initializer(),
86 | dtype=tf.float32,
87 | )
88 | output = tf.nn.conv2d_transpose(
89 | input,
90 | weights,
91 | output_shape=outputShape,
92 | strides=[1, strides, strides, 1],
93 | padding=padding,
94 | )
95 | output = tf.nn.bias_add(output, biases)
96 |
97 | # ReLU (if required)
98 | if relu == False:
99 | print("WARNING: reLU disabled")
100 | else:
101 | output = leaky_relu(output, 0.2)
102 | return output
103 |
104 |
105 | ####################################################################################################################################
106 | # Dilated 2D convolution wrapper
107 | ####################################################################################################################################
108 | def dilated_conv2d_leaky(
109 | input, kernel_shape, bias_shape, name, rate=1, relu=True, padding="SAME"
110 | ):
111 | with tf.variable_scope(name):
112 | # Conv2D
113 | weights = tf.get_variable(
114 | "weights", kernel_shape, initializer=tf.contrib.layers.xavier_initializer()
115 | )
116 | biases = tf.get_variable(
117 | "biases", bias_shape, initializer=tf.truncated_normal_initializer()
118 | )
119 | output = tf.nn.atrous_conv2d(input, weights, rate=rate, padding=padding)
120 | output = tf.nn.bias_add(output, biases)
121 |
122 | if relu == False:
123 | print("WARNING: reLU disabled")
124 | else:
125 | output = leaky_relu(output, 0.2)
126 | return output
127 |
128 |
129 | def bilinear_upsampling_by_deconvolution(src):
130 | shape = src.get_shape().as_list()
131 | h = shape[1] * 2
132 | w = shape[2] * 2
133 | return deconv2d_leaky(
134 | src, [2, 2, shape[3], shape[3]], shape[3], [shape[0], h, w, shape[3]], 2, True
135 | )
136 |
137 |
138 | def bilinear_upsampling_by_convolution(src):
139 | with tf.variable_scope("bilinear_upsampling_by_convolution"):
140 | shape = src.get_shape().as_list()
141 | height = shape[1] * 2
142 | width = shape[2] * 2
143 | channels = shape[3]
144 | upsampled_src = tf.image.resize_images(src, [height, width])
145 | upsampled_src = conv2d_leaky(
146 | upsampled_src, [2, 2, channels, channels], [channels]
147 | )
148 | return upsampled_src
149 |
--------------------------------------------------------------------------------
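`conv2d_leaky` above wraps a convolution, a bias add, and a leaky ReLU under the caller's variable scope; with `strides=2` and SAME padding it halves the spatial resolution, which is how the PyDNet encoder moves down the pyramid. A toy usage sketch (not part of the repository; the `toy_input`/`toy_conv` names are hypothetical), under TF 1.x:

```
import numpy as np
import tensorflow as tf
from modules import conv2d_leaky

# one hypothetical stride-2 encoder step: 3-channel input -> 16 feature maps
x = tf.placeholder(tf.float32, [1, 64, 64, 3], name="toy_input")
with tf.variable_scope("toy_conv"):
    # kernel_shape = [kh, kw, in_channels, out_channels], bias_shape = [out_channels]
    y = conv2d_leaky(x, [3, 3, 3, 16], [16], strides=2, relu=True)
print(y.shape)  # (1, 32, 32, 16): SAME padding with stride 2 halves height and width

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    feat = sess.run(y, feed_dict={x: np.zeros((1, 64, 64, 3), np.float32)})
```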
/single_inference/network.py:
--------------------------------------------------------------------------------
1 | #
2 | # MIT License
3 | #
4 | # Copyright (c) 2018 Matteo Poggi m.poggi@unibo.it
5 | #
6 | # Permission is hereby granted, free of charge, to any person obtaining a copy
7 | # of this software and associated documentation files (the "Software"), to deal
8 | # in the Software without restriction, including without limitation the rights
9 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | # copies of the Software, and to permit persons to whom the Software is
11 | # furnished to do so, subject to the following conditions:
12 |
13 | # The above copyright notice and this permission notice shall be included in all
14 | # copies or substantial portions of the Software.
15 |
16 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 | # SOFTWARE.
23 |
24 | # adapted from https://github.com/mattpoggi/pydnet/blob/master/pydnet.py
25 | import tensorflow as tf
26 | from modules import conv2d_leaky, bilinear_upsampling_by_convolution
27 |
28 |
29 | class Pydnet(object):
30 | def __init__(self, params):
31 | self.params = params
32 | self.height = params["height"]
33 | self.width = params["width"]
34 | self.is_training = params["is_training"]
35 | self.output_nodes = None
36 |
37 | if self.is_training:
38 | self.scales = params["scales"]
39 |
40 | def forward(self, input_image):
41 | """ Single forward of the network
42 | """
43 | encoder_features = self.encoder(input_image)
44 | predictions = self.decoder(encoder_features)
45 | if not self.is_training:
46 | # NOTE: set up nodes for mobile app
47 | self.output_nodes = [self.make_visual(predictions)]
48 | return predictions
49 |
50 | def make_visual(self, prediction):
51 | prediction = tf.nn.relu(prediction)
52 | min_depth = tf.reduce_min(prediction)
53 | max_depth = tf.reduce_max(prediction)
54 | prediction = (prediction - min_depth) / (max_depth - min_depth)
55 | return prediction
56 |
57 | def encoder(self, input_image):
58 | """ Create PyDNet feature extractor
59 | """
60 | with tf.variable_scope("encoder"):
61 | features = []
62 | features.append(input_image)
63 | with tf.variable_scope("conv1a"):
64 | conv1a = conv2d_leaky(input_image, [3, 3, 3, 16], [16], 2, True)
65 | with tf.variable_scope("conv1b"):
66 | conv1b = conv2d_leaky(conv1a, [3, 3, 16, 16], [16], 1, True)
67 |
68 | features.append(conv1b)
69 |
70 | with tf.variable_scope("conv2a"):
71 | conv2a = conv2d_leaky(conv1b, [3, 3, 16, 32], [32], 2, True)
72 | with tf.variable_scope("conv2b"):
73 | conv2b = conv2d_leaky(conv2a, [3, 3, 32, 32], [32], 1, True)
74 |
75 | features.append(conv2b)
76 |
77 | with tf.variable_scope("conv3a"):
78 | conv3a = conv2d_leaky(conv2b, [3, 3, 32, 64], [64], 2, True)
79 | with tf.variable_scope("conv3b"):
80 | conv3b = conv2d_leaky(conv3a, [3, 3, 64, 64], [64], 1, True)
81 |
82 | features.append(conv3b)
83 |
84 | with tf.variable_scope("conv4a"):
85 | conv4a = conv2d_leaky(conv3b, [3, 3, 64, 96], [96], 2, True)
86 | with tf.variable_scope("conv4b"):
87 | conv4b = conv2d_leaky(conv4a, [3, 3, 96, 96], [96], 1, True)
88 |
89 | features.append(conv4b)
90 |
91 | with tf.variable_scope("conv5a"):
92 | conv5a = conv2d_leaky(conv4b, [3, 3, 96, 128], [128], 2, True)
93 | with tf.variable_scope("conv5b"):
94 | conv5b = conv2d_leaky(conv5a, [3, 3, 128, 128], [128], 1, True)
95 |
96 | features.append(conv5b)
97 |
98 | with tf.variable_scope("conv6a"):
99 | conv6a = conv2d_leaky(conv5b, [3, 3, 128, 192], [192], 2, True)
100 | with tf.variable_scope("conv6b"):
101 | conv6b = conv2d_leaky(conv6a, [3, 3, 192, 192], [192], 1, True)
102 |
103 | features.append(conv6b)
104 | return features
105 |
106 | def decoder(self, encoder_features):
107 | """ Create PyDNet decoder
108 | """
109 | with tf.variable_scope("decoder"):
110 | with tf.variable_scope("L6") as scope:
111 | with tf.variable_scope("estimator") as scope:
112 | conv6 = self.build_estimator(encoder_features[6])
113 | prediction_6 = self.get_disp(conv6)
114 | with tf.variable_scope("upsampler") as scope:
115 | upconv6 = bilinear_upsampling_by_convolution(conv6)
116 | # SCALE 5
117 | with tf.variable_scope("L5") as scope:
118 | with tf.variable_scope("estimator") as scope:
119 | conv5 = self.build_estimator(encoder_features[5], upconv6)
120 | prediction_5 = self.get_disp(conv5)
121 | with tf.variable_scope("upsampler") as scope:
122 | upconv5 = bilinear_upsampling_by_convolution(conv5)
123 | # SCALE 4
124 | with tf.variable_scope("L4") as scope:
125 | with tf.variable_scope("estimator") as scope:
126 | conv4 = self.build_estimator(encoder_features[4], upconv5)
127 | prediction_4 = self.get_disp(conv4)
128 | with tf.variable_scope("upsampler") as scope:
129 | upconv4 = bilinear_upsampling_by_convolution(conv4)
130 | # SCALE 3
131 | with tf.variable_scope("L3") as scope:
132 | with tf.variable_scope("estimator") as scope:
133 | conv3 = self.build_estimator(encoder_features[3], upconv4)
134 | prediction_3 = self.get_disp(conv3)
135 | with tf.variable_scope("upsampler") as scope:
136 | upconv3 = bilinear_upsampling_by_convolution(conv3)
137 | # SCALE 2
138 | with tf.variable_scope("L2") as scope:
139 | with tf.variable_scope("estimator") as scope:
140 | conv2 = self.build_estimator(encoder_features[2], upconv3)
141 | prediction_2 = self.get_disp(conv2)
142 | with tf.variable_scope("upsampler") as scope:
143 | upconv2 = bilinear_upsampling_by_convolution(conv2)
144 | # SCALE 1
145 | with tf.variable_scope("L1") as scope:
146 | with tf.variable_scope("estimator") as scope:
147 | conv1 = self.build_estimator(encoder_features[1], upconv2)
148 | prediction_1 = self.get_disp(conv1)
149 |
150 | size = [self.height, self.width]
151 |
152 | if not self.is_training:
153 | with tf.variable_scope("half"):
154 | prediction_1 = tf.image.resize_images(prediction_1, size)
155 | return prediction_1
156 |
157 | prediction_1 = tf.image.resize_images(prediction_1, size)
158 | prediction_2 = tf.image.resize_images(prediction_2, size)
159 | prediction_3 = tf.image.resize_images(prediction_3, size)
160 |
161 | return [prediction_1, prediction_2, prediction_3]
162 |
163 | def get_disp(self, x):
164 | """ Get disparity
165 | """
166 | with tf.variable_scope("get_disp"):
167 | disp = conv2d_leaky(x, [3, 3, x.shape[3], 1], [1], 1, False)
168 | return disp
169 |
170 | # Single scale estimator
171 | def build_estimator(self, features, upsampled_disp=None):
172 | """
173 | Create single scale estimator
174 | """
175 | with tf.variable_scope("build_estimator"):
176 | if upsampled_disp is not None:
177 | disp2 = tf.concat([features, upsampled_disp], -1)
178 | else:
179 | disp2 = features
180 | with tf.variable_scope("disp-3") as scope:
181 | disp3 = conv2d_leaky(disp2, [3, 3, disp2.shape[3], 96], [96], 1, True)
182 | with tf.variable_scope("disp-4") as scope:
183 | disp4 = conv2d_leaky(disp3, [3, 3, disp3.shape[3], 64], [64], 1, True)
184 | with tf.variable_scope("disp-5") as scope:
185 | disp5 = conv2d_leaky(disp4, [3, 3, disp4.shape[3], 32], [32], 1, True)
186 | with tf.variable_scope("disp-6") as scope:
187 | disp6 = conv2d_leaky(
188 | disp5, [3, 3, disp5.shape[3], 8], [8], 1, True
189 | ) # 8 channels for compatibility with other devices
190 | return disp6
191 |
--------------------------------------------------------------------------------
/single_inference/requirements.txt:
--------------------------------------------------------------------------------
1 | tensorflow==1.15.0
2 | Pillow==7.1.2
3 | tqdm==4.46.0
4 | opencv-python==4.2.0.34
5 | matplotlib==3.2.1
6 | tf2onnx==1.5.6
7 | tfcoreml==1.1
8 | onnx==1.6.0
9 | onnx-coreml==1.2
10 | onnx-tf==1.5.0
--------------------------------------------------------------------------------
/single_inference/run.sh:
--------------------------------------------------------------------------------
1 | # Copyright 2020 Filippo Aleotti
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | #!/bin/bash
16 | set -e
17 |
18 |
19 | python inference.py --ckpt "ckpt/pydnet" --img "test"
--------------------------------------------------------------------------------
/single_inference/test/0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/single_inference/test/0.png
--------------------------------------------------------------------------------
/single_inference/test/3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/single_inference/test/3.png
--------------------------------------------------------------------------------
/single_inference/test/4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/single_inference/test/4.png
--------------------------------------------------------------------------------
/single_inference/test/6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FilippoAleotti/mobilePydnet/9ab405806630341106514277f4d61156dd697c59/single_inference/test/6.png
--------------------------------------------------------------------------------
/single_inference/test_kitti.py:
--------------------------------------------------------------------------------
1 | """
2 | Evaluate the model using the Eigen split of the KITTI dataset
3 | - prepare the ground-truth depths by running the script https://github.com/nianticlabs/monodepth2/blob/master/export_gt_depth.py
4 | """
5 | import argparse
6 | import os
7 |
8 | import cv2
9 | import numpy as np
10 | import tensorflow as tf
11 | from tqdm import tqdm
12 |
13 | from eval_utils import compute_errors, compute_scale_and_shift
14 | from network import Pydnet
15 |
16 | os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
17 |
18 |
19 | class KITTILoader(object):
20 | def __init__(self, params):
21 | self.params = params
22 | self.height = params["height"]
23 | self.width = params["width"]
24 | self.data_list_file = params["data_list_file"]
25 | self.data_path = params["data_path"]
26 | self.num_workers = 4
27 | self.data_list = np.loadtxt(self.data_list_file, dtype=bytes).astype(np.str)
28 | self.default_img_shape = None
29 |
30 | def read_and_decode(self, filename_queue):
31 | """Read jpeg file from file system"""
32 | img0_name = tf.strings.join([self.data_path, "/", filename_queue, ".jpg"])
33 | img0 = tf.image.decode_jpeg(tf.io.read_file(img0_name), channels=3)
34 | img0 = tf.cast(img0, tf.float32)
35 | return img0
36 |
37 | def preprocess(self, filename_queue):
38 | """Prepare single image at testing time"""
39 | img0 = self.read_and_decode(filename_queue)
40 | img0 = tf.image.resize_images(img0, [self.height, self.width], tf.image.ResizeMethod.AREA)
41 | img0.set_shape([self.height, self.width, 3])
42 | img0 = img0 / 255.0
43 | return img0
44 |
45 | def create_iterator(self, num_parallel_calls=4):
46 | """Create iterator"""
47 | data_list = tf.convert_to_tensor(self.data_list, dtype=tf.string)
48 | dataset = tf.data.Dataset.from_tensor_slices(data_list)
49 | dataset = dataset.map(self.preprocess, num_parallel_calls=num_parallel_calls)
50 | dataset = dataset.batch(1)
51 | dataset = dataset.repeat()
52 | iterator = dataset.make_initializable_iterator()
53 | return iterator
54 |
55 |
56 | def read_test_files(test_file) -> list:
57 | """Read test files from txt file"""
58 | assert os.path.exists(test_file)
59 | with open(test_file, "r") as f:
60 | lines = f.readlines()
61 | lines = [l.strip() for l in lines]
62 | return lines
63 |
64 |
65 | def run_inference(opts):
66 | """Run the model on KITTI"""
67 | network_params = {"height": 320, "width": 640, "is_training": False}
68 | dataset_params = {
69 | "height": 320,
70 | "width": 640,
71 | "data_path": opts.data_path,
72 | "data_list_file": opts.data_list_file,
73 | }
74 | dataset = KITTILoader(dataset_params)
75 |
76 | iterator = dataset.create_iterator()
77 | batch_img = iterator.get_next()
78 |
79 | network = Pydnet(network_params)
80 | predicted_idepth = network.forward(batch_img)
81 | predicted_idepth = tf.nn.relu(predicted_idepth)
82 |
83 | # restore graph
84 | saver = tf.train.Saver()
85 | sess = tf.Session()
86 | sess.run(tf.compat.v1.global_variables_initializer())
87 | sess.run(iterator.initializer)
88 | saver.restore(sess, opts.ckpt)
89 |
90 | os.makedirs(opts.dest, exist_ok=True)
91 | test_images = read_test_files(opts.data_list_file)
92 | num_images = len(test_images)
93 | with tqdm(total=num_images) as pbar:
94 | for i in range(num_images):
95 | idepth = sess.run(predicted_idepth)
96 | idepth = np.squeeze(idepth)
97 | min_idepth = idepth.min()
98 | max_idepth = idepth.max()
99 | norm_idepth = (idepth - min_idepth) / (max_idepth - min_idepth)
100 | norm_idepth *= 255.0
101 |
102 | target_path = os.path.join(opts.data_path, f"{test_images[i]}.jpg")
103 | target = cv2.imread(target_path)
104 | h, w = target.shape[:2]
105 | norm_idepth = cv2.resize(norm_idepth, (w, h))
106 |
107 | img_path = os.path.join(opts.dest, f"{str(i).zfill(4)}.png")
108 | cv2.imwrite(img_path, (norm_idepth * 256.0).astype(np.uint16))
109 | pbar.update(1)
110 | print("Inference done!")
111 |
112 |
113 | def eval(opts):
114 | """Compute error metrics."""
115 | errors = []
116 | test_images = read_test_files(opts.data_list_file)
117 | print("=> loading gt data")
118 | gt_depths = np.load(opts.gt_path, fix_imports=True, encoding="latin1", allow_pickle=True)[
119 | "data"
120 | ]
121 | print("=> starting evaluation")
122 | with tqdm(total=len(test_images)) as pbar:
123 | for i in range(len(test_images)):
124 | target = gt_depths[i]
125 | pred_path = os.path.join(opts.dest, f"{str(i).zfill(4)}.png")
126 | prediction_idepth = cv2.imread(pred_path, -1) / 256.0
127 |
128 | mask = (target > 1e-3) & (target < opts.max_depth)
129 |
130 | target_idepth = np.zeros_like(target)
131 | target_idepth[mask == 1] = 1.0 / target[mask == 1]
132 | scale, shift = compute_scale_and_shift(prediction_idepth, target_idepth, mask)
133 | prediction_idepth_aligned = scale * prediction_idepth + shift
134 |
135 | disparity_cap = 1.0 / opts.max_depth
136 | prediction_idepth_aligned[prediction_idepth_aligned < disparity_cap] = disparity_cap
137 | prediciton_depth_aligned = 1.0 / prediction_idepth_aligned
138 |
139 | prediciton_depth_aligned = prediciton_depth_aligned[mask == 1]
140 | target = target[mask == 1]
141 | errors.append(compute_errors(target, prediciton_depth_aligned))
142 |
143 | pbar.update(1)
144 |
145 | mean_errors = np.array(errors).mean(0)
146 | labels = ["abs_rel", "sq_rel", "rmse", "rmse_log", "a1", "a2", "a3"]
147 | for i in range(len(labels)):
148 | print(f"{labels[i]}:{mean_errors[i]}")
149 |
150 | print("Evaluation done!")
151 |
152 |
153 | if __name__ == "__main__":
154 | parser = argparse.ArgumentParser(description="Evaluate depth network on KITTI")
155 | parser.add_argument("--ckpt", type=str, help="path to checkpoint", required=True)
156 | parser.add_argument("--data_path", type=str, help="path to kitti", required=True)
157 | parser.add_argument("--gt_path", type=str, help="path to gt_depths.npz", required=True)
158 | parser.add_argument(
159 | "--data_list_file", type=str, help="path to data list", default="test_kitti.txt"
160 | )
161 | parser.add_argument("--dest", type=str, help="prediction folder", default="kitti")
162 | parser.add_argument("--max_depth", type=float, help="maximum depth value", default=80.0)
163 | opts = parser.parse_args()
164 |
165 | run_inference(opts)
166 | eval(opts)
167 |
--------------------------------------------------------------------------------
/single_inference/test_nyu.py:
--------------------------------------------------------------------------------
1 | """
2 | Evaluate the model using the NYU v2 dataset
3 | - download the splits.mat file from http://horatio.cs.nyu.edu/mit/silberman/indoor_seg_sup/splits.mat
4 | - download the dataset from http://horatio.cs.nyu.edu/mit/silberman/nyu_depth_v2/nyu_depth_v2_labeled.mat
5 | """
6 | import argparse
7 | import os
8 |
9 | import cv2
10 | import h5py
11 | import numpy as np
12 | import tensorflow as tf
13 | from scipy.io import loadmat
14 | from tqdm import tqdm
15 |
16 | from eval_utils import compute_errors, compute_scale_and_shift
17 | from network import Pydnet
18 |
19 | os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
20 |
21 |
22 | class NYUDataloader:
23 | """Dataloader for NYU v2"""
24 |
25 | def __init__(self, params):
26 | self.params = params
27 | self.height = params["height"]
28 | self.width = params["width"]
29 | self.img_dir = params["labels"]
30 | self.labels_file = params["splits"]
31 | self.num_workers = 1
32 | self.num_samples = 0
33 |
34 | def preprocess(self, img0):
35 | """Prepare single image at testing time"""
36 | img0 = tf.image.resize_images(img0, [self.height, self.width], tf.image.ResizeMethod.AREA)
37 | img0.set_shape([self.height, self.width, 3])
38 | img0 = img0 / 255.0
39 | return img0
40 |
41 | def create_iterator(self, num_parallel_calls=4):
42 | self.nyu_generator = NYUGenerator(self.img_dir, self.labels_file)
43 | dataset = tf.data.Dataset.from_generator(
44 | self.nyu_generator,
45 | output_types=tf.float32,
46 | output_shapes=[480, 640, 3],
47 | )
48 | dataset = dataset.map(self.preprocess, num_parallel_calls=num_parallel_calls)
49 | dataset = dataset.batch(1)
50 | dataset = dataset.repeat()
51 | iterator = dataset.make_initializable_iterator()
52 | return iterator
53 |
54 |
55 | class NYUGenerator:
56 | """
57 | Read NYU testing split from mat files
58 | Adapted from https://gist.github.com/ranftlr/a1c7a24ebb24ce0e2f2ace5bce917022
59 | """
60 |
61 | def __init__(self, data_path, label_file):
62 | if not os.path.exists(data_path):
63 | raise ValueError(f"Cannot find {data_path}")
64 | if not os.path.exists(label_file):
65 | raise ValueError(f"Cannot find {label_file}")
66 | self.data_path = data_path
67 | self.label_file = label_file
68 |
69 | def __call__(self):
70 | mat = loadmat(self.label_file)
71 | indices = [ind[0] - 1 for ind in mat["testNdxs"]]
72 |
73 | with h5py.File(self.data_path, "r") as f:
74 | for ind in indices:
75 | yield np.swapaxes(f["images"][ind], 0, 2)  # images are stored as 3 x W x H; convert to H x W x 3
76 |
77 | def read_gt_files(self):
78 | """Load ground truth maps
79 | Adapted from https://gist.github.com/ranftlr/a1c7a24ebb24ce0e2f2ace5bce917022
80 | """
81 | depth_list = []
82 | name_list = []
83 |
84 | mat = loadmat(self.label_file)
85 | indices = [ind[0] - 1 for ind in mat["testNdxs"]]
86 |
87 | with h5py.File(self.data_path, "r") as f:
88 | for ind in indices:
89 | name_list.append(str(ind))
90 | depth_list.append(np.swapaxes(f["rawDepths"][ind], 0, 1))  # depths are stored as W x H; convert to H x W
91 | return name_list, depth_list
92 |
93 |
94 | def run_inference(opts):
95 | """Run the model on NYU v2 dataset"""
96 | network_params = {"height": 320, "width": 640, "is_training": False}
97 | dataset_params = {"height": 320, "width": 640, "labels": opts.labels, "splits": opts.splits}
98 | dataset = NYUDataloader(dataset_params)
99 | num_elements = 654  # size of the official NYU v2 test split
100 |
101 | iterator = dataset.create_iterator()
102 | batch_img = iterator.get_next()
103 |
104 | network = Pydnet(network_params)
105 | predicted_idepth = network.forward(batch_img)
106 | predicted_idepth = tf.nn.relu(predicted_idepth)
107 |
108 | # restore graph
109 | saver = tf.train.Saver()
110 | sess = tf.Session()
111 | sess.run(tf.compat.v1.global_variables_initializer())
112 | sess.run(iterator.initializer)
113 | saver.restore(sess, opts.ckpt)
114 |
115 | os.makedirs(opts.dest, exist_ok=True)
116 |
117 | with tqdm(total=num_elements) as pbar:
118 | for i in range(num_elements):
119 | idepth = sess.run(predicted_idepth)
120 | idepth = np.squeeze(idepth)
121 | min_idepth = idepth.min()
122 | max_idepth = idepth.max()
123 | norm_idepth = (idepth - min_idepth) / (max_idepth - min_idepth)
124 | norm_idepth *= 255.0
125 |
126 | norm_idepth = cv2.resize(norm_idepth, (640, 480)) # nyu images are 640x480
127 | img_path = os.path.join(opts.dest, f"{str(i).zfill(4)}.png")
128 | cv2.imwrite(img_path, (norm_idepth * 256.0).astype(np.uint16))
129 | pbar.update(1)
130 | print("Inference done!")
131 |
132 |
133 | def eval(opts):
134 | """Compute error metrics."""
135 | nyu = NYUGenerator(data_path=opts.labels, label_file=opts.splits)
136 | errors = []
137 | test_images, gt_depths = nyu.read_gt_files()
138 |
139 | with tqdm(total=len(test_images)) as pbar:
140 | for index in range(len(test_images)):
141 | test_img = f"{str(index).zfill(4)}.png"
142 | target = gt_depths[index]
143 |
144 | pred_path = os.path.join(opts.dest, test_img)
145 | prediction_idepth = cv2.imread(pred_path, -1) / 256.0
146 |
147 | mask = (target > 0) & (target < opts.max_depth)
148 |
149 | target_idepth = np.zeros_like(target)
150 | target_idepth[mask == 1] = 1.0 / target[mask == 1]
151 | scale, shift = compute_scale_and_shift(prediction_idepth, target_idepth, mask)
152 | prediction_idepth_aligned = scale * prediction_idepth + shift
153 |
154 | disparity_cap = 1.0 / opts.max_depth
155 | prediction_idepth_aligned[prediction_idepth_aligned < disparity_cap] = disparity_cap
156 | prediction_depth_aligned = 1.0 / prediction_idepth_aligned
157 |
158 | prediction_depth_aligned = prediction_depth_aligned[mask == 1]
159 | target = target[mask == 1]
160 | errors.append(compute_errors(target, prediction_depth_aligned))
161 |
162 | pbar.update(1)
163 |
164 | mean_errors = np.array(errors).mean(0)
165 | labels = ["abs_rel", "sq_rel", "rmse", "rmse_log", "a1", "a2", "a3"]
166 | for i in range(len(labels)):
167 | print(f"{labels[i]}:{mean_errors[i]}")
168 |
169 | print("Evaluation done!")
170 |
171 |
172 | if __name__ == "__main__":
173 | parser = argparse.ArgumentParser(description="Evaluate depth network on NYU v2")
174 | parser.add_argument("--ckpt", type=str, help="path to checkpoint", required=True)
175 | parser.add_argument(
176 | "--labels", type=str, help="path to dataset", default="nyu_depth_v2_labeled.mat"
177 | )
178 | parser.add_argument("--splits", type=str, help="path to splits", default="splits.mat")
179 | parser.add_argument("--dest", type=str, help="prediction folder", default="nyu")
180 | parser.add_argument("--max_depth", type=float, help="maximum depth value", default=10.0)
181 |
182 | opts = parser.parse_args()
183 |
184 | run_inference(opts)
185 | eval(opts)
186 |
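# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the original script.
# NYUGenerator above assumes the HDF5 layout of nyu_depth_v2_labeled.mat:
# "images" is stored channel-first as (N, 3, W, H) and "rawDepths" as
# (N, W, H), hence the swapaxes calls that yield H x W (x 3) arrays.
# A hypothetical helper to sanity-check those shapes before running eval()
# (h5py is already imported at the top of this file):
def _inspect_nyu_mat(path="nyu_depth_v2_labeled.mat"):
    with h5py.File(path, "r") as f:
        print("images   :", f["images"].shape)     # expected (1449, 3, 640, 480)
        print("rawDepths:", f["rawDepths"].shape)  # expected (1449, 640, 480)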
--------------------------------------------------------------------------------
/single_inference/test_tum.py:
--------------------------------------------------------------------------------
1 | """
2 | Evaluate the model on the TUM dataset
3 | - download the data by running https://github.com/google/mannequinchallenge/blob/master/fetch_tum_data.sh
4 | """
5 | import argparse
6 | import os
7 |
8 | import cv2
9 | import h5py
10 | import numpy as np
11 | import tensorflow as tf
12 | from tqdm import tqdm
13 |
14 | from eval_utils import compute_errors, compute_scale_and_shift
15 | from network import Pydnet
16 |
17 | os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
18 |
19 |
20 | class TUMDataloader:
21 | """Test TUM dataset"""
22 |
23 | def __init__(self, params):
24 | self.params = params
25 | self.height = params["height"]
26 | self.width = params["width"]
27 | self.data_path = params["data_path"]
28 | self.num_workers = 4
29 | self.data_list_file = params["data_list_file"]
30 | self.data_list = np.loadtxt(self.data_list_file, dtype=bytes).astype(str)
31 | self.default_img_shape = [384, 512, 3]
32 |
33 | def preprocess(self, img0):
34 | """Prepare single image at testing time"""
35 | img0 = tf.image.resize_images(img0, [self.height, self.width], tf.image.ResizeMethod.AREA)
36 | img0.set_shape([self.height, self.width, 3])
37 | return img0
38 |
39 | def create_iterator(self, num_parallel_calls=4):
40 | """ """
41 | self.tum_generator = TUMGenerator(self.data_path, self.data_list)
42 | dataset = tf.data.Dataset.from_generator(
43 | self.tum_generator,
44 | output_types=tf.float32,
45 | output_shapes=self.default_img_shape,
46 | )
47 | dataset = dataset.map(self.preprocess, num_parallel_calls=num_parallel_calls)
48 | dataset = dataset.batch(1)
49 | dataset = dataset.repeat()
50 | iterator = dataset.make_initializable_iterator()
51 | return iterator
52 |
53 |
54 | class TUMGenerator:
55 | def __init__(self, data_path, test_files):
56 | self.data_path = data_path
57 | self.test_files = test_files
58 |
59 | def __call__(self):
60 | for f in self.test_files:
61 | test_img_path = os.path.join(self.data_path, f)
62 | with h5py.File(test_img_path, "r") as test_img:
63 | img = test_img.get("/gt/img_1")
64 | img = np.float32(np.array(img))
65 | yield img
66 |
67 | def read_gt_files(self):
68 | targets = {}
69 | samples = None
70 | with open(self.test_files, "r") as f:
71 | samples = f.readlines()
72 |
73 | for sample in samples:
74 | sample = sample.strip()
75 | test_img_path = os.path.join(self.data_path, sample)
76 | name = sample.replace(".jpg.h5", "")
77 | with h5py.File(test_img_path, "r") as test_img_h5:
78 | target = test_img_h5.get("/gt/gt_depth")
79 | target = np.float32(np.array(target))
80 | targets[name] = target
81 | return targets
82 |
83 |
84 | def run_inference(opts):
85 | """Run the model on TUM dataset"""
86 | network_params = {"height": 320, "width": 640, "is_training": False}
87 | dataset_params = {
88 | "height": 320,
89 | "width": 640,
90 | "data_path": opts.data_path,
91 | "data_list_file": opts.data_list_file,
92 | }
93 | dataset = TUMDataloader(dataset_params)
94 |
95 | iterator = dataset.create_iterator()
96 | batch_img = iterator.get_next()
97 |
98 | network = Pydnet(network_params)
99 | predicted_idepth = network.forward(batch_img)
100 | predicted_idepth = tf.nn.relu(predicted_idepth)
101 |
102 | # restore graph
103 | saver = tf.train.Saver()
104 | sess = tf.Session()
105 | sess.run(tf.compat.v1.global_variables_initializer())
106 | sess.run(iterator.initializer)
107 | saver.restore(sess, opts.ckpt)
108 |
109 | os.makedirs(opts.dest, exist_ok=True)
110 |
111 | names = None
112 | with open(opts.data_list_file, "r") as f:
113 | names = f.readlines()
114 | names = [n.strip().replace(".jpg.h5", "") for n in names]
115 | num_lines = len(names)
116 |
117 | with tqdm(total=num_lines) as pbar:
118 | for i in range(num_lines):
119 | idepth = sess.run(predicted_idepth)
120 | idepth = np.squeeze(idepth)
121 | min_idepth = idepth.min()
122 | max_idepth = idepth.max()
123 | norm_idepth = (idepth - min_idepth) / (max_idepth - min_idepth)
124 | norm_idepth *= 255.0
125 |
126 | norm_idepth = cv2.resize(norm_idepth, (512, 384))  # tum images are 512x384
127 | img_path = os.path.join(opts.dest, f"{names[i]}.png")
128 | cv2.imwrite(img_path, (norm_idepth * 256.0).astype(np.uint16))
129 | pbar.update(1)
130 | print("Inference done!")
131 |
132 |
133 | def eval(opts):
134 | """Compute error metrics."""
135 | tum = TUMGenerator(data_path=opts.data_path, test_files=opts.data_list_file)
136 | errors = []
137 | gt_depths = tum.read_gt_files()
138 | num_lines = sum(1 for _ in open(opts.data_list_file, "r"))
139 |
140 | with open(opts.data_list_file, "r") as f:
141 | for sample in tqdm(f, total=num_lines):
142 | sample = sample.strip().replace(".jpg.h5", "")
143 | target = gt_depths[sample]
144 | pred_path = os.path.join(opts.dest, f"{sample}.png")
145 | prediction_idepth = cv2.imread(pred_path, -1) / 256.0
146 |
147 | mask = (target > 0) & (target < opts.max_depth)
148 | target_idepth = np.zeros_like(target)
149 | target_idepth[mask == 1] = 1.0 / target[mask == 1]
150 | scale, shift = compute_scale_and_shift(prediction_idepth, target_idepth, mask)
151 | prediction_idepth_aligned = scale * prediction_idepth + shift
152 |
153 | disparity_cap = 1.0 / opts.max_depth
154 | prediction_idepth_aligned[prediction_idepth_aligned < disparity_cap] = disparity_cap
155 | prediction_depth_aligned = 1.0 / prediction_idepth_aligned
156 |
157 | prediction_depth_aligned = prediction_depth_aligned[mask == 1]
158 | target = target[mask == 1]
159 | errors.append(compute_errors(target, prediction_depth_aligned))
160 |
161 | mean_errors = np.array(errors).mean(0)
162 | labels = ["abs_rel", "sq_rel", "rmse", "rmse_log", "a1", "a2", "a3"]
163 | for i in range(len(labels)):
164 | print(f"{labels[i]}:{mean_errors[i]}")
165 |
166 | print("Evaluation done!")
167 |
168 |
169 | if __name__ == "__main__":
170 | parser = argparse.ArgumentParser(description="Evaluate depth network on TUM")
171 | parser.add_argument("--ckpt", type=str, help="path to checkpoint", required=True)
172 | parser.add_argument("--data_path", type=str, help="path to TUM data", required=True)
173 | parser.add_argument(
174 | "--data_list_file", type=str, help="path to list files", default="test_tum.txt"
175 | )
176 | parser.add_argument("--dest", type=str, help="prediction folder", default="tum")
177 | parser.add_argument("--max_depth", type=float, help="maximum depth value", default=10.0)
178 |
179 | opts = parser.parse_args()
180 |
181 | run_inference(opts)
182 | eval(opts)
183 |
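# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the original script.
# TUMGenerator above reads samples produced by fetch_tum_data.sh: each
# *.jpg.h5 file stores the RGB frame under "/gt/img_1" and the ground-truth
# depth map under "/gt/gt_depth". A hypothetical helper to preview a single
# sample (h5py and numpy are already imported at the top of this file):
def _inspect_tum_sample(h5_path):
    with h5py.File(h5_path, "r") as f:
        img = np.float32(np.array(f.get("/gt/img_1")))
        depth = np.float32(np.array(f.get("/gt/gt_depth")))
    print("image:", img.shape, "depth:", depth.shape, "valid:", int((depth > 0).sum()))
    return img, depth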
--------------------------------------------------------------------------------