├── .dockerignore ├── .formatter.exs ├── .gitattributes ├── .gitignore ├── .tool-versions ├── LICENSE.txt ├── Procfile ├── README.md ├── apps ├── emporium_environment │ ├── config │ │ ├── config.exs │ │ ├── dev.exs │ │ ├── prod.exs │ │ └── test.exs │ ├── lib │ │ └── emporium_environment │ │ │ ├── application.ex │ │ │ ├── cluster │ │ │ ├── strategy │ │ │ │ └── local.ex │ │ │ └── supervisor.ex │ │ │ ├── endpoint.ex │ │ │ ├── horde.ex │ │ │ └── horde │ │ │ ├── client.ex │ │ │ └── tracker.ex │ └── mix.exs ├── emporium_inference │ ├── lib │ │ └── emporium_inference │ │ │ ├── image.ex │ │ │ └── image_conversion.ex │ └── mix.exs ├── emporium_inference_resnet │ ├── lib │ │ ├── emporium_inference_resnet.ex │ │ └── emporium_inference_resnet │ │ │ ├── application.ex │ │ │ └── serving.ex │ └── mix.exs ├── emporium_inference_yolov5 │ ├── .gitignore │ ├── CMakeLists.txt │ ├── Makefile │ ├── c_src │ │ └── runner.cc │ ├── lib │ │ ├── emporium_inference_yolov5.ex │ │ └── emporium_inference_yolov5 │ │ │ ├── acceptor.ex │ │ │ ├── application.ex │ │ │ ├── broker.ex │ │ │ ├── request.ex │ │ │ └── runner.ex │ ├── mix.exs │ └── test │ │ ├── inference_test.exs │ │ └── test_helper.exs ├── emporium_nexus │ ├── config │ │ └── config.exs │ ├── lib │ │ └── emporium_nexus │ │ │ ├── application.ex │ │ │ ├── config.ex │ │ │ ├── inference_endpoint.ex │ │ │ ├── inference_requestor.ex │ │ │ ├── inference_session.ex │ │ │ ├── inference_sink.ex │ │ │ ├── key_server.ex │ │ │ ├── video_format_tracker.ex │ │ │ ├── video_orientation_extension.ex │ │ │ └── video_orientation_tracker.ex │ └── mix.exs ├── emporium_proxy │ ├── config │ │ └── config.exs │ ├── lib │ │ ├── emporium_proxy.ex │ │ └── emporium_proxy │ │ │ ├── application.ex │ │ │ ├── cowboy.ex │ │ │ ├── router.ex │ │ │ └── websocket_handler.ex │ └── mix.exs └── emporium_web │ ├── .formatter.exs │ ├── .gitignore │ ├── assets │ ├── css │ │ ├── screen-emporium.css │ │ └── screen-generic.css │ ├── esbuild.js │ ├── js │ │ └── screen-generic.js │ ├── 
package-lock.json │ └── package.json │ ├── config │ ├── config.exs │ ├── dev.exs │ ├── prod.exs │ └── test.exs │ ├── lib │ ├── emporium_web.ex │ └── emporium_web │ │ ├── application.ex │ │ ├── controllers │ │ └── page_controller.ex │ │ ├── endpoint.ex │ │ ├── gettext.ex │ │ ├── presence.ex │ │ ├── router.ex │ │ ├── session_live.ex │ │ ├── telemetry.ex │ │ ├── templates │ │ ├── layout │ │ │ ├── app.html.heex │ │ │ ├── live.html.heex │ │ │ └── root.html.heex │ │ └── page │ │ │ └── index.html.heex │ │ └── views │ │ ├── error_helpers.ex │ │ ├── error_view.ex │ │ ├── layout_view.ex │ │ └── page_view.ex │ ├── mix.exs │ └── priv │ └── gettext │ ├── default.pot │ ├── en │ └── LC_MESSAGES │ │ ├── default.po │ │ └── errors.po │ └── errors.pot ├── bin ├── compile ├── console ├── run ├── setup └── test ├── config ├── config.exs ├── dev.env.template ├── dev.exs ├── prod.exs ├── releases.exs ├── runtime.exs └── test.exs ├── dialyzer-ignore-warnings.exs ├── infra └── foreman-app │ ├── console.sh │ ├── start.sh │ └── test.sh ├── mix.exs ├── mix.lock ├── rel ├── env.sh.eex ├── remote.vm.args.eex └── vm.args.eex └── vendor ├── setup-clang.sh ├── setup-coco.sh ├── setup-cudnn.sh ├── setup-libtorch.sh └── setup-opencv.sh /.dockerignore: -------------------------------------------------------------------------------- 1 | */**/.DS_Store 2 | .git 3 | .tm_properties 4 | _build 5 | elixir_buildpack.config 6 | phoenix_static_buildpack_compile 7 | phoenix_static_buildpack.config 8 | Procfile 9 | apps/*/*.dump 10 | apps/*/cover 11 | apps/*/assets/node_modules 12 | apps/*/priv/static 13 | apps/*/ext/libtorch-* 14 | bin 15 | config/dev.* 16 | config/test.* 17 | !config/test.exs 18 | config/*.env 19 | deps 20 | doc 21 | infra/aws* 22 | priv/keys 23 | releases/* 24 | -------------------------------------------------------------------------------- /.formatter.exs: -------------------------------------------------------------------------------- 1 | [ 2 | import_deps: [:phoenix, :membrane_core], 3 | 
inputs: [ 4 | "apps/*/{lib,config,test}/**/*.{ex,exs}", 5 | "apps/*/priv/*/seeds.exs", 6 | "apps/*/mix.exs", 7 | "*.{ex,exs}", 8 | "{config,lib,scripts,test}/**/*.{ex,exs}" 9 | ], 10 | subdirectories: ["priv/*/migrations"] 11 | ] 12 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | vendor/cache/cudnn-linux-x86_64-8.4.1.50_cuda11.6-archive.tar.xz filter=lfs diff=lfs merge=lfs -text 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | *.beam 3 | *.ez 4 | /*.ez 5 | /.fetch 6 | /.vscode 7 | /apps/*/cover 8 | /apps/*/assets/node_modules 9 | /apps/*/assets/.fontcustom-manifest.json 10 | /apps/*/priv/static/ 11 | !/apps/*/priv/static/.keep 12 | /config/*.env 13 | /config/*.secret.exs 14 | /cover 15 | /db 16 | /deps 17 | /doc 18 | /models 19 | /priv/cert/* 20 | /priv/keys/* 21 | /releases 22 | /vendor/* 23 | !/vendor/*.sh 24 | !/vendor/cache/* 25 | /_build 26 | erl_crash.dump 27 | npm-debug.log 28 | -------------------------------------------------------------------------------- /.tool-versions: -------------------------------------------------------------------------------- 1 | nodejs 17.3.0 2 | erlang 25.3 3 | elixir 1.14.3-otp-25 4 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 
12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 
48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. 
Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. -------------------------------------------------------------------------------- /Procfile: -------------------------------------------------------------------------------- 1 | web: env ROLE=WEB ./infra/foreman-app/start.sh 2 | ngrok: ngrok http $(($PORT - 100)) --subdomain $NGROK_SUBDOMAIN --region eu -log stdout 3 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Hot Dog Emporium 2 | 3 | End-to-end showcase of MLOps for Elixir ecosystem. 4 | 5 | ## Preamble 6 | 7 | There is a scarcity of end-to-end [ModelOps](https://en.wikipedia.org/wiki/ModelOps) projects made available to aid understanding of the state of the art and to demonstrate how such solutions may be deployed on custom infrastructure. 8 | 9 | Therefore the Emporium was erected with the purpose of illustrating practical implementation techniques for Elixir teams, and secondarily to assess end-to-end performance of such implementations, which will surely become popular over time. 
10 | 11 | The Emporium assumes the following canonical architecture: 12 | 13 | - x86_64 system, macOS or Linux 14 | - Optional Nvidia CUDA-capable GPU 15 | 16 | For more context, check out: 17 | 18 | - [“Not Hotdog” Revisited, ElixirConf EU 2023](https://speakerdeck.com/evadne/not-hotdog-revisited) 19 | 20 | ## Preparing the System 21 | 22 | The application requires the following dependencies: 23 | 24 | 1. LLVM / Clang (Run: `./vendor/setup-clang.sh`) 25 | 26 | 2. CMake (Run: `apt-get install cmake`) 27 | 28 | 3. LibTorch (Run: `./vendor/setup-libtorch.sh`) 29 | 30 | 4. FFMpeg (Run: `apt-get install ffmpeg`) 31 | 32 | 5. OpenCV (Run: `./vendor/setup-opencv.sh`) 33 | 34 | 6. Nvidia’s CUDA Toolkit (if using GPU) 35 | 36 | ## Preparing the Models 37 | 38 | This application comes with 2 models: 39 | 40 | 1. Image Classification via ResNet-50 41 | 42 | 2. Object Detection via YOLOv5 43 | 44 | The former is pulled by Bumblebee therefore no action is required. 45 | 46 | For the latter, clone YOLOv5 and export the pre-trained model: 47 | 48 | $ git clone https://github.com/ultralytics/yolov5 ultralytics-yolov5 49 | $ cd ultralytics-yolov5; asdf local python 3.8.5; cd -; cd - 50 | $ pip install -r requirements.txt 51 | $ python3 export.py --weights yolov5s.pt --include torchscript 52 | 53 | Then copy `yolov5s.torchscript` to: 54 | 55 | apps/emporium_inference_yolov5/priv/yolov5s.torchscript 56 | 57 | You can also use other sizes by configuration: 58 | 59 | config :emporium_inference_yolov5, model_name: "yolov5x.torchscript" 60 | 61 | ## Architecture 62 | 63 | The system is split into the following sections: 64 | 65 | The **Emporium Environment** application is responsible for foundational services, including node clustering (enabling Erlang Distribution between nodes), and setting up of facilities that would allow custom Horde supervisors in other applications to function correctly. 
66 | 67 | The **Emporium Proxy** application provides the entry point for HTTP traffic (TLS is offloaded to the load balancer) and manages proxying of all connections to other applications such as Emporium Web, or if required, an Admin app in the future. 68 | 69 | The **Emporium Web** application provides the entry point, and hosts the Session LiveView, which is the main interaction element. 70 | 71 | The **Emporium Nexus** application provides WebRTC ingress and orchestration for inference workloads. It hosts the Membrane framework, exposes RTP endpoints, and allows WebRTC connections to be made to the application cluster. 72 | 73 | The **Emporium Inference** application is a façade, which holds image conversion logic, via Evision, which is used in featurisation (pre-processing). 74 | 75 | The **Emporium Inference (YOLOv5)** application provides Object Detection capabilities via YOLOv5, orchestrated via Sbroker, and implemented with PyTorch using a custom C++ program. 76 | 77 | The **Emporium Inference (ResNet)** application provides Image Classification capabilities via Bumblebee and the `microsoft/resnet-50` model. 78 | 79 | ## Installation & Setup 80 | 81 | To prepare the environment, install the prerequisites above, then install the CUDA Toolkit which you can find at [CUDA Downloads](https://developer.nvidia.com/cuda-downloads), if you are using an NVidia GPU on Linux. 82 | 83 | Note if you are using WSL 2, then select Linux → x86_64 → WSL-Ubuntu → 2.0. It is critical not to use normal versions for Ubuntu as that would install drivers, which are not necessary for WSL 2, where the driver is already installed on the Windows side. 84 | 85 | To change between Object Recognition and Image Classification, modify `EmporiumNexus.InferenceSink`. 
86 | 87 | At a bare minimum, you should configure `config/dev.env` with the template at `config/dev.env.template`: 88 | 89 | - `NGROK_SUBDOMAIN`: Is used when running ngrok as specified in Procfile 90 | - `SECRET_KEY_BASE`: Is used for Phoenix dead & live views 91 | - `TURN_IP`, `TURN_MOCK_IP` should be publicly routable from your clients for TCP/UDP TURN traffic 92 | - `TURN_PORT_UDP_FROM`, `TURN_PORT_UDP_TO` should be a high range e.g. 50000 - 65535 93 | - `HOST` should be your ngrok URL 94 | 95 | You may run into some problems which are documented in “Common Problems”. 96 | 97 | ## Pending Tasks 98 | 99 | - Investigate real time display of metrics 100 | 101 | - Investigate YOLOv8 102 | 103 | - Investigate operational pattern for TensorRT 104 | 105 | - Investigate deployment topology / AWS CFN 106 | 107 | - Investigate IPv6 usage 108 | 109 | ## Common Problems 110 | 111 | ### Unable to acquire Media Devices 112 | 113 | This may be related to whether you have used HTTPS or not. WebRTC requires a [secure context](https://developer.mozilla.org/en-US/docs/Web/Security/Secure_Contexts) and the fastest way to acquire full compliance would be to configure and use [ngrok](https://ngrok.com) then use the ngrok domain for all testing purposes. 114 | 115 | For local deployment, consider CloudFlare Zero Trust / CloudFlare Access. 116 | 117 | ### Unable to compile FastTLS dependency 118 | 119 | FastTLS is a dependency of the Membrane Framework. 120 | 121 | On Linux, as long as `pkg-config` exists, it should be found automatically, but this can be a problem on macOS. 
It is a [known problem](https://github.com/membraneframework/membrane_videoroom#known-issues) and the workaround is as follows: 122 | 123 | export LDFLAGS="-L/usr/local/opt/openssl/lib" 124 | export CFLAGS="-I/usr/local/opt/openssl/include/" 125 | export CPPFLAGS="-I/usr/local/opt/openssl/include/" 126 | export PKG_CONFIG_PATH="/usr/local/opt/openssl@3/lib/pkgconfig:$PKG_CONFIG_PATH" 127 | mix deps.compile fast_tls 128 | 129 | ### Unable to compile YOLOv5 Runner 130 | 131 | When no `CMAKE_CUDA_COMPILER` could be found, it may be due to improper configuration of the CUDA Toolkit. 132 | 133 | Consider adding the following to `~/.zshrc` or equivalent: 134 | 135 | export CUDA_HOME=/usr/local/cuda 136 | export PATH=$CUDA_HOME/bin:$PATH 137 | 138 | Once `nvcc` can be found, this problem resolves itself. 139 | 140 | ### Unable to establish WebRTC Connections 141 | 142 | Membrane’s WebRTC implementation includes an integrated TURN server, so you should set both `TURN_IP` and `TURN_MOCK_IP`… 143 | 144 | - `TURN_IP` is the IP on the interface that the TURN server listens to 145 | - `TURN_MOCK_IP` is the IP that is presented to the client 146 | 147 | …to publicly routable IPs. 
148 | 149 | See the following post for more information: 150 | 151 | - [How we made Membrane SFU less ICE-y](https://medium.com/membraneframework/how-we-made-membrane-sfu-less-ice-y-9625472ec386) 152 | 153 | ### YOLOv5 — PyTorch / nvFuser issue 154 | 155 | This is under investigation, a workaround has been put in place ([issue](https://github.com/pytorch/pytorch/issues/99781)) 156 | 157 | If not using a 40-series card, you may downgrade libTorch to 1.3.x by: 158 | 159 | rm -rf ./vendor/libtorch 160 | LIBTORCH_VERSION=1.3.0 ./vendor/setup-libtorch.sh 161 | -------------------------------------------------------------------------------- /apps/emporium_environment/config/config.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | 3 | config :emporium_environment, EmporiumEnvironment, strategies: [] 4 | 5 | config :opentelemetry, :resource, 6 | service: [ 7 | name: "emporium", 8 | namespace: "emporium" 9 | ] 10 | 11 | config :opentelemetry, 12 | processors: [ 13 | otel_batch_processor: %{ 14 | exporter: {:otel_exporter_stdout, []} 15 | } 16 | ] 17 | 18 | # config :opentelemetry, traces_exporter: :none 19 | 20 | import_config "#{config_env()}.exs" 21 | -------------------------------------------------------------------------------- /apps/emporium_environment/config/dev.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | 3 | config :emporium_environment, EmporiumEnvironment, strategies: [:local] 4 | -------------------------------------------------------------------------------- /apps/emporium_environment/config/prod.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | 3 | config :emporium_environment, EmporiumEnvironment, strategies: [:fly] 4 | -------------------------------------------------------------------------------- /apps/emporium_environment/config/test.exs: 
-------------------------------------------------------------------------------- 1 | import Config 2 | -------------------------------------------------------------------------------- /apps/emporium_environment/lib/emporium_environment/application.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumEnvironment.Application do 2 | @moduledoc false 3 | use Application 4 | 5 | def start(_type, _args) do 6 | children = [ 7 | EmporiumEnvironment.Horde.Tracker, 8 | EmporiumEnvironment.Cluster.Supervisor 9 | ] 10 | 11 | opts = [strategy: :one_for_one, name: EmporiumEnvironment.Supervisor] 12 | Supervisor.start_link(children, opts) 13 | end 14 | end 15 | -------------------------------------------------------------------------------- /apps/emporium_environment/lib/emporium_environment/cluster/strategy/local.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumEnviornment.Cluster.Strategy.Local do 2 | @moduledoc """ 3 | EPMD-based Clustering strategy for libCluster, which uses net_adm to 4 | find out which names have been registered with the local EPMD instance, 5 | and then sending them to libCluster for connection. 6 | 7 | This allows forming the cluster with all locally running nodes, 8 | and is especially useful during development when ./bin/run and 9 | ./bin/console scripts already take care of starting the BEAM VM up 10 | with clustering. 
11 | """ 12 | 13 | use Cluster.Strategy 14 | alias Cluster.Strategy.State 15 | require Logger 16 | 17 | def start_link(args) do 18 | GenServer.start_link(__MODULE__, args) 19 | end 20 | 21 | def init([%State{topology: topology, connect: connect, list_nodes: list_nodes}]) do 22 | {:ok, epmd_names} = :net_adm.names() 23 | epmd_nodes = Enum.map(epmd_names, &String.to_atom("#{elem(&1, 0)}@localhost")) 24 | :ok = Cluster.Strategy.connect_nodes(topology, connect, list_nodes, epmd_nodes) 25 | :ignore 26 | end 27 | end 28 | -------------------------------------------------------------------------------- /apps/emporium_environment/lib/emporium_environment/cluster/supervisor.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumEnvironment.Cluster.Supervisor do 2 | use Supervisor 3 | require Logger 4 | 5 | @otp_app Mix.Project.config()[:app] 6 | @otp_env EmporiumEnvironment 7 | 8 | def start_link(args) do 9 | Supervisor.start_link(__MODULE__, args, name: __MODULE__) 10 | end 11 | 12 | @impl true 13 | def init(_) do 14 | topologies = build_topologies() 15 | 16 | cond do 17 | Enum.empty?(topologies) -> :ignore 18 | true -> Cluster.Supervisor.init([topologies, [name: __MODULE__]]) 19 | end 20 | end 21 | 22 | @strategy_base [ 23 | connect: {__MODULE__, :connect_node, []}, 24 | disconnect: {:erlang, :disconnect_node, []}, 25 | list_nodes: {:erlang, :nodes, [:connected]} 26 | ] 27 | 28 | defp build_topologies do 29 | Application.get_env(@otp_app, @otp_env) 30 | |> Keyword.fetch!(:strategies) 31 | |> Enum.flat_map(&build_topology/1) 32 | end 33 | 34 | defp build_topology(:local) do 35 | strategy = EmporiumEnviornment.Cluster.Strategy.Local 36 | [local_epmd_discovery: Keyword.merge(@strategy_base, strategy: strategy, config: [])] 37 | end 38 | 39 | defp build_topology(:fly) do 40 | with {:ok, app_name} <- System.fetch_env("FLY_APP_NAME") do 41 | strategy = Cluster.Strategy.DNSPoll 42 | config = [polling_interval: 5_000, query: 
"#{app_name}.internal", node_basename: app_name] 43 | [fly_dnspoll: Keyword.merge(@strategy_base, strategy: strategy, config: config)] 44 | else 45 | _ -> [] 46 | end 47 | end 48 | 49 | def connect_node(node_name) do 50 | :net_kernel.connect_node(node_name) 51 | end 52 | end 53 | -------------------------------------------------------------------------------- /apps/emporium_environment/lib/emporium_environment/endpoint.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumEnvironment.Endpoint do 2 | @moduledoc """ 3 | Resolves the correct configuration for Phoenix Endpoints. To be used in the init function of 4 | Endpoints. 5 | """ 6 | 7 | def init(config) do 8 | template = build_template() 9 | {template_url, template} = Keyword.pop(template, :url, []) 10 | {config_url, config} = Keyword.pop(config, :url, []) 11 | url = Keyword.put(template_url, :path, Keyword.get(config_url, :path, "/")) 12 | config = config |> Keyword.merge(template) |> Keyword.put(:url, url) 13 | {:ok, config} 14 | end 15 | 16 | defp build_template do 17 | [ 18 | url: url(), 19 | secret_key_base: secret_key_base(), 20 | live_view: live_view(), 21 | check_origin: check_origin() 22 | ] 23 | end 24 | 25 | import System, only: [get_env: 1] 26 | 27 | defp url, do: [scheme: url_scheme(), host: url_host(), port: url_port(), path: "/"] 28 | defp url_scheme, do: get_env("URL_SCHEME") || "http" 29 | defp url_host, do: get_env("URL_HOST") || get_env("HOST") || "localhost" 30 | defp url_port, do: get_env("URL_PORT") || get_env("PORT") 31 | 32 | defp secret_key_base, do: get_env("SECRET_KEY_BASE") 33 | 34 | defp live_view, do: [signing_salt: live_view_signing_salt()] 35 | defp live_view_signing_salt, do: get_env("LIVE_VIEW_SIGNING_SALT") || secret_key_base() 36 | 37 | with :prod <- Mix.env() do 38 | defp url_hosts do 39 | Enum.uniq([url_host() | url_alternate_hosts()]) 40 | end 41 | 42 | defp url_alternate_hosts do 43 | 
System.get_env("URL_ALTERNATE_HOSTS", "") |> String.split(",", trim: true) 44 | end 45 | 46 | defp check_origin do 47 | Enum.map(url_hosts(), &("//" <> &1)) 48 | end 49 | else 50 | _ -> defp check_origin, do: false 51 | end 52 | end 53 | -------------------------------------------------------------------------------- /apps/emporium_environment/lib/emporium_environment/horde.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumEnvironment.Horde do 2 | @moduledoc """ 3 | Convenience module to look up or start a named GenServer within 4 | the target Horde, for inclusion in Horde-using applications. 5 | 6 | By convention, each OTP application using Horde Supervisor / Registry 7 | modules will hang them under their own namespace such as… 8 | 9 | - EmporiumAccess.Horde.Registry 10 | - EmporiumAccess.Horde.Supervisor 11 | 12 | …and this convention is respected when spawning servers. 13 | """ 14 | 15 | @callback registry_module :: module() 16 | @callback registry_key(term()) :: term() 17 | 18 | defmacro __using__(options) do 19 | {:ok, namespace} = Keyword.fetch(options, :namespace) 20 | namespace = Macro.expand(namespace, __CALLER__) 21 | supervisor = Module.concat(namespace, Horde.Supervisor) 22 | registry = Module.concat(namespace, Horde.Registry) 23 | 24 | quote bind_quoted: [parent: __MODULE__, supervisor: supervisor, registry: registry] do 25 | def ensure_started(id) do 26 | unquote(parent).ensure_started(unquote(supervisor), __MODULE__, id) 27 | end 28 | 29 | def child_spec(term) do 30 | %{start: {__MODULE__, :start_link, [term]}, restart: :transient} 31 | end 32 | 33 | @behaviour parent 34 | @before_compile {parent, :__build_start_link__} 35 | 36 | @impl parent 37 | def registry_module, do: unquote(registry) 38 | 39 | @impl parent 40 | def registry_key(term), do: term 41 | 42 | defoverridable parent 43 | end 44 | end 45 | 46 | defmacro __build_start_link__(env) do 47 | unless Module.defines?(env.module, 
{:start_link, 1}) do 48 | behaviours = Module.get_attribute(env.module, :behaviour) 49 | 50 | if Enum.member?(behaviours, GenServer) do 51 | quote do 52 | def start_link(term) do 53 | name = {:via, Horde.Registry, {registry_module(), registry_key(term)}} 54 | GenServer.start_link(__MODULE__, term, name: name) 55 | end 56 | end 57 | else 58 | reason = "required by behaviour EmporiumEnvironment.Horde" 59 | 60 | raise UndefinedFunctionError, 61 | module: env.module, 62 | function: :start_link, 63 | arity: 1, 64 | message: "heh", 65 | reason: reason 66 | end 67 | end 68 | end 69 | 70 | def ensure_started(supervisor, module, term) do 71 | answer = lookup(module, term) || start(supervisor, module, term) 72 | 73 | case answer do 74 | pid when is_pid(pid) -> {:ok, pid} 75 | {:ok, pid} -> {:ok, pid} 76 | {:error, {:already_started, pid}} -> {:ok, pid} 77 | {:error, {:shutdown, reason}} -> {:error, reason} 78 | {:error, reason} -> {:error, reason} 79 | end 80 | end 81 | 82 | defp lookup(module, term) do 83 | registry = module.registry_module() 84 | key = module.registry_key(term) 85 | 86 | case Horde.Registry.lookup(registry, key) do 87 | [{pid, _}] -> pid 88 | _ -> nil 89 | end 90 | end 91 | 92 | defp start(supervisor, module, term) do 93 | Horde.DynamicSupervisor.start_child(supervisor, {module, term}) 94 | end 95 | end 96 | -------------------------------------------------------------------------------- /apps/emporium_environment/lib/emporium_environment/horde/client.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumEnvironment.Horde.Client do 2 | @moduledoc """ 3 | The Horde Client lives in Supervision trees elsewhere (for example under 4 | EmporiumAccess.Supervisor), and is responsible for joining Hordes managed 5 | by that Supervision tree with Hordes running on remote nodes. 
6 | """ 7 | 8 | use GenServer 9 | require Logger 10 | 11 | defmodule State do 12 | @moduledoc false 13 | @type t :: %__MODULE__{target: module()} 14 | defstruct target: nil 15 | end 16 | 17 | def start_link(options) do 18 | {_, name} = List.keyfind(options, :name, 0) 19 | {_, target} = List.keyfind(options, :target, 0) 20 | GenServer.start_link(__MODULE__, target, name: name) 21 | end 22 | 23 | def init(target) do 24 | :ok = GenServer.call(EmporiumEnvironment.Horde.Tracker, {:add_subscriber, self()}) 25 | _ = join_hordes(target, Node.list()) 26 | {:ok, %State{target: target}} 27 | end 28 | 29 | def handle_cast({:nodes_updated, node_names}, state) do 30 | _ = join_hordes(state.target, node_names) 31 | {:noreply, state} 32 | end 33 | 34 | defp join_hordes(target, node_names) do 35 | registry_result = target |> Module.concat(Horde.Registry) |> set_members(node_names) 36 | supervisor_result = target |> Module.concat(Horde.Supervisor) |> set_members(node_names) 37 | {registry_result, supervisor_result} 38 | end 39 | 40 | defp set_members(supervisor_name, node_names) do 41 | remote_members = for node_name <- node_names, do: {supervisor_name, node_name} 42 | members = [supervisor_name | remote_members] 43 | result = Horde.Cluster.set_members(supervisor_name, members) 44 | _ = handle_set_members_result(supervisor_name, node_names, result) 45 | result 46 | end 47 | 48 | defp handle_set_members_result(horde_name, node_names, :ok) do 49 | Logger.info(fn -> 50 | "set members for #{horde_name} to #{inspect(node_names)}" 51 | end) 52 | end 53 | 54 | defp handle_set_members_result(horde_name, node_names, {:error, reason}) do 55 | Logger.error(fn -> 56 | "unable to set members for #{horde_name} to #{inspect(node_names)}: #{inspect(reason)})" 57 | end) 58 | end 59 | end 60 | -------------------------------------------------------------------------------- /apps/emporium_environment/lib/emporium_environment/horde/tracker.ex: 
-------------------------------------------------------------------------------- 1 | defmodule EmporiumEnvironment.Horde.Tracker do 2 | @moduledoc """ 3 | The Horde Tracker is a resopnsible for messaging all of its subscribers when 4 | a new node has been added. The subscribers (__MODULE__.Horde.Client) will then 5 | in turn have their targets join Hordes. 6 | """ 7 | 8 | use GenServer 9 | 10 | defmodule State do 11 | defstruct subscribers: [], nodes: [] 12 | end 13 | 14 | def start_link(_) do 15 | GenServer.start_link(__MODULE__, [], name: __MODULE__) 16 | end 17 | 18 | def init(_) do 19 | :ok = :net_kernel.monitor_nodes(true, node_type: :visible) 20 | {:ok, %State{}} 21 | end 22 | 23 | def handle_call({:add_subscriber, pid}, _from, state) when is_pid(pid) do 24 | monitor_ref = Process.monitor(pid) 25 | to_subscribers = [{monitor_ref, pid} | state.subscribers] 26 | to_state = %{state | subscribers: to_subscribers} 27 | {:reply, :ok, to_state} 28 | end 29 | 30 | def handle_info({:DOWN, ref, :process, object, _}, state) do 31 | to_subscribers = state.subscribers |> List.delete({ref, object}) 32 | to_state = %{state | subscribers: to_subscribers} 33 | {:noreply, to_state} 34 | end 35 | 36 | def handle_info({:nodeup, node_name, _}, state) do 37 | to_nodes = [node_name | state.nodes] 38 | _ = broadcast(state.subscribers, {:nodes_updated, to_nodes}) 39 | {:noreply, %{state | nodes: to_nodes}} 40 | end 41 | 42 | def handle_info({:nodedown, node_name, _}, state) do 43 | to_nodes = List.delete(state.nodes, node_name) 44 | _ = broadcast(state.subscribers, {:nodes_updated, to_nodes}) 45 | {:noreply, %{state | nodes: to_nodes}} 46 | end 47 | 48 | defp broadcast(subscribers, message) do 49 | for {_, pid} <- subscribers do 50 | GenServer.cast(pid, message) 51 | end 52 | end 53 | end 54 | -------------------------------------------------------------------------------- /apps/emporium_environment/mix.exs: 
-------------------------------------------------------------------------------- 1 | defmodule EmporiumEnvironment.MixProject do 2 | use Mix.Project 3 | 4 | def project do 5 | [ 6 | app: :emporium_environment, 7 | version: "0.1.0", 8 | build_path: "../../_build", 9 | config_path: "../../config/config.exs", 10 | deps_path: "../../deps", 11 | lockfile: "../../mix.lock", 12 | elixirc_paths: elixirc_paths(Mix.env()), 13 | compilers: Mix.compilers(), 14 | start_permanent: Mix.env() == :prod, 15 | deps: deps() 16 | ] 17 | end 18 | 19 | def application do 20 | [ 21 | mod: {EmporiumEnvironment.Application, []}, 22 | extra_applications: [:logger, :runtime_tools] 23 | ] 24 | end 25 | 26 | defp elixirc_paths(:test), do: ["lib", "test/support"] 27 | defp elixirc_paths(_), do: ["lib"] 28 | 29 | defp deps do 30 | [ 31 | {:horde, "~> 0.8.4"}, 32 | {:libcluster, "~> 3.2.2"}, 33 | {:opentelemetry, "~> 1.0.0"}, 34 | {:opentelemetry_exporter, "~> 1.0.4"}, 35 | {:telemetry_metrics, "~> 0.6.1"}, 36 | {:telemetry_poller, "~> 1.0.0"} 37 | ] 38 | end 39 | end 40 | -------------------------------------------------------------------------------- /apps/emporium_inference/lib/emporium_inference/image.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumInference.Image do 2 | @moduledoc """ 3 | Defines the image sent to downstream inference implementations 4 | 5 | - `orientation`: Defines the orientation of the image as it was sent. 
For example, a sent 6 | orientation of 90° CCW (`:rotate_90_ccw`) would require correctional rortation of 90° CW 7 | """ 8 | 9 | @type format :: :RGB | :I420 10 | @type orientation :: :upright | :rotated_90_ccw | :rotated_180 | :rotated_90_cw 11 | 12 | @type t :: %__MODULE__{ 13 | width: non_neg_integer(), 14 | height: non_neg_integer(), 15 | format: format(), 16 | orientation: orientation(), 17 | data: binary() 18 | } 19 | 20 | defstruct width: 0, 21 | height: 0, 22 | format: :RGB, 23 | orientation: :upright, 24 | data: <<>> 25 | end 26 | -------------------------------------------------------------------------------- /apps/emporium_inference/lib/emporium_inference/image_conversion.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumInference.ImageConversion do 2 | alias EmporiumInference.Image 3 | 4 | @spec normalise(image :: Image.t(), width :: non_neg_integer(), height :: non_neg_integer()) :: 5 | Image.t() 6 | @spec to_mat(image :: Image.t()) :: Evision.Mat.t() 7 | 8 | def normalise(%Image{} = image, width, height) do 9 | image 10 | |> to_mat() 11 | |> crop(width, height) 12 | |> rotate_from(image.orientation) 13 | |> Evision.cvtColor(Evision.Constant.cv_COLOR_BGR2RGB()) 14 | |> Evision.Mat.to_binary() 15 | |> then(fn data -> 16 | %Image{width: width, height: height, format: :RGB, orientation: :upright, data: data} 17 | end) 18 | end 19 | 20 | @doc """ 21 | Converts an `EmporiumInference.Image` directly to `Evision.Mat` 22 | """ 23 | def to_mat(image) 24 | 25 | def to_mat(%Image{format: :RGB} = image) do 26 | Evision.Mat.from_binary(image.data, {:u, 8}, image.height, image.width, 3) 27 | end 28 | 29 | def to_mat(%Image{format: :I420} = image) do 30 | Evision.Mat.from_binary(image.data, {:u, 8}, ceil(image.height * 1.5), image.width, 1) 31 | |> Evision.cvtColor(Evision.Constant.cv_COLOR_YUV420p2RGB()) 32 | end 33 | 34 | defp crop(%Evision.Mat{shape: {image_height, image_width, _}} = mat, width, height) do 
35 | mat 36 | |> Evision.Mat.roi(get_scale_fit_roi(image_width, image_height, width, height)) 37 | |> Evision.resize({width, height}) 38 | end 39 | 40 | defp rotate_from(%Evision.Mat{} = mat, orientation) do 41 | case orientation do 42 | :upright -> mat 43 | :rotated_90_ccw -> Evision.rotate(mat, Evision.Constant.cv_ROTATE_90_CLOCKWISE()) 44 | :rotated_180 -> Evision.rotate(mat, Evision.Constant.cv_ROTATE_180()) 45 | :rotated_90_cw -> Evision.rotate(mat, Evision.Constant.cv_ROTATE_90_COUNTERCLOCKWISE()) 46 | end 47 | end 48 | 49 | defp get_scale_fit_roi(container_width, container_height, element_width, element_height) do 50 | scale_width = container_width / element_width 51 | scale_height = container_height / element_height 52 | scale = min(scale_width, scale_height) 53 | region_width = floor(scale * element_width) 54 | region_height = floor(scale * element_height) 55 | x = floor((container_width - region_width) / 2) 56 | y = floor((container_height - region_height) / 2) 57 | w = region_width 58 | h = region_height 59 | {x, y, w, h} 60 | end 61 | end 62 | -------------------------------------------------------------------------------- /apps/emporium_inference/mix.exs: -------------------------------------------------------------------------------- 1 | defmodule EmporiumInference do 2 | use Mix.Project 3 | 4 | def project do 5 | [ 6 | app: :emporium_inference, 7 | version: "0.1.0", 8 | elixir: "~> 1.13", 9 | elixirc_paths: elixirc_paths(Mix.env()), 10 | deps: deps(), 11 | start_permanent: Mix.env() == :prod, 12 | build_path: "../../_build", 13 | config_path: "../../config/config.exs", 14 | deps_path: "../../deps", 15 | lockfile: "../../mix.lock" 16 | ] 17 | end 18 | 19 | def application do 20 | [ 21 | # mod: {EmporiumInference.Application, []}, 22 | extra_applications: [:logger, :runtime_tools] 23 | ] 24 | end 25 | 26 | defp elixirc_paths(:test), do: ["lib", "test/support"] 27 | defp elixirc_paths(_), do: ["lib"] 28 | 29 | defp deps do 30 | [ 31 | {:evision, "~> 
0.1.31"} 32 | ] 33 | end 34 | end 35 | -------------------------------------------------------------------------------- /apps/emporium_inference_resnet/lib/emporium_inference_resnet.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumInference.ResNet do 2 | alias EmporiumInference.Image 3 | alias EmporiumInference.ResNet.Serving 4 | 5 | @type classification :: { 6 | label :: String.t(), 7 | score :: float() 8 | } 9 | 10 | @type duration :: {atom() | String.t(), non_neg_integer()} 11 | 12 | @spec request(Image.t()) :: 13 | {:ok, [classification], [duration]} 14 | | {:error, reason :: atom() | String.t()} 15 | 16 | def request(image) do 17 | image 18 | |> EmporiumInference.ImageConversion.normalise(640, 640) 19 | |> EmporiumInference.ImageConversion.to_mat() 20 | |> Serving.perform() 21 | end 22 | end 23 | -------------------------------------------------------------------------------- /apps/emporium_inference_resnet/lib/emporium_inference_resnet/application.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumInference.ResNet.Application do 2 | use Application 3 | 4 | def start(_type, _args) do 5 | children = [ 6 | EmporiumInference.ResNet.Serving 7 | ] 8 | 9 | options = [strategy: :one_for_one, name: EmporiumInference.ResNet.Supervisor] 10 | Supervisor.start_link(children, options) 11 | end 12 | end 13 | -------------------------------------------------------------------------------- /apps/emporium_inference_resnet/lib/emporium_inference_resnet/serving.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumInference.ResNet.Serving do 2 | @moduledoc """ 3 | Module-based Nx.Serving implementation for ResNet via Bumblebee 4 | """ 5 | 6 | def child_spec(options \\ []) do 7 | %{ 8 | id: __MODULE__, 9 | start: {__MODULE__, :start_link, [options]}, 10 | type: :worker, 11 | restart: :permanent, 12 | shutdown: 
:brutal_kill 13 | } 14 | end 15 | 16 | def start_link(options) do 17 | serving = build_serving() 18 | Nx.Serving.start_link([serving: serving, name: __MODULE__] ++ options) 19 | end 20 | 21 | def perform(%Evision.Mat{} = image) do 22 | tensor = Evision.Mat.to_nx(image, EXLA.Backend) 23 | results = Nx.Serving.batched_run(__MODULE__, [tensor]) 24 | [%{predictions: predictions}] = results 25 | {:ok, predictions, []} 26 | end 27 | 28 | defp build_serving do 29 | {:ok, model_info} = Bumblebee.load_model({:hf, "microsoft/resnet-50"}) 30 | {:ok, featurizer} = Bumblebee.load_featurizer({:hf, "microsoft/resnet-50"}) 31 | 32 | Bumblebee.Vision.image_classification(model_info, featurizer, 33 | top_k: 1, 34 | compile: [batch_size: 1], 35 | defn_options: [compiler: EXLA] 36 | ) 37 | end 38 | end 39 | -------------------------------------------------------------------------------- /apps/emporium_inference_resnet/mix.exs: -------------------------------------------------------------------------------- 1 | defmodule EmporiumInference.ResNet.Mixfile do 2 | use Mix.Project 3 | 4 | def project do 5 | [ 6 | app: :emporium_inference_resnet, 7 | version: "0.1.0", 8 | elixir: "~> 1.13", 9 | elixirc_paths: elixirc_paths(Mix.env()), 10 | deps: deps(), 11 | start_permanent: Mix.env() == :prod, 12 | build_path: "../../_build", 13 | config_path: "../../config/config.exs", 14 | deps_path: "../../deps", 15 | lockfile: "../../mix.lock" 16 | ] 17 | end 18 | 19 | def application do 20 | [ 21 | mod: {EmporiumInference.ResNet.Application, []}, 22 | extra_applications: [:logger, :runtime_tools] 23 | ] 24 | end 25 | 26 | defp elixirc_paths(:test), do: ["lib", "test/support"] 27 | defp elixirc_paths(_), do: ["lib"] 28 | 29 | defp deps do 30 | # elixir_make requires a patched release beyond 0.7.6 due to patch: 31 | # https://github.com/elixir-lang/elixir_make/commit/58fe5b705d451a9ddf13673a785a46cda07909dc 32 | 33 | [ 34 | {:emporium_inference, in_umbrella: true}, 35 | {:axon, "~> 0.5.1"}, 36 | 
{:bumblebee, "~> 0.3.0"}, 37 | {:exla, ">= 0.0.0"}, 38 | {:nx, "~> 0.5.3"} 39 | ] 40 | end 41 | end 42 | -------------------------------------------------------------------------------- /apps/emporium_inference_yolov5/.gitignore: -------------------------------------------------------------------------------- 1 | priv/install 2 | priv/runner 3 | priv/*.pt 4 | priv/*.torchscript 5 | -------------------------------------------------------------------------------- /apps/emporium_inference_yolov5/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | if(NOT UNIX) 2 | message(FATAL_ERROR "Unsupported platform") 3 | endif() 4 | 5 | cmake_minimum_required(VERSION 3.12 FATAL_ERROR) 6 | 7 | project(runner) 8 | find_package(Torch REQUIRED) 9 | find_package(OpenCV REQUIRED) 10 | 11 | enable_language(CXX) 12 | set(ABSL_PROPAGATE_CXX_STD ON) 13 | set(CMAKE_BUILD_TYPE Release) 14 | set(CMAKE_CXX_STANDARD 20) 15 | set(CMAKE_CXX_STANDARD_REQUIRED ON) 16 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -O3 -Wall -Wextra -Wno-unused-parameter") 17 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-missing-field-initializers") 18 | if(APPLE) 19 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -undefined dynamic_lookup") 20 | endif() 21 | 22 | include(CheckLanguage) 23 | check_language(CUDA) 24 | if(CMAKE_CUDA_COMPILER) 25 | add_compile_definitions(HAVE_CUDA) 26 | endif() 27 | 28 | file(GLOB_RECURSE runner_sources 29 | LIST_DIRECTORIES false 30 | CONFIGURE_DEPENDS 31 | "${SRC_DIR}/*.cc" "${SRC_DIR}/*.hh" 32 | ) 33 | 34 | add_executable(runner ${runner_sources}) 35 | target_include_directories(runner PUBLIC "${ERL_INTERFACE_INCLUDE_DIR}") 36 | target_include_directories(runner PUBLIC "${ERTS_INCLUDE_DIR}") 37 | target_link_libraries(runner PUBLIC "${TORCH_LIBRARIES}") 38 | target_link_libraries(runner PUBLIC "${OpenCV_LIBS}") 39 | target_link_libraries(runner PUBLIC ei) 40 | target_link_directories(runner PUBLIC "${ERL_INTERFACE_LIB_DIR}") 41 | 42 | 
set_target_properties(runner PROPERTIES 43 | INSTALL_RPATH_USE_LINK_PATH TRUE 44 | BUILD_WITH_INSTALL_RPATH TRUE 45 | ) 46 | 47 | if(APPLE) 48 | set_target_properties(runner PROPERTIES INSTALL_RPATH "@loader_path/install") 49 | else() 50 | set_target_properties(runner PROPERTIES INSTALL_RPATH "\$ORIGIN/install") 51 | endif() 52 | -------------------------------------------------------------------------------- /apps/emporium_inference_yolov5/Makefile: -------------------------------------------------------------------------------- 1 | PRIV_DIR = priv 2 | EXECUTABLE = $(PRIV_DIR)/runner 3 | CMAKE_BUILD_FLAGS = --verbose 4 | CMAKE_BUILD_DIR := $(shell mktemp -d) 5 | SRC_DIR = $(shell pwd)/c_src 6 | C_SOURCES = $(wildcard $(SRC_DIR)/*.c) 7 | CC_SOURCES = $(wildcard $(SRC_DIR)/*.cc) 8 | 9 | ifdef CMAKE_TOOLCHAIN_FILE 10 | CMAKE_CONFIGURE_FLAGS=-DCMAKE_TOOLCHAIN_FILE="$(CMAKE_TOOLCHAIN_FILE)" 11 | endif 12 | 13 | .DEFAULT_GLOBAL := build 14 | 15 | build: check $(EXECUTABLE) 16 | 17 | check: 18 | @ if [ ! -d "$(LIBTORCH_INSTALL_DIR)" ]; then \ 19 | echo "LIBTORCH_INSTALL_DIR should point to an installation of libTorch"; \ 20 | exit 1; \ 21 | fi 22 | @ if [ ! -d "$(ERL_INTERFACE_LIB_DIR)" ] || [ ! -d "$(ERL_INTERFACE_INCLUDE_DIR)" ]; then \ 23 | echo "ERL_INTERFACE_LIB_DIR and ERL_INTERFACE_INCLUDE_DIR should be set"; \ 24 | exit 1; \ 25 | fi 26 | @ if [ ! 
-d "$(ERTS_INCLUDE_DIR)" ]; then \ 27 | echo "ERTS_INCLUDE_DIR should be set"; \ 28 | exit 1; \ 29 | fi 30 | 31 | clean: 32 | rm -rf $(PRIV_DIR)/install 33 | rm -rf $(EXECUTABLE) 34 | 35 | $(EXECUTABLE): CMakeLists.txt $(C_SOURCES) $(CC_SOURCES) 36 | @ mkdir -p $(PRIV_DIR) 37 | @ if [ "${MIX_BUILD_EMBEDDED}" = "true" ]; then \ 38 | cp -a $(abspath $(LIBTORCH_INSTALL_DIR)/lib) $(PRIV_DIR)/install ; \ 39 | else \ 40 | ln -sf $(abspath $(LIBTORCH_INSTALL_DIR)/lib) $(PRIV_DIR)/install ; \ 41 | fi 42 | @ cd $(CMAKE_BUILD_DIR) && \ 43 | cmake \ 44 | -DCMAKE_PREFIX_PATH=$(LIBTORCH_INSTALL_DIR) \ 45 | -DSRC_DIR=$(SRC_DIR) \ 46 | -DERTS_INCLUDE_DIR=$(ERTS_INCLUDE_DIR) \ 47 | -DERL_INTERFACE_LIB_DIR=$(ERL_INTERFACE_LIB_DIR) \ 48 | -DERL_INTERFACE_INCLUDE_DIR=$(ERL_INTERFACE_INCLUDE_DIR) \ 49 | -S $(shell pwd) $(CMAKE_CONFIGURE_FLAGS) && \ 50 | cmake --build . $(CMAKE_BUILD_FLAGS) 51 | @ mv $(CMAKE_BUILD_DIR)/runner $(EXECUTABLE) 52 | -------------------------------------------------------------------------------- /apps/emporium_inference_yolov5/c_src/runner.cc: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include "ei.h" 10 | 11 | #ifdef HAVE_CUDA 12 | #include 13 | #endif 14 | 15 | // The Runner is a C node (hidden Node using Erlang Distribution) which connects to a Parent Node 16 | // on startup. 
It accepts the following Environment Variables: 17 | // 18 | // - `NODE_NAME`: The name of the Erlang node which this C node connects to 19 | // - `NODE_COOKIE`: The cookie to use when connecting to the parent node 20 | // - `MODEL_TYPE`: (Reserved) either `TorchScript` or `TensorRT` 21 | // - `MODEL_PATH`: The path of the model to load 22 | // - `READY_MODULE`: The name of the Module to send a ready message via RPC 23 | // - `READY_FUNCTION`: The function in the Module to send a ready message via RPC 24 | // - `READY_VALUE`: Unique string which identifies the worker; part of the ready message 25 | // - `LOGGER_LEVEL`: Level of the Erlang logger (per-message logging will be disabled above debug) 26 | // 27 | // Upon startup, the following operations take place: 28 | // 29 | // 1. Erlang Distribution is set up and the local C node connected to the parent (identified via 30 | // `NODE_NAME` and `NODE_COOKIE`). The C node is set up as a hidden node to the parent. 31 | // 32 | // 2. The TorchScript model is loaded (path identified via `MODEL_PATH`) to be prepared for use. 33 | // 34 | // 3. A ready message is sent via RPC to the parent node: 35 | // 36 | // - Module: As identified by READY_MODULE 37 | // - Function: As identified by READY_FUNCTION 38 | // - Arguments: 2-arity List containing the PID of the virtual mailbox and the READY_VALUE 39 | // 40 | // The parent node must return `{:ok, pid}`, echoing the same PID sent, implying successful 41 | // recognition of the C node. 42 | // 43 | // Upon ready, all communication with the C node will be done via messages sent to the virtual 44 | // mailbox. 
The protocol has the following shape: 45 | // 46 | // - Request: `{:call, sender :: pid(), nonce :: ref(), command :: atom(), payload :: term()}` 47 | // - Response: `{:reply, nonce :: ref(), result :: term()}` 48 | // 49 | // Currently the following commands are supported: 50 | // 51 | // - Image Inference 52 | // 53 | // - Command: `:infer` 54 | // - Payload: 55 | // 56 | // { 57 | // width :: non_neg_integer(), 58 | // height :: non_neg_integer(), 59 | // format :: :RGB | :I420, 60 | // orientation :: :upright | :rotated_90_ccw | :rotated_180 | :rotated_90_cw, 61 | // data :: binary 62 | // } 63 | // 64 | // - Response: `{:ok, list(detection), durations}`, where: 65 | // 66 | // detection :: %{ 67 | // required(:x1) => float(), 68 | // required(:y1) => float(), 69 | // required(:x2) => float(), 70 | // required(:y2) => float(), 71 | // required(:class_id) => non_neg_integer(), 72 | // required(:score) => float() 73 | // } 74 | // duration :: %{ 75 | // required(atom) => non_neg_integer() 76 | // } 77 | // 78 | 79 | using std::get; 80 | using std::string; 81 | using std::vector; 82 | using std::invalid_argument; 83 | using std::runtime_error; 84 | using cv::Rect; 85 | using cv::Point; 86 | using torch::Tensor; 87 | 88 | typedef enum { 89 | ImageOrientationUpright = 0, 90 | ImageOrientationRotated90CCW = 1, 91 | ImageOrientationRotated180 = 2, 92 | ImageOrientationRotated90CW = 3 93 | } ImageOrientation; 94 | 95 | typedef enum { 96 | ImageFormatUnknown = 0, 97 | ImageFormatRGBCHW = 1, 98 | ImageFormatRGBHWC = 2, 99 | ImageFormatYUV420 = 3 100 | } ImageFormat; 101 | 102 | // typedef enum { 103 | // ImageSourceHeap = 0, 104 | // ImageSourceSharedMemory = 1 105 | // } ImageSource; 106 | 107 | typedef int64_t ImageDimension; 108 | 109 | typedef struct { 110 | ImageDimension width; 111 | ImageDimension height; 112 | ImageOrientation orientation; 113 | ImageFormat format; 114 | void *data; 115 | } Image; 116 | 117 | typedef struct { 118 | float x1; 119 | float y1; 120 | 
float x2; 121 | float y2; 122 | float score; 123 | unsigned long long class_id; 124 | } Detection; 125 | 126 | void setup_erlang(void); 127 | void setup_aten(void); 128 | void setup_torch(void); 129 | void setup_model(void); 130 | void send_ready(void); 131 | void receive_loop(void); 132 | void process_message(erlang_msg message, ei_x_buff buffer); 133 | void process_message_infer(erlang_pid from, erlang_ref nonce, Image image); 134 | void send_reply(erlang_pid from, erlang_ref nonce, ei_x_buff response); 135 | std::string random_name(int length); 136 | 137 | Tensor build_tensor(Image image); 138 | vector> build_detections(Tensor batch, float min_score, float min_iou); 139 | 140 | static ei_cnode ErlangNode; 141 | static erlang_pid ErlangPid; 142 | static int ErlangConnection; 143 | static bool ErlangDebug = true; 144 | static torch::Device TorchDevice = torch::kCPU; 145 | static torch::jit::script::Module TorchModel; 146 | #ifdef HAVE_CUDA 147 | static c10::DeviceIndex TorchDeviceIndex = -1; 148 | #endif 149 | 150 | int main(int argc, const char* argv[]) { 151 | setup_erlang(); 152 | setup_aten(); 153 | setup_torch(); 154 | setup_model(); 155 | send_ready(); 156 | receive_loop(); 157 | } 158 | 159 | void setup_erlang(void) { 160 | const char* node_name = getenv("NODE_NAME"); 161 | const char* node_cookie = getenv("NODE_COOKIE"); 162 | const short node_creation = time(NULL) + 1; 163 | const char* logger_level = getenv("LOGGER_LEVEL"); 164 | 165 | if (!node_name) { 166 | throw invalid_argument("NODE_NAME not set"); 167 | } 168 | 169 | if (!node_cookie) { 170 | throw invalid_argument("NODE_COOKIE not set"); 171 | } 172 | 173 | if ((0 != ei_init()) || 174 | (0 != ei_connect_init(&ErlangNode, random_name(6).c_str(), node_cookie, node_creation)) || 175 | (0 >= (ErlangConnection = ei_connect(&ErlangNode, (char*)node_name)))) { 176 | throw runtime_error("Unable to establish connection"); 177 | } 178 | 179 | if (0 != ei_make_pid(&ErlangNode, &ErlangPid)) { 180 | throw 
runtime_error("Unable to create mailbox PID"); 181 | } 182 | 183 | if (logger_level) { 184 | ErlangDebug = (0 == strcmp(logger_level, "debug")); 185 | } 186 | } 187 | 188 | void setup_aten(void) { 189 | at::init_num_threads(); 190 | } 191 | 192 | void setup_torch(void) { 193 | #ifdef HAVE_CUDA 194 | if (torch::cuda::is_available()) { 195 | TorchDevice = torch::kCUDA; 196 | TorchDeviceIndex = 0; 197 | std::cout << "Using GPU # " << TorchDeviceIndex << " for Inference" << std::endl; 198 | c10::cuda::setCurrentCUDAStream(c10::cuda::getStreamFromPool(true, TorchDeviceIndex)); 199 | } 200 | #endif 201 | } 202 | 203 | void setup_model(void) { 204 | const char* model_path = getenv("MODEL_PATH"); 205 | 206 | if (!model_path) { 207 | throw invalid_argument("MODEL_PATH not set"); 208 | } 209 | 210 | std::cout << "Loading Model: " << model_path << std::endl; 211 | TorchModel = torch::jit::load(model_path, TorchDevice); 212 | 213 | if (TorchDevice == torch::kCPU) { 214 | std::cout << "Using CPU" << std::endl; 215 | TorchModel.to(torch::kFloat); 216 | } else if (TorchDevice == torch::kCUDA) { 217 | std::cout << "Using CUDA" << std::endl; 218 | TorchModel.to(torch::kHalf); 219 | } 220 | } 221 | 222 | void send_ready(void) { 223 | const char* ready_module = getenv("READY_MODULE"); 224 | const char* ready_function = getenv("READY_FUNCTION"); 225 | const char* ready_value = getenv("READY_VALUE"); 226 | 227 | if (!ready_module) { 228 | throw invalid_argument("READY_MODULE not set"); 229 | } 230 | 231 | if (!ready_function) { 232 | throw invalid_argument("READY_FUNCTION not set"); 233 | } 234 | 235 | if (!ready_value) { 236 | throw invalid_argument("READY_VALUE not set"); 237 | } 238 | 239 | ei_x_buff arguments; 240 | ei_x_new(&arguments); 241 | ei_x_encode_list_header(&arguments, 2); 242 | ei_x_encode_pid(&arguments, &ErlangPid); 243 | ei_x_encode_string(&arguments, ready_value); 244 | ei_x_encode_empty_list(&arguments); 245 | 246 | std::cout << "Signalling readiness" << std::endl; 
247 | if (0 > ei_rpc_to(&ErlangNode, ErlangConnection, (char*)ready_module, (char*)ready_function, 248 | arguments.buff, arguments.index)) { 249 | throw runtime_error("Unable to signal readiness via RPC"); 250 | } 251 | 252 | ei_x_buff result; 253 | ei_x_new_with_version(&result); 254 | for (;;) { 255 | erlang_msg message; 256 | switch (ei_rpc_from(&ErlangNode, ErlangConnection, ERL_NO_TIMEOUT, &message, &result)) { 257 | case ERL_MSG: { 258 | int index = 0; 259 | int version = 0; 260 | int arity = 0; 261 | char atom[MAXATOMLEN]; 262 | erlang_pid pid; 263 | if ((0 > ei_decode_version(result.buff, &index, &version)) || 264 | (0 != ei_decode_tuple_header(result.buff, &index, &arity)) || 265 | (2 != arity) || 266 | (0 > ei_decode_atom(result.buff, &index, atom)) || 267 | (0 != strcmp("rex", atom)) || 268 | (0 != ei_decode_tuple_header(result.buff, &index, &arity)) || 269 | (2 != arity) || 270 | (0 > ei_decode_atom(result.buff, &index, atom)) || 271 | (0 != strcmp("ok", atom)) || 272 | (0 != ei_decode_pid(result.buff, &index, &pid)) || 273 | (0 != ei_cmp_pids(&pid, &ErlangPid))) { 274 | continue; 275 | } 276 | std::cout << "Upstream Acknowledged" << std::endl; 277 | break; 278 | } 279 | case ERL_TICK: { 280 | continue; 281 | } 282 | case ERL_ERROR: 283 | case ERL_TIMEOUT: 284 | default: { 285 | throw runtime_error("Unable to signal readiness via RPC"); 286 | break; 287 | } 288 | } 289 | break; 290 | } 291 | 292 | ei_x_free(&arguments); 293 | ei_x_free(&result); 294 | } 295 | 296 | void receive_loop(void) { 297 | if (ErlangDebug) { 298 | std::cout << "Awaiting Message" << std::endl; 299 | } 300 | int result; 301 | do { 302 | erlang_msg message; 303 | ei_x_buff buffer; 304 | ei_x_new(&buffer); 305 | result = ei_xreceive_msg(ErlangConnection, &message, &buffer); 306 | if (result == ERL_MSG) { 307 | if (ErlangDebug) { 308 | std::cout << "Got Message" << std::endl; 309 | } 310 | if ((message.msgtype == ERL_SEND) || (message.msgtype == ERL_REG_SEND)) { 311 | 
process_message(message, buffer); 312 | } 313 | } 314 | ei_x_free(&buffer); 315 | } while (result != ERL_ERROR); 316 | } 317 | 318 | void process_message(erlang_msg message, ei_x_buff buffer) { 319 | int version = 0; 320 | int index = 0; 321 | int arity = 0; 322 | erlang_pid from; 323 | erlang_ref nonce; 324 | char atom[MAXATOMLEN]; 325 | 326 | if (ErlangDebug) { 327 | std::cout << "Processing Message" << std::endl; 328 | } 329 | if ((0 != ei_decode_version(buffer.buff, &index, &version)) || 330 | (0 != ei_decode_tuple_header(buffer.buff, &index, &arity)) || 331 | (5 != arity) || 332 | (0 != ei_decode_atom(buffer.buff, &index, atom)) || 333 | (0 != strcmp(atom, "call")) || 334 | (0 != ei_decode_pid(buffer.buff, &index, &from)) || 335 | (0 != ei_decode_ref(buffer.buff, &index, &nonce)) || 336 | (0 != ei_decode_atom(buffer.buff, &index, atom))) { 337 | throw runtime_error("Unable to decode message"); 338 | } 339 | 340 | if (0 == strcmp(atom, "infer")) { 341 | // {:infer, {width, height, orientation, format, data}} 342 | long image_width = 0; 343 | long image_height = 0; 344 | char image_orientation_atom[MAXATOMLEN]; 345 | char image_orientation = ImageOrientationUpright; 346 | char image_format_atom[MAXATOMLEN]; 347 | char image_format = ImageFormatUnknown; 348 | int image_byte_size = 0; 349 | int image_encoded_type = 0; 350 | void *image_data = NULL; 351 | int arity = 0; 352 | if ((0 == ei_decode_tuple_header(buffer.buff, &index, &arity)) && 353 | (5 == arity) && 354 | (0 == ei_decode_long(buffer.buff, &index, &image_width)) && 355 | (0 == ei_decode_long(buffer.buff, &index, &image_height)) && 356 | (0 == ei_decode_atom(buffer.buff, &index, image_orientation_atom)) && 357 | (0 == ei_decode_atom(buffer.buff, &index, image_format_atom)) && 358 | (0 == ei_get_type(buffer.buff, &index, &image_encoded_type, &image_byte_size)) && 359 | (ERL_BINARY_EXT == image_encoded_type) && 360 | (image_data = malloc(image_byte_size))) { 361 | long image_length = 0; 362 | if (0 == 
ei_decode_binary(buffer.buff, &index, image_data, &image_length)) { 363 | if (0 == strcmp(image_orientation_atom, "upright")) { 364 | image_orientation = ImageOrientationUpright; 365 | } else if (0 == strcmp(image_orientation_atom, "rotated_90_ccw")) { 366 | image_orientation = ImageOrientationRotated90CCW; 367 | } else if (0 == strcmp(image_orientation_atom, "rotated_180")) { 368 | image_orientation = ImageOrientationRotated180; 369 | } else if (0 == strcmp(image_orientation_atom, "rotated_90_cw")) { 370 | image_orientation = ImageOrientationRotated90CW; 371 | } else { 372 | throw runtime_error("Unable to handle image orientation"); 373 | } 374 | if (0 == strcmp(image_format_atom, "RGB")) { 375 | image_format = ImageFormatRGBHWC; 376 | } else if (0 == strcmp(image_format_atom, "I420")) { 377 | image_format = ImageFormatYUV420; 378 | } else { 379 | throw runtime_error("Unable to handle image format"); 380 | } 381 | Image image = { 382 | .width = (ImageDimension)image_width, 383 | .height = (ImageDimension)image_height, 384 | .orientation = (ImageOrientation)image_orientation, 385 | .format = (ImageFormat)image_format, 386 | .data = image_data 387 | }; 388 | process_message_infer(from, nonce, image); 389 | free(image_data); 390 | return; 391 | } 392 | } 393 | } 394 | throw runtime_error("Unable to handle command"); 395 | } 396 | 397 | Tensor build_tensor(Image image) { 398 | ImageDimension width = image.width; 399 | ImageDimension height = image.height; 400 | // ImageOrientation orientation = image.orientation; 401 | // ImageFormat format = image.format; 402 | void *data = image.data; 403 | Tensor input = torch::from_blob(data, {width, height, 3}, torch::kByte); 404 | 405 | if (TorchDevice == torch::kCPU) { 406 | return input.permute({2, 0, 1}).toType(torch::kFloat).div(255).unsqueeze(0); 407 | } else { 408 | return input.to(torch::kCUDA, true, true).permute({2, 0, 1}).toType(torch::kFloat16).div(255).unsqueeze(0); 409 | } 410 | } 411 | 412 | void 
process_message_infer(erlang_pid from, erlang_ref nonce, Image image) { 413 | using std::chrono::duration_cast; 414 | using std::chrono::microseconds; 415 | using std::chrono::steady_clock; 416 | 417 | if (ErlangDebug) { 418 | std::cout << "Processing Inference" << std::endl; 419 | } 420 | auto time_started = steady_clock::now(); 421 | 422 | Tensor tensor_input = build_tensor(image); 423 | auto time_loaded = steady_clock::now(); 424 | 425 | Tensor tensor_output = TorchModel.forward({tensor_input}).toTuple()->elements()[0].toTensor(); 426 | auto time_executed = steady_clock::now(); 427 | 428 | vector> lists_detections = build_detections(tensor_output, 0.25, 0.45); 429 | auto time_processed = steady_clock::now(); 430 | 431 | auto duration_load = duration_cast(time_loaded - time_started); 432 | auto duration_execute = duration_cast(time_executed - time_loaded); 433 | auto duration_process = duration_cast(time_processed - time_executed); 434 | 435 | ei_x_buff response; 436 | ei_x_new(&response); 437 | ei_x_encode_tuple_header(&response, 3); 438 | ei_x_encode_atom(&response, "ok"); 439 | for (vector list_detections: lists_detections) { 440 | for (Detection detection: list_detections) { 441 | ei_x_encode_list_header(&response, 1); 442 | ei_x_encode_map_header(&response, 6); 443 | ei_x_encode_atom(&response, "x1"); 444 | ei_x_encode_double(&response, detection.x1); 445 | ei_x_encode_atom(&response, "y1"); 446 | ei_x_encode_double(&response, detection.y1); 447 | ei_x_encode_atom(&response, "x2"); 448 | ei_x_encode_double(&response, detection.x2); 449 | ei_x_encode_atom(&response, "y2"); 450 | ei_x_encode_double(&response, detection.y2); 451 | ei_x_encode_atom(&response, "score"); 452 | ei_x_encode_double(&response, detection.score); 453 | ei_x_encode_atom(&response, "class_id"); 454 | ei_x_encode_char(&response, ((unsigned char)(detection.class_id))); 455 | } 456 | } 457 | ei_x_encode_empty_list(&response); 458 | ei_x_encode_list_header(&response, 3); 459 | 
ei_x_encode_tuple_header(&response, 2); 460 | ei_x_encode_atom(&response, "load"); 461 | ei_x_encode_longlong(&response, duration_load.count()); 462 | ei_x_encode_tuple_header(&response, 2); 463 | ei_x_encode_atom(&response, "execute"); 464 | ei_x_encode_longlong(&response, duration_execute.count()); 465 | ei_x_encode_tuple_header(&response, 2); 466 | ei_x_encode_atom(&response, "process"); 467 | ei_x_encode_longlong(&response, duration_process.count()); 468 | ei_x_encode_empty_list(&response); 469 | send_reply(from, nonce, response); 470 | ei_x_free(&response); 471 | } 472 | 473 | void send_reply(erlang_pid from, erlang_ref nonce, ei_x_buff response) { 474 | ei_x_buff message; 475 | ei_x_new_with_version(&message); 476 | ei_x_encode_tuple_header(&message, 3); 477 | ei_x_encode_atom(&message, "reply"); 478 | ei_x_encode_ref(&message, &nonce); 479 | ei_x_append(&message, &response); 480 | ei_send(ErlangConnection, &from, message.buff, message.index); 481 | } 482 | 483 | std::string random_name(int length) { 484 | std::random_device random_device; 485 | std::mt19937 random_generator(random_device()); 486 | std::uniform_int_distribution random_distribution{'a', 'z'}; 487 | std::string result(length, '\0'); 488 | for (auto& result_character: result) { 489 | result_character = random_distribution(random_generator); 490 | } 491 | return result; 492 | } 493 | 494 | Tensor build_xyxy(const Tensor& xywh) { 495 | auto xyxy = torch::zeros_like(xywh); 496 | auto cx = xywh.select(1, 0); 497 | auto cy = xywh.select(1, 1); 498 | auto w = xywh.select(1, 2); 499 | auto h = xywh.select(1, 3); 500 | xyxy.select(1, 0) = cx - w / 2; 501 | xyxy.select(1, 1) = cy - h / 2; 502 | xyxy.select(1, 2) = cx + w / 2; 503 | xyxy.select(1, 3) = cy + h / 2; 504 | return xyxy; 505 | } 506 | 507 | vector> build_detections(Tensor batch, float min_score, float min_iou) { 508 | const auto batch_sizes = batch.sizes(); 509 | const int count_attributes = 5; 510 | const int count_classes = batch_sizes[2] - 
count_attributes; 511 | const int batch_size = batch_sizes[0]; 512 | vector> batch_output; 513 | 514 | for (int batch_index = 0; batch_index < batch_size; batch_index++) { 515 | vector frame_output; 516 | auto predictions = batch.select(0, batch_index); 517 | auto object_score = predictions.select(1, 4); 518 | auto class_scores = predictions.slice(1, count_attributes, count_attributes + count_classes); 519 | auto class_score_max = class_scores.max(1); 520 | auto class_score = get<0>(class_score_max); 521 | auto class_id = get<1>(class_score_max); 522 | auto score = class_score * object_score; 523 | auto indices = score.ge(min_score).nonzero().select(1, 0); 524 | int indices_size = indices.size(0); 525 | 526 | if (0 == indices_size) { 527 | continue; 528 | } 529 | 530 | auto xyxy = build_xyxy(predictions.slice(1, 0, 4).index_select(0, indices)); 531 | score = score.index_select(0, indices); 532 | class_id = class_id.index_select(0, indices); 533 | 534 | #ifdef HAVE_CUDA 535 | auto xyxy_cpu = xyxy.to(torch::kCPU, true, true); 536 | auto score_cpu = score.to(torch::kCPU, true, true); 537 | auto class_id_cpu = class_id.to(torch::kCPU, true, true); 538 | c10::cuda::getCurrentCUDAStream(TorchDeviceIndex).synchronize(); 539 | #else 540 | auto xyxy_cpu = xyxy; 541 | auto score_cpu = score; 542 | auto class_id_cpu = class_id; 543 | #endif 544 | 545 | auto xyxy_accessor = xyxy_cpu.accessor(); 546 | auto score_accessor = score_cpu.accessor(); 547 | auto class_id_accessor = class_id_cpu.accessor(); 548 | 549 | vector boxes; 550 | vector scores; 551 | vector indices_nms; 552 | for (int i = 0; i < indices_size; i++) { 553 | Point x1y1 = Point(xyxy_accessor[i][0], xyxy_accessor[i][1]); 554 | Point x2y2 = Point(xyxy_accessor[i][2], xyxy_accessor[i][3]); 555 | boxes.emplace_back(Rect(x1y1, x2y2)); 556 | scores.emplace_back(score_accessor[i]); 557 | } 558 | cv::dnn::NMSBoxes(boxes, scores, min_score, min_iou, indices_nms); 559 | 560 | for (int index_nms: indices_nms) { 561 | 
Detection item; 562 | item.x1 = xyxy_accessor[index_nms][0]; 563 | item.y1 = xyxy_accessor[index_nms][1]; 564 | item.x2 = xyxy_accessor[index_nms][2]; 565 | item.y2 = xyxy_accessor[index_nms][3]; 566 | item.score = score_accessor[index_nms]; 567 | item.class_id = class_id_accessor[index_nms]; 568 | frame_output.emplace_back(item); 569 | } 570 | batch_output.emplace_back(frame_output); 571 | } 572 | 573 | return batch_output; 574 | } 575 | -------------------------------------------------------------------------------- /apps/emporium_inference_yolov5/lib/emporium_inference_yolov5.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumInference.YOLOv5 do 2 | alias EmporiumInference.Image 3 | alias EmporiumInference.YOLOv5.Request 4 | 5 | @type detection :: { 6 | class_id :: non_neg_integer(), 7 | class_name :: String.t(), 8 | score :: float(), 9 | x1 :: float(), 10 | x2 :: float(), 11 | y1 :: float(), 12 | y2 :: float() 13 | } 14 | 15 | @type duration :: {atom() | String.t(), non_neg_integer()} 16 | 17 | @spec request(Image.t()) :: 18 | {:ok, [detection], [duration]} 19 | | {:error, reason :: atom() | String.t()} 20 | 21 | def request(image) do 22 | image 23 | |> EmporiumInference.ImageConversion.normalise(640, 640) 24 | |> Request.perform() 25 | end 26 | end 27 | -------------------------------------------------------------------------------- /apps/emporium_inference_yolov5/lib/emporium_inference_yolov5/acceptor.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumInference.YOLOv5.Acceptor do 2 | @moduledoc """ 3 | The Daemon Acceptor is started by the Daemon Server based on the maximum number of concurrent 4 | jobs allowed by the underlying daemon (usually 1). The Daemon Acceptor is invoked via the 5 | Request module, and keeps track of the caller throughput. 
6 | """ 7 | 8 | alias EmporiumInference.YOLOv5.Broker 9 | 10 | defmodule State do 11 | @type t :: %__MODULE__{ 12 | server_pid: nil | pid(), 13 | client_pid: nil | pid() 14 | } 15 | defstruct server_pid: nil, 16 | client_pid: nil 17 | end 18 | 19 | def child_spec(server_pid) when is_pid(server_pid) do 20 | %{ 21 | id: __MODULE__, 22 | start: {__MODULE__, :start_link, [server_pid]}, 23 | type: :worker, 24 | restart: :permanent, 25 | shutdown: :brutal_kill 26 | } 27 | end 28 | 29 | def start_link(server_pid) do 30 | pid = 31 | spawn_link(fn -> 32 | start(server_pid) 33 | end) 34 | 35 | {:ok, pid} 36 | end 37 | 38 | def start(server_pid) do 39 | loop(%State{server_pid: server_pid}) 40 | end 41 | 42 | def loop(%State{client_pid: client_pid} = state) when is_pid(client_pid) do 43 | _ = Process.unlink(client_pid) 44 | loop(%{state | client_pid: nil}) 45 | end 46 | 47 | def loop(%State{} = state) do 48 | with {:go, _ref, client_pid, _relative_time, _sojourn_time} <- :sbroker.ask_r(Broker) do 49 | true = Process.link(client_pid) 50 | state = %{state | client_pid: client_pid} 51 | accept(state) 52 | else 53 | {:drop, _sojourn_time} -> :ok 54 | end 55 | end 56 | 57 | def accept(%State{} = state) do 58 | receive do 59 | {:call, call} -> handle_call(call, state) 60 | after 61 | 5000 -> :ok 62 | end 63 | end 64 | 65 | def handle_call(call, %State{} = state) do 66 | result = GenServer.call(state.server_pid, call) 67 | send(state.client_pid, result) 68 | loop(state) 69 | end 70 | end 71 | -------------------------------------------------------------------------------- /apps/emporium_inference_yolov5/lib/emporium_inference_yolov5/application.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumInference.YOLOv5.Application do 2 | use Application 3 | 4 | def start(_type, _args) do 5 | children = [ 6 | EmporiumInference.YOLOv5.Broker, 7 | EmporiumInference.YOLOv5.Runner 8 | ] 9 | 10 | options = [strategy: :one_for_one, name: 
defmodule EmporiumInference.YOLOv5.Broker do
  @moduledoc """
  Connection Broker allowing graceful introduction and removal of client-side inference
  requests.

  - The Ask queue (for inference requests) uses :sbroker_codel_queue.
  - The AskR queue (for runners) uses :sbroker_drop_queue with infinity capacity.
  """

  def child_spec(init_args) do
    %{
      id: __MODULE__,
      start: {__MODULE__, :start_link, [init_args]},
      restart: :permanent,
      shutdown: :infinity,
      type: :supervisor
    }
  end

  def start_link(_init_args) do
    :sbroker.start_link({:local, __MODULE__}, __MODULE__, [], [])
  end

  # CoDel queue for callers (min target of 10), unbounded drop queue for runners.
  def init(_init_args) do
    {:ok,
     {{:sbroker_codel_queue, %{min: 10}}, {:sbroker_drop_queue, %{max: :infinity}}, []}}
  end
end
handle_broker({:await, tag, _}, callback, timeout) do 22 | :sbroker.await(tag, :infinity) 23 | |> handle_broker(callback, timeout) 24 | end 25 | 26 | defp handle_broker({:drop, _}, _callback, _timeout) do 27 | {:error, :dropped} 28 | end 29 | 30 | defp handle_infer({:ok, detections, durations}) do 31 | {:ok, replace_class_names(detections), durations} 32 | end 33 | 34 | defp handle_infer({:error, reason}) do 35 | {:error, reason} 36 | end 37 | 38 | defp replace_class_names(detections) do 39 | for detection <- detections, {:ok, class_id} = Map.fetch(detection, :class_id) do 40 | Map.put(detection, :class_name, get_class_name(class_id)) 41 | end 42 | end 43 | 44 | defp send_call(pid, call, timeout) do 45 | send(pid, {:call, call}) 46 | 47 | receive do 48 | {:ok, results, durations} -> {:ok, results, durations} 49 | {:error, reason} -> {:error, reason} 50 | after 51 | timeout -> {:error, :timeout} 52 | end 53 | end 54 | 55 | def get_class_name(class_id) do 56 | [ 57 | "person", 58 | "bicycle", 59 | "car", 60 | "motorcycle", 61 | "airplane", 62 | "bus", 63 | "train", 64 | "truck", 65 | "boat", 66 | "traffic light", 67 | "fire hydrant", 68 | "stop sign", 69 | "parking meter", 70 | "bench", 71 | "bird", 72 | "cat", 73 | "dog", 74 | "horse", 75 | "sheep", 76 | "cow", 77 | "elephant", 78 | "bear", 79 | "zebra", 80 | "giraffe", 81 | "backpack", 82 | "umbrella", 83 | "handbag", 84 | "tie", 85 | "suitcase", 86 | "frisbee", 87 | "skis", 88 | "snowboard", 89 | "sports ball", 90 | "kite", 91 | "baseball bat", 92 | "baseball glove", 93 | "skateboard", 94 | "surfboard", 95 | "tennis racket", 96 | "bottle", 97 | "wine glass", 98 | "cup", 99 | "fork", 100 | "knife", 101 | "spoon", 102 | "bowl", 103 | "banana", 104 | "apple", 105 | "sandwich", 106 | "orange", 107 | "broccoli", 108 | "carrot", 109 | "hot dog", 110 | "pizza", 111 | "donut", 112 | "cake", 113 | "chair", 114 | "couch", 115 | "potted plant", 116 | "bed", 117 | "dining table", 118 | "toilet", 119 | "tv", 120 | "laptop", 
121 | "mouse", 122 | "remote", 123 | "keyboard", 124 | "cell phone", 125 | "microwave", 126 | "oven", 127 | "toaster", 128 | "sink", 129 | "refrigerator", 130 | "book", 131 | "clock", 132 | "vase", 133 | "scissors", 134 | "teddy bear", 135 | "hair drier", 136 | "toothbrush" 137 | ] 138 | |> Enum.at(class_id) 139 | end 140 | end 141 | -------------------------------------------------------------------------------- /apps/emporium_inference_yolov5/lib/emporium_inference_yolov5/runner.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumInference.YOLOv5.Runner do 2 | @moduledoc """ 3 | The Runner is responsible for starting the external Model Runner process (daemon), which 4 | interacts with the GPU and runs inferrence, and exposing the resultant capacity via the Broker 5 | as a pool of Daemon Acceptors. 6 | """ 7 | 8 | @otp_app Mix.Project.config()[:app] 9 | use GenServer 10 | require Logger 11 | alias EmporiumInference.Image 12 | 13 | defmodule State do 14 | @type t :: %__MODULE__{ 15 | status: :pending | {:starting, ready_value :: String.t()} | :available | :failed, 16 | exec_pid: pid() | nil, 17 | exec_os_pid: non_neg_integer() | nil, 18 | mailbox_pid: pid() | nil, 19 | pending_calls: :queue.queue({message :: term(), from :: GenServer.from()}), 20 | pending_requests: %{required(reference()) => from :: GenServer.from()}, 21 | acceptors_count: non_neg_integer(), 22 | acceptors_supervisor_pid: pid() | nil 23 | } 24 | defstruct status: :pending, 25 | exec_pid: nil, 26 | exec_os_pid: nil, 27 | mailbox_pid: nil, 28 | pending_calls: :queue.new(), 29 | pending_requests: %{}, 30 | acceptors_count: 1, 31 | acceptors_supervisor_pid: nil 32 | end 33 | 34 | alias EmporiumInference.YOLOv5.Acceptor 35 | 36 | @doc """ 37 | Starts the Model Runner 38 | """ 39 | def start_link(init_arg) do 40 | GenServer.start_link(__MODULE__, init_arg, name: __MODULE__) 41 | end 42 | 43 | @doc """ 44 | Called by the Runner via RPC once the model 
has been fully loaded 45 | """ 46 | def ready(mailbox_pid, ready_value) do 47 | with :ok = GenServer.call(__MODULE__, {:ready, mailbox_pid, to_string(ready_value)}) do 48 | {:ok, mailbox_pid} 49 | end 50 | end 51 | 52 | @impl GenServer 53 | def init(_) do 54 | {:ok, %State{}, {:continue, :init}} 55 | end 56 | 57 | @impl GenServer 58 | def handle_call({:ready, mailbox_pid, x}, _from, %State{status: {:starting, x}} = state) do 59 | state = %{state | status: :available, mailbox_pid: mailbox_pid} 60 | {:ok, state} = start_acceptors(state) 61 | {:reply, :ok, state, {:continue, :ready}} 62 | end 63 | 64 | @impl GenServer 65 | def handle_call(message, from, %State{status: {:starting, _}} = state) do 66 | pending_calls = :queue.in({message, from}, state.pending_calls) 67 | state = %{state | pending_calls: pending_calls} 68 | {:noreply, state} 69 | end 70 | 71 | @impl GenServer 72 | def handle_call( 73 | {:infer, 74 | %Image{ 75 | width: width, 76 | height: height, 77 | orientation: orientation, 78 | format: format, 79 | data: data 80 | }}, 81 | from, 82 | %State{status: :available} = state 83 | ) do 84 | {:ok, state} = send_request(:infer, {width, height, orientation, format, data}, from, state) 85 | {:noreply, state} 86 | end 87 | 88 | @impl GenServer 89 | def handle_continue(:init, %State{status: :pending} = state) do 90 | _ = Process.flag(:trap_exit, true) 91 | {:ok, pid, os_pid, ready_value} = start_runner() 92 | {:noreply, %{state | status: {:starting, ready_value}, exec_pid: pid, exec_os_pid: os_pid}} 93 | end 94 | 95 | @impl GenServer 96 | def handle_continue(:ready, %State{status: :available} = state) do 97 | with {{:value, {message, from}}, queue} <- :queue.out(state.pending_calls), 98 | state = %{state | pending_calls: queue}, 99 | {:noreply, state} <- handle_call(message, from, state) do 100 | {:noreply, state, {:continue, :ready}} 101 | else 102 | {:empty, _queue} -> {:noreply, state} 103 | _ -> {:stop, :error} 104 | end 105 | end 106 | 107 | @impl GenServer 
108 | def handle_info({:stdout, _os_pid, message}, %State{} = state) do 109 | for line <- String.split(message, "\n"), line = String.trim(line), line != "" do 110 | _ = Logger.debug(line) 111 | end 112 | 113 | {:noreply, state} 114 | end 115 | 116 | @impl GenServer 117 | def handle_info({:stderr, _os_pid, message}, %State{} = state) do 118 | for line <- String.split(message, "\n"), line = String.trim(line), line != "" do 119 | _ = Logger.error(line) 120 | end 121 | 122 | {:noreply, state} 123 | end 124 | 125 | @impl GenServer 126 | def handle_info({:EXIT, pid, _}, %State{exec_pid: pid} = state) do 127 | {:stop, :bad_executable, %{state | status: :failed}} 128 | end 129 | 130 | @impl GenServer 131 | def handle_info({:EXIT, _, _}, %State{} = state) do 132 | # Acceptor exited 133 | {:noreply, state} 134 | end 135 | 136 | @impl GenServer 137 | def handle_info({:reply, reference, result}, %State{} = state) do 138 | {:ok, state} = send_response(reference, result, state) 139 | {:noreply, state} 140 | end 141 | 142 | defp send_request(command, payload, from, state) do 143 | reference = make_ref() 144 | call = {:call, self(), reference, command, payload} 145 | pending_requests = put_in(state.pending_requests, [reference], from) 146 | state = %{state | pending_requests: pending_requests} 147 | send(state.mailbox_pid, call) 148 | {:ok, state} 149 | end 150 | 151 | defp send_response(reference, result, state) do 152 | {from, pending_requests} = pop_in(state.pending_requests, [reference]) 153 | :ok = GenServer.reply(from, result) 154 | state = %{state | pending_requests: pending_requests} 155 | {:ok, state} 156 | end 157 | 158 | defp start_runner do 159 | priv_path = Application.app_dir(@otp_app, "priv") 160 | executable_path = Path.join(priv_path, "runner") 161 | model_path = Path.join(priv_path, get_model_name()) 162 | ready_module = to_string(__MODULE__) 163 | ready_function = to_string("ready") 164 | ready_value = to_string(:erlang.unique_integer([:positive])) 165 | 
logger_level = to_string(Logger.level()) 166 | 167 | environment = [ 168 | {"NODE_NAME", to_string(Node.self())}, 169 | {"NODE_COOKIE", to_string(Node.get_cookie())}, 170 | {"MODEL_PATH", model_path}, 171 | {"READY_MODULE", ready_module}, 172 | {"READY_FUNCTION", ready_function}, 173 | {"READY_VALUE", ready_value}, 174 | {"LOGGER_LEVEL", logger_level} 175 | ] 176 | 177 | options = [ 178 | {:stdout, self()}, 179 | {:stderr, self()}, 180 | {:env, [:clear | environment]} 181 | ] 182 | 183 | with {:ok, pid, os_pid} <- :exec.run_link([to_string(executable_path)], options) do 184 | {:ok, pid, os_pid, ready_value} 185 | end 186 | end 187 | 188 | defp start_acceptors(state) do 189 | {:ok, pid} = DynamicSupervisor.start_link([]) 190 | 191 | for _ <- 1..state.acceptors_count do 192 | {:ok, _} = DynamicSupervisor.start_child(pid, {Acceptor, self()}) 193 | end 194 | 195 | {:ok, %{state | acceptors_supervisor_pid: pid}} 196 | end 197 | 198 | defp get_model_name do 199 | Application.get_env(@otp_app, :model_name, "yolov5s.torchscript") 200 | end 201 | end 202 | -------------------------------------------------------------------------------- /apps/emporium_inference_yolov5/mix.exs: -------------------------------------------------------------------------------- 1 | defmodule EmporiumInference.YOLOv5.Mixfile do 2 | use Mix.Project 3 | @libtorch_dir Path.join(__DIR__, "../../vendor/libtorch") 4 | 5 | unless File.dir?(@libtorch_dir) do 6 | raise "Please install libtorch in #{@libtorch_dir}" 7 | end 8 | 9 | def project do 10 | [ 11 | app: :emporium_inference_yolov5, 12 | version: "0.1.0", 13 | elixir: "~> 1.13", 14 | elixirc_paths: elixirc_paths(Mix.env()), 15 | deps: deps(), 16 | 17 | # Application 18 | start_permanent: Mix.env() == :prod, 19 | build_path: "../../_build", 20 | config_path: "../../config/config.exs", 21 | deps_path: "../../deps", 22 | lockfile: "../../mix.lock", 23 | 24 | # Compilers 25 | compilers: [:elixir_make] ++ Mix.compilers(), 26 | make_targets: 
  # OTP application configuration: boots EmporiumInference.YOLOv5.Application.
  def application do
    [
      mod: {EmporiumInference.YOLOv5.Application, []},
      extra_applications: [:logger, :runtime_tools]
    ]
  end

  # Compile test support modules only in the :test environment.
  defp elixirc_paths(:test), do: ["lib", "test/support"]
  defp elixirc_paths(_), do: ["lib"]

  # Direct dependencies; sbroker is pinned to an exact version.
  defp deps do
    [
      {:emporium_inference, in_umbrella: true},
      {:elixir_make, "~> 0.7.6"},
      {:erlexec, "~> 1.21.0"},
      {:sbroker, "1.0.0"}
    ]
  end
defmodule EmporiumNexus.Application do
  # OTP application entry point: supervises the long-lived key server that
  # holds DTLS credentials for WebRTC sessions.
  use Application

  def start(_type, _args) do
    children = [EmporiumNexus.KeyServer]
    Supervisor.start_link(children, strategy: :one_for_one, name: EmporiumNexus.Supervisor)
  end
end
get_turn_mock_ip() 7 | {:ok, ports_range} = get_turn_ports_range() 8 | options = [ip: ip, mock_ip: mock_ip, ports_range: ports_range] 9 | 10 | case get_turn_tls_certificate_path() do 11 | {:ok, path} -> [cert_file: path] ++ options 12 | _ -> [cert_file: nil] ++ options 13 | end 14 | end 15 | 16 | def get_rtc_network_options do 17 | {:ok, dtls_cert} = GenServer.call(KeyServer, :get_dtls_cert) 18 | {:ok, dtls_pkey} = GenServer.call(KeyServer, :get_dtls_pkey) 19 | 20 | [ 21 | integrated_turn_options: get_turn_options(), 22 | integrated_turn_domain: nil, 23 | dtls_pkey: dtls_pkey, 24 | dtls_cert: dtls_cert 25 | ] 26 | end 27 | 28 | def get_webrtc_extensions(options) do 29 | alias Membrane.WebRTC.Extension.{Mid, RepairedRid, Rid, TWCC, VAD} 30 | alias EmporiumNexus.VideoOrientationExtension, as: VideoOrientation 31 | extensions = [TWCC, VideoOrientation] 32 | 33 | Enum.reduce(options, extensions, fn 34 | :simulcast, extensions -> [Mid, Rid, RepairedRid] ++ extensions 35 | :voice_activity_detection, extensions -> [VAD] ++ extensions 36 | end) 37 | end 38 | 39 | def get_webrtc_handshake_options do 40 | {:ok, dtls_cert} = GenServer.call(KeyServer, :get_dtls_cert) 41 | {:ok, dtls_pkey} = GenServer.call(KeyServer, :get_dtls_pkey) 42 | [client_mode: false, dtls_srtp: true, pkey: dtls_pkey, cert: dtls_cert] 43 | end 44 | 45 | defp get_turn_ip do 46 | case System.fetch_env("TURN_IP") do 47 | {:ok, value} -> parse_address(value) 48 | :error -> {:ok, {0, 0, 0, 0}} 49 | end 50 | end 51 | 52 | defp get_turn_mock_ip do 53 | case System.fetch_env("TURN_MOCK_IP") do 54 | {:ok, value} when is_binary(value) -> parse_address(value) 55 | :error -> get_turn_ip() 56 | end 57 | end 58 | 59 | defp get_turn_ports_range do 60 | with {:ok, value_from} <- System.fetch_env("TURN_PORT_UDP_FROM"), 61 | {:ok, value_to} <- System.fetch_env("TURN_PORT_UDP_TO"), 62 | {:ok, from_port} <- parse_port(value_from), 63 | {:ok, to_port} <- parse_port(value_to), 64 | true <- from_port > 1024, 65 | true <- 
from_port <= to_port do 66 | {:ok, {from_port, to_port}} 67 | else 68 | _ -> :error 69 | end 70 | end 71 | 72 | def get_turn_tcp_port do 73 | with {:ok, value} <- System.fetch_env("TURN_PORT_TCP"), 74 | {:ok, port} <- parse_port(value) do 75 | {:ok, port} 76 | else 77 | _ -> {:ok, nil} 78 | end 79 | end 80 | 81 | def get_turn_tls_port do 82 | with {:ok, value} <- System.fetch_env("TURN_PORT_TLS"), 83 | {:ok, port} <- parse_port(value) do 84 | {:ok, port} 85 | else 86 | _ -> :error 87 | end 88 | end 89 | 90 | def get_turn_tls_certificate_path do 91 | System.fetch_env("TURN_CERT_TLS") 92 | end 93 | 94 | defp parse_port(port_value) do 95 | with true <- is_binary(port_value), 96 | port when port in 1..65_535 <- String.to_integer(port_value) do 97 | {:ok, port} 98 | else 99 | _ -> :error 100 | end 101 | end 102 | 103 | defp parse_address(value) do 104 | case value |> to_charlist() |> :inet.parse_address() do 105 | {:ok, address} -> {:ok, address} 106 | {:error, :einval} -> :error 107 | end 108 | end 109 | end 110 | -------------------------------------------------------------------------------- /apps/emporium_nexus/lib/emporium_nexus/inference_endpoint.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumNexus.InferenceEndpoint do 2 | @moduledoc """ 3 | The Inference Endpoint is added to an existing WebRTC Engine which runs the Inference Session, 4 | responsible for carrying out inference on frames from each added track, which represents video 5 | sent from the client via WebRTC. 
6 | """ 7 | 8 | use Membrane.Bin 9 | require Membrane.Logger 10 | alias Membrane.RTC.Engine 11 | alias Membrane.RTC.Engine.Endpoint.WebRTC.TrackReceiver 12 | alias Membrane.RTC.Engine.Track 13 | alias EmporiumNexus.VideoOrientationTracker 14 | alias EmporiumNexus.VideoOrientationExtension 15 | alias EmporiumNexus.VideoFormatTracker 16 | alias Membrane.WebRTC.Extension 17 | alias EmporiumNexus.InferenceSink 18 | 19 | def_input_pad :input, 20 | demand_unit: :buffers, 21 | accepted_format: _any, 22 | availability: :on_request 23 | 24 | def_options rtc_engine_pid: [ 25 | spec: pid(), 26 | description: "Pid of RTC Engine" 27 | ], 28 | owner_pid: [ 29 | spec: pid(), 30 | description: "Pid of parent where notifications will be sent to" 31 | ] 32 | 33 | @impl Membrane.Bin 34 | def handle_init(_ctx, %{rtc_engine_pid: rtc_engine_pid, owner_pid: owner_pid}) do 35 | state = %{rtc_engine_pid: rtc_engine_pid, owner_pid: owner_pid, tracks: %{}, active: false} 36 | {[], state} 37 | end 38 | 39 | @impl true 40 | def handle_parent_notification({:new_tracks, tracks}, ctx, state) do 41 | {:endpoint, endpoint_id} = ctx.name 42 | 43 | tracks = 44 | Enum.reduce(tracks, state.tracks, fn track, tracks -> 45 | with true <- should_subscribe_track?(track) do 46 | :ok = Engine.subscribe(state.rtc_engine_pid, endpoint_id, track.id) 47 | Map.put(tracks, track.id, track) 48 | else 49 | false -> tracks 50 | end 51 | end) 52 | 53 | {[], %{state | tracks: tracks}} 54 | end 55 | 56 | @impl true 57 | def handle_parent_notification(_msg, _ctx, state) do 58 | {[], state} 59 | end 60 | 61 | @impl true 62 | def handle_child_notification({:variant_switched, _variant, _reason}, _child, _ctx, state) do 63 | {[], state} 64 | end 65 | 66 | @impl true 67 | def handle_child_notification( 68 | {:format_changed, format}, 69 | {:video_format_tracker, _track_id}, 70 | _ctx, 71 | %{active: true} = state 72 | ) do 73 | send(state.owner_pid, {:format_changed, format}) 74 | {[], state} 75 | end 76 | 77 | @impl true 78 | 
def handle_child_notification( 79 | {:orientation_changed, data}, 80 | {:video_orientation_tracker, track_id}, 81 | _ctx, 82 | %{active: true} = state 83 | ) do 84 | send(state.owner_pid, {:orientation_changed, data}) 85 | {[{:notify_child, {{:inferrer, track_id}, {:orientation_changed, data}}}], state} 86 | end 87 | 88 | @impl true 89 | def handle_child_notification(_notification, _child, _ctx, state) do 90 | {[], state} 91 | end 92 | 93 | @impl true 94 | def handle_spec_started(children, _ctx, state) do 95 | if Enum.any?(children, fn 96 | {:inferrer, _} -> true 97 | _ -> false 98 | end) do 99 | {[], %{state | active: true}} 100 | else 101 | {[], state} 102 | end 103 | end 104 | 105 | @impl true 106 | def handle_pad_added(Pad.ref(:input, track_id) = input_pad, _ctx, state) do 107 | track = Map.fetch!(state.tracks, track_id) 108 | child_spec = build_child_spec(input_pad, track, state.owner_pid) 109 | {[spec: child_spec], state} 110 | end 111 | 112 | defp build_child_spec(input_pad, track, owner_pid) do 113 | uri = VideoOrientationExtension.uri() 114 | [id] = get_in(track.ctx, [Extension, Access.filter(&(&1.uri == uri)), Access.key(:id)]) 115 | 116 | track_receiver = %TrackReceiver{track: track, initial_target_variant: :high} 117 | track_depayloader = Track.get_depayloader(track) 118 | video_orientation_tracker = %VideoOrientationTracker{extension_id: id} 119 | h264_parser = %Membrane.H264.FFmpeg.Parser{alignment: :au, attach_nalus?: true} 120 | h264_decoder = Membrane.H264.FFmpeg.Decoder 121 | video_size_tracker = VideoFormatTracker 122 | inference_sink = %InferenceSink{owner_pid: owner_pid} 123 | 124 | [ 125 | bin_input(input_pad) 126 | |> child({:track_receiver, track.id}, track_receiver) 127 | |> child({:depayloader, track.id}, track_depayloader) 128 | |> child({:video_orientation_tracker, track.id}, video_orientation_tracker) 129 | |> child({:parser, track.id}, h264_parser) 130 | |> child({:decoder, track.id}, h264_decoder) 131 | |> 
child({:video_format_tracker, track.id}, video_size_tracker)
      |> child({:inferrer, track.id}, inference_sink)
    ]
  end

  defp should_subscribe_track?(%Membrane.RTC.Engine.Track{} = track) do
    cond do
      track.type != :video -> false
      true -> Enum.member?([:H264], track.encoding)
    end
  end
end

# apps/emporium_nexus/lib/emporium_nexus/inference_requestor.ex
defmodule EmporiumNexus.InferenceRequestor do
  @moduledoc """
  Serialises inference requests on behalf of an owner process.

  At most one request is in flight at any time. While a request is running,
  newer buffers overwrite the pending one, so the freshest frame is the one
  inferred next. Replies produced by `request_fun` are sent directly to the
  owner process.

  The requestor traps exits: it stops when the owner goes down, and it uses
  the `:EXIT` of the spawned worker to know when the in-flight request has
  finished so a queued buffer can be dispatched.
  """

  use GenServer

  # Install (or replace) the stream format used for subsequent buffers.
  def request_format(pid, format) do
    GenServer.call(pid, {:request_format, format})
  end

  # Submit a buffer for inference; only the newest buffer is retained.
  def request_buffer(pid, buffer) do
    GenServer.call(pid, {:request_buffer, buffer})
  end

  def start_link(owner_pid, request_fun) do
    GenServer.start_link(__MODULE__, {owner_pid, request_fun})
  end

  defmodule State do
    # owner_pid       - process that receives inference replies
    # request_pid     - pid of the spawned in-flight worker (nil when idle)
    # request_fun     - fun(buffer, format) invoked in the worker
    # pending_format  - format to pass alongside the next buffer (nil = none yet)
    # pending_buffer  - newest buffer waiting to be dispatched (nil = none)
    defstruct owner_pid: nil,
              request_pid: nil,
              request_fun: nil,
              pending_format: nil,
              pending_buffer: nil
  end

  def init({owner_pid, request_fun}) do
    # Trap exits so worker completion/failure arrives as :EXIT messages
    # instead of killing this server.
    _ = Process.flag(:trap_exit, true)
    _ = Process.monitor(owner_pid)
    {:ok, %State{owner_pid: owner_pid, request_fun: request_fun}}
  end

  # A new format invalidates any buffer queued under the old format.
  def handle_call({:request_format, format}, _from, state) do
    {:reply, :ok, %{state | pending_format: format, pending_buffer: nil}}
  end

  # No format yet: inference cannot run, so drop the buffer.
  def handle_call({:request_buffer, _buffer}, _from, %State{pending_format: nil} = state) do
    {:reply, :ok, state}
  end

  # Idle (no worker in flight): dispatch immediately.
  #
  # NOTE: this clause used to match on `pending_buffer: nil`. Because
  # `pending_buffer` was never cleared after dispatch, every worker :EXIT
  # re-dispatched the same stale frame forever. Gating on `request_pid: nil`
  # (and clearing the buffer in handle_continue) fixes that while keeping
  # the single-in-flight invariant.
  def handle_call({:request_buffer, buffer}, _from, %State{request_pid: nil} = state) do
    {:reply, :ok, %{state | pending_buffer: buffer}, {:continue, :request_upstream}}
  end

  # Busy: remember only the newest buffer; it is dispatched when the
  # in-flight request finishes.
  def handle_call({:request_buffer, buffer}, _from, %State{} = state) do
    {:reply, :ok, %{state | pending_buffer: buffer}}
  end

  # Owner died (monitor): nothing left to report to, stop quietly.
  def handle_info({:DOWN, _, :process, pid, _}, %State{owner_pid: pid} = state) do
    {:stop, :normal, state}
  end

  # Owner exited (link): same as above.
  def handle_info({:EXIT, pid, _reason}, %State{owner_pid: pid} = state) do
    {:stop, :normal, state}
  end

  # Worker finished and nothing is queued: go idle.
  def handle_info({:EXIT, pid, _reason}, %State{request_pid: pid, pending_buffer: nil} = state) do
    {:noreply, %{state | request_pid: nil}}
  end

  # Worker finished and a newer buffer is queued: dispatch it.
  def handle_info({:EXIT, pid, _reason}, %State{request_pid: pid} = state) do
    {:noreply, %{state | request_pid: nil}, {:continue, :request_upstream}}
  end

  # Exits from anything else are ignored.
  def handle_info({:EXIT, _pid, _reason}, state) do
    {:noreply, state}
  end

  # Spawn a linked worker for the pending buffer, then clear the buffer so a
  # completed request is never re-dispatched with the same (stale) frame.
  def handle_continue(:request_upstream, state) do
    worker =
      spawn_link(fn ->
        reply = state.request_fun.(state.pending_buffer, state.pending_format)
        send(state.owner_pid, reply)
      end)

    {:noreply, %{state | request_pid: worker, pending_buffer: nil}}
  end
end

# apps/emporium_nexus/lib/emporium_nexus/inference_session.ex
defmodule EmporiumNexus.InferenceSession do
  @moduledoc """
  Encapsulates a single-user interactive session for object inference utilising WebRTC for video
  ingestion.
  """

  use GenServer
  require Membrane.Logger
  require Membrane.OpenTelemetry
  alias EmporiumNexus.Config
  alias EmporiumNexus.InferenceEndpoint
  alias Membrane.ICE.TURNManager
  alias Membrane.RTC.Engine
  alias Membrane.RTC.Engine.Endpoint.WebRTC
  alias Membrane.RTC.Engine.Endpoint.WebRTC.SimulcastConfig
  alias Membrane.RTC.Engine.Message.EndpointCrashed
  alias Membrane.RTC.Engine.Message.EndpointMessage
  alias Membrane.WebRTC.Track.Encoding

  @type option :: {:session_id, term()} | {:simulcast?, true | false}

  @spec start_link([option], GenServer.options()) :: GenServer.on_start()
  def start_link(init_arg, options \\ []) do
    GenServer.start_link(__MODULE__, init_arg, options)
  end

  @spec add_peer_channel(pid(), pid(), String.t()) :: :ok
  def add_peer_channel(session_pid, peer_channel_pid, peer_id) do
    GenServer.call(session_pid, {:add_peer, peer_channel_pid, peer_id})
  end

  @spec session_span_id(String.t()) :: String.t()
  def session_span_id(id), do: "session:#{id}"

  @impl true
  def init(options) do
    {:ok, session_id} = Keyword.fetch(options, :session_id)
    {:ok, simulcast?} = Keyword.fetch(options, :simulcast?)
5 | """ 6 | 7 | use GenServer 8 | require Membrane.Logger 9 | require Membrane.OpenTelemetry 10 | alias EmporiumNexus.Config 11 | alias EmporiumNexus.InferenceEndpoint 12 | alias Membrane.ICE.TURNManager 13 | alias Membrane.RTC.Engine 14 | alias Membrane.RTC.Engine.Endpoint.WebRTC 15 | alias Membrane.RTC.Engine.Endpoint.WebRTC.SimulcastConfig 16 | alias Membrane.RTC.Engine.Message.EndpointCrashed 17 | alias Membrane.RTC.Engine.Message.EndpointMessage 18 | alias Membrane.WebRTC.Track.Encoding 19 | 20 | @type option :: {:session_id, term()} | {:simulcast?, true | false} 21 | 22 | @spec start_link([option], GenServer.options()) :: GenServer.on_start() 23 | def start_link(init_arg, options \\ []) do 24 | GenServer.start_link(__MODULE__, init_arg, options) 25 | end 26 | 27 | @spec add_peer_channel(pid(), pid(), String.t()) :: :ok 28 | def add_peer_channel(session_pid, peer_channel_pid, peer_id) do 29 | GenServer.call(session_pid, {:add_peer, peer_channel_pid, peer_id}) 30 | end 31 | 32 | @spec session_span_id(String.t()) :: String.t() 33 | def session_span_id(id), do: "session:#{id}" 34 | 35 | @impl true 36 | def init(options) do 37 | {:ok, session_id} = Keyword.fetch(options, :session_id) 38 | {:ok, simulcast?} = Keyword.fetch(options, :simulcast?) 
39 | 40 | Logger.metadata(session_id: session_id) 41 | Membrane.Logger.info("Spawning room process: #{inspect(self())}") 42 | 43 | trace_ctx = Membrane.OpenTelemetry.new_ctx() 44 | _ = Membrane.OpenTelemetry.attach(trace_ctx) 45 | 46 | span_id = session_span_id(session_id) 47 | session_span = Membrane.OpenTelemetry.start_span(span_id) 48 | _ = Membrane.OpenTelemetry.set_attributes(span_id, tracing_metadata()) 49 | 50 | turn_options = Config.get_turn_options() 51 | rtc_network_options = Config.get_rtc_network_options() 52 | 53 | with {:ok, port} <- Config.get_turn_tcp_port() do 54 | TURNManager.ensure_tcp_turn_launched(turn_options, port: port) 55 | end 56 | 57 | with {:ok, port} <- Config.get_turn_tls_port(), 58 | {:ok, _path} <- Config.get_turn_tls_certificate_path() do 59 | TURNManager.ensure_tls_turn_launched(turn_options, port: port) 60 | end 61 | 62 | rtc_engine_options = [id: session_id, trace_ctx: trace_ctx, parent_span: session_span] 63 | {:ok, rtc_engine_pid} = Membrane.RTC.Engine.start_link(rtc_engine_options, []) 64 | Engine.register(rtc_engine_pid, self()) 65 | 66 | {:ok, 67 | %{ 68 | session_id: session_id, 69 | rtc_engine_pid: rtc_engine_pid, 70 | peer_id: nil, 71 | peer_channel_pid: nil, 72 | network_options: rtc_network_options, 73 | trace_ctx: trace_ctx, 74 | simulcast?: simulcast? 75 | }} 76 | end 77 | 78 | @impl true 79 | def handle_call({:add_peer, peer_channel_pid, peer_id}, _from, %{peer_id: nil} = state) do 80 | state = %{state | peer_channel_pid: peer_channel_pid, peer_id: peer_id} 81 | send(peer_channel_pid, {:simulcast_config, state.simulcast?}) 82 | Process.monitor(peer_channel_pid) 83 | Membrane.Logger.info("New peer: #{inspect(peer_id)}. 
Accepting.") 84 | peer_node = node(peer_channel_pid) 85 | 86 | inference_endpoint = %InferenceEndpoint{ 87 | rtc_engine_pid: state.rtc_engine_pid, 88 | owner_pid: peer_channel_pid 89 | } 90 | 91 | :ok = Engine.add_endpoint(state.rtc_engine_pid, inference_endpoint, endpoint_id: "inference") 92 | 93 | rtc_endpoint = %WebRTC{ 94 | rtc_engine: state.rtc_engine_pid, 95 | ice_name: peer_id, 96 | owner: self(), 97 | integrated_turn_options: state.network_options[:integrated_turn_options], 98 | integrated_turn_domain: state.network_options[:integrated_turn_domain], 99 | handshake_opts: Config.get_webrtc_handshake_options(), 100 | log_metadata: [peer_id: peer_id], 101 | trace_context: state.trace_ctx, 102 | webrtc_extensions: Config.get_webrtc_extensions((state.simulcast? && [:simulcast]) || []), 103 | rtcp_sender_report_interval: Membrane.Time.seconds(5), 104 | rtcp_receiver_report_interval: Membrane.Time.seconds(5), 105 | filter_codecs: &filter_codecs/1, 106 | toilet_capacity: 1000, 107 | simulcast_config: %SimulcastConfig{ 108 | enabled: state.simulcast?, 109 | initial_target_variant: fn _track -> :high end 110 | } 111 | } 112 | 113 | :ok = 114 | Engine.add_endpoint(state.rtc_engine_pid, rtc_endpoint, peer_id: peer_id, node: peer_node) 115 | 116 | {:reply, :ok, state} 117 | end 118 | 119 | def handle_call({:add_peer, _, _}, _from, state) do 120 | {:reply, {:error, :occupied}, state} 121 | end 122 | 123 | @impl true 124 | def handle_info(%EndpointMessage{message: {:media_event, data}}, state) do 125 | send(state.peer_channel_pid, {:media_event, data}) 126 | {:noreply, state} 127 | end 128 | 129 | @impl true 130 | def handle_info(%EndpointCrashed{}, state) do 131 | send(state.peer_channel_pid, :endpoint_crashed) 132 | {:noreply, state} 133 | end 134 | 135 | @impl true 136 | def handle_info({:media_event, to, event}, state) do 137 | Engine.message_endpoint(state.rtc_engine_pid, to, {:media_event, event}) 138 | {:noreply, state} 139 | end 140 | 141 | @impl true 142 | def 
handle_info({:DOWN, _ref, :process, pid, _reason}, %{peer_channel_pid: pid} = state) do 143 | peer_id = state.peer_id 144 | Membrane.Logger.info("Peer #{inspect(state.peer_id)} left") 145 | Engine.remove_endpoint(state.rtc_engine_pid, peer_id) 146 | Membrane.Logger.info("Terminating engine.") 147 | 148 | with :ok <- Engine.terminate(state.rtc_engine_pid, blocking?: true) do 149 | Membrane.Logger.info("Engine terminated.") 150 | {:stop, :normal, state} 151 | else 152 | _ -> 153 | _ = Process.exit(state.rtc_engine_pid, :kill) 154 | {:stop, :normal, state} 155 | end 156 | end 157 | 158 | defp filter_codecs(%Encoding{name: "H264", format_params: fmtp}) do 159 | import Bitwise 160 | 161 | # Only accept constrained baseline 162 | # based on RFC 6184, Table 5. 163 | case fmtp.profile_level_id >>> 16 do 164 | 0x42 -> (fmtp.profile_level_id &&& 0x00_4F_00) == 0x00_40_00 165 | 0x4D -> (fmtp.profile_level_id &&& 0x00_8F_00) == 0x00_80_00 166 | 0x58 -> (fmtp.profile_level_id &&& 0x00_CF_00) == 0x00_C0_00 167 | _otherwise -> false 168 | end 169 | end 170 | 171 | defp filter_codecs(_rtp_mapping), do: false 172 | 173 | defp tracing_metadata() do 174 | [ 175 | {:"library.language", :erlang}, 176 | {:"library.name", :membrane_rtc_engine}, 177 | {:"library.version", "server:#{Application.spec(:membrane_rtc_engine, :vsn)}"} 178 | ] 179 | end 180 | end 181 | -------------------------------------------------------------------------------- /apps/emporium_nexus/lib/emporium_nexus/inference_sink.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumNexus.InferenceSink do 2 | @moduledoc """ 3 | The Inference Sink is responsible for running inference on incoming raw video frames. 
4 | """ 5 | 6 | use Membrane.Sink 7 | alias Membrane.Buffer 8 | alias Membrane.RawVideo 9 | alias EmporiumInference.Image 10 | alias EmporiumNexus.InferenceRequestor 11 | 12 | def_input_pad :input, 13 | demand_unit: :buffers, 14 | mode: :pull, 15 | accepted_format: %Membrane.RawVideo{} 16 | 17 | def_options owner_pid: [ 18 | spec: pid(), 19 | description: "Pid of parent where notifications will be sent to" 20 | ] 21 | 22 | @impl true 23 | def handle_init(_ctx, %{owner_pid: owner_pid}) do 24 | {:ok, pid} = InferenceRequestor.start_link(owner_pid, &request_inference/2) 25 | {[], %{format: nil, orientation: <<0>>, requestor_pid: pid}} 26 | end 27 | 28 | @impl true 29 | def handle_stream_format(:input, %RawVideo{} = format, _ctx, state) do 30 | :ok = InferenceRequestor.request_format(state.requestor_pid, {format, state.orientation}) 31 | {[], %{state | format: format}} 32 | end 33 | 34 | @impl true 35 | def handle_parent_notification({:orientation_changed, data}, _ctx, state) do 36 | :ok = InferenceRequestor.request_format(state.requestor_pid, {state.format, data}) 37 | {[], %{state | orientation: data}} 38 | end 39 | 40 | @impl true 41 | def handle_write(:input, buffer, _ctx, state) do 42 | :ok = InferenceRequestor.request_buffer(state.requestor_pid, buffer) 43 | actions = [demand: :input] 44 | {actions, state} 45 | end 46 | 47 | @impl true 48 | def handle_event(pad, event, ctx, state) do 49 | super(pad, event, ctx, state) 50 | end 51 | 52 | @impl true 53 | def handle_terminate_request(_ctx, state) do 54 | {[terminate: :normal], state} 55 | end 56 | 57 | defp request_inference(%Buffer{} = buffer, {%RawVideo{} = format, orientation}) do 58 | image = build_image(buffer, format, orientation) 59 | {:ok, detections, _durations} = EmporiumInference.YOLOv5.request(image) 60 | {:detections, Membrane.Buffer.get_dts_or_pts(buffer), detections} 61 | # {:ok, classifications, _durations} = EmporiumInference.ResNet.request(image) 62 | # {:classifications, 
Membrane.Buffer.get_dts_or_pts(buffer), classifications} 63 | end 64 | 65 | defp build_image(%Buffer{} = buffer, %RawVideo{pixel_format: :I420} = format, orientation) do 66 | %Image{ 67 | width: format.width, 68 | height: format.height, 69 | format: :I420, 70 | orientation: build_image_orientation(orientation), 71 | data: buffer.payload 72 | } 73 | end 74 | 75 | defp build_image_orientation(<<0>>), do: :upright 76 | defp build_image_orientation(<<1>>), do: :rotated_90_ccw 77 | defp build_image_orientation(<<2>>), do: :rotated_180 78 | defp build_image_orientation(<<3>>), do: :rotated_90_cw 79 | end 80 | -------------------------------------------------------------------------------- /apps/emporium_nexus/lib/emporium_nexus/key_server.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumNexus.KeyServer do 2 | @moduledoc """ 3 | Module responsible for generating DTLS key and certificate for use during WebRTC handshake. 4 | """ 5 | 6 | use GenServer 7 | 8 | defmodule State do 9 | @type t :: %__MODULE__{ 10 | dtls_cert: nil | binary(), 11 | dtls_pkey: nil | binary(), 12 | dtls_path: nil | Path.t() 13 | } 14 | 15 | defstruct dtls_cert: nil, dtls_pkey: nil, dtls_path: nil 16 | end 17 | 18 | def start_link(init_arg) do 19 | GenServer.start_link(__MODULE__, init_arg, name: __MODULE__) 20 | end 21 | 22 | @impl GenServer 23 | def init(_) do 24 | {:ok, pid} = ExDTLS.start_link(client_mode: false, dtls_srtp: true) 25 | {:ok, pkey} = ExDTLS.get_pkey(pid) 26 | {:ok, cert} = ExDTLS.get_cert(pid) 27 | :ok = ExDTLS.stop(pid) 28 | 29 | {:ok, path} = Briefly.create() 30 | :ok = File.chmod(path, 0o600) 31 | :ok = File.write(path, "#{cert}\n#{pkey}") 32 | 33 | {:ok, %State{dtls_cert: cert, dtls_pkey: pkey, dtls_path: path}} 34 | end 35 | 36 | @impl GenServer 37 | def handle_call(:get_dtls_cert, _from, state) do 38 | {:reply, {:ok, state.dtls_cert}, state} 39 | end 40 | 41 | @impl GenServer 42 | def handle_call(:get_dtls_pkey, _from, 
state) do
    {:reply, {:ok, state.dtls_pkey}, state}
  end

  @impl GenServer
  def handle_call(:get_dtls_path, _from, state) do
    {:reply, {:ok, state.dtls_path}, state}
  end
end

# apps/emporium_nexus/lib/emporium_nexus/video_format_tracker.ex
defmodule EmporiumNexus.VideoFormatTracker do
  @moduledoc """
  A filter to be used in the inference pipeline, to tell the owner about the changes in input
  formats (such as width and height of the track being streamed)
  """

  use Membrane.Filter

  def_input_pad :input,
    availability: :always,
    accepted_format: _any,
    demand_mode: :auto

  def_output_pad :output,
    availability: :always,
    accepted_format: _any,
    demand_mode: :auto

  @impl true
  def handle_init(_ctx, _options) do
    # Start with no format observed, so the first stream format always
    # triggers a parent notification.
    {[], %{format: nil}}
  end

  # Stream format identical to the last one observed (pinned via the pattern
  # match): forward it downstream without notifying the parent again.
  @impl true
  def handle_stream_format(:input, stream_format, _ctx, %{format: stream_format} = state) do
    {[forward: stream_format], state}
  end

  # Stream format differs from the last one (or is the first one seen):
  # record it, tell the parent bin, and forward it downstream.
  @impl true
  def handle_stream_format(:input, stream_format, _ctx, state) do
    actions = [
      notify_parent: {:format_changed, stream_format},
      forward: stream_format
    ]

    {actions, %{state | format: stream_format}}
  end

  # Buffers pass through untouched; only formats are of interest here.
  @impl true
  def handle_process(:input, buffer, _ctx, state) do
    {[buffer: {:output, buffer}], state}
  end
end

# apps/emporium_nexus/lib/emporium_nexus/video_orientation_extension.ex
defmodule EmporiumNexus.VideoOrientationExtension do
  @moduledoc """
  Module implementing `Membrane.WebRTC.Extension` behaviour for Coordination of Video Orientation
  (CVO) inside the RTP Header.
5 | 6 | This extension is described at: 7 | 8 | https://www.tech-invite.com/3m26/toc/tinv-3gpp-26-114_f.html 9 | https://www.arib.or.jp/english/html/overview/doc/STD-T63V12_00/5_Appendix/Rel13/26/26114-d30.pdf 10 | """ 11 | @behaviour Membrane.WebRTC.Extension 12 | alias ExSDP.Attribute.Extmap 13 | alias ExSDP.Media 14 | alias Membrane.WebRTC.Extension 15 | 16 | @name :video_orientation 17 | @uri "urn:3gpp:video-orientation" 18 | 19 | @impl true 20 | def new(opts \\ Keyword.new()), 21 | do: %Extension{ 22 | module: __MODULE__, 23 | rtp_opts: opts, 24 | uri: @uri, 25 | name: @name 26 | } 27 | 28 | @impl Membrane.WebRTC.Extension 29 | def compatible?(:H264), do: true 30 | def compatible?(:VP8), do: true 31 | def compatible?(_), do: false 32 | 33 | @impl Membrane.WebRTC.Extension 34 | def get_rtp_module(_extmap_extension_id, _options, _track_type) do 35 | :no_rtp_module 36 | end 37 | 38 | @impl Membrane.WebRTC.Extension 39 | def add_to_media(media, id, _direction, _payload_types) do 40 | media 41 | |> Media.add_attribute(%Extmap{id: id, uri: @uri}) 42 | end 43 | 44 | @impl Membrane.WebRTC.Extension 45 | def uri do 46 | @uri 47 | end 48 | end 49 | -------------------------------------------------------------------------------- /apps/emporium_nexus/lib/emporium_nexus/video_orientation_tracker.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumNexus.VideoOrientationTracker do 2 | @moduledoc """ 3 | A filter to be used in the inference pipeline to tell the Endpoint about the orientation 4 | that is pulled from CVO header 5 | """ 6 | 7 | use Membrane.Filter 8 | alias Membrane.RTP.Header.Extension 9 | 10 | def_input_pad :input, 11 | availability: :always, 12 | accepted_format: _any, 13 | demand_mode: :auto 14 | 15 | def_output_pad :output, 16 | availability: :always, 17 | accepted_format: _any, 18 | demand_mode: :auto 19 | 20 | def_options extension_id: [ 21 | spec: 1..14, 22 | description: "RTP Extension ID of the 
Video Orientation extension." 23 | ] 24 | 25 | @impl true 26 | def handle_init(_ctx, options) do 27 | {[], %{orientation: nil, extension_id: options.extension_id}} 28 | end 29 | 30 | @impl true 31 | def handle_process(:input, buffer, _ctx, %{orientation: orientation} = state) do 32 | buffer_action = {:buffer, {:output, buffer}} 33 | 34 | case Extension.find(buffer, state.extension_id) do 35 | nil -> 36 | {[buffer_action], state} 37 | 38 | %{data: ^orientation} -> 39 | {[buffer_action], state} 40 | 41 | %{data: data} -> 42 | notify_action = {:notify_parent, {:orientation_changed, data}} 43 | {[notify_action, buffer_action], %{state | orientation: data}} 44 | end 45 | end 46 | end 47 | -------------------------------------------------------------------------------- /apps/emporium_nexus/mix.exs: -------------------------------------------------------------------------------- 1 | defmodule EmporiumNexus.Mixfile do 2 | use Mix.Project 3 | 4 | def project do 5 | [ 6 | app: :emporium_nexus, 7 | version: "0.1.0", 8 | build_path: "../../_build", 9 | config_path: "../../config/config.exs", 10 | deps_path: "../../deps", 11 | lockfile: "../../mix.lock", 12 | elixirc_paths: elixirc_paths(Mix.env()), 13 | compilers: Mix.compilers(), 14 | start_permanent: Mix.env() == :prod, 15 | deps: deps() 16 | ] 17 | end 18 | 19 | def application do 20 | [ 21 | mod: {EmporiumNexus.Application, []}, 22 | extra_applications: [:logger, :runtime_tools] 23 | ] 24 | end 25 | 26 | defp elixirc_paths(:test), do: ["lib", "test/support"] 27 | defp elixirc_paths(_), do: ["lib"] 28 | 29 | defp deps do 30 | # - membrane_core: use-push-flow-0.11 is a branch based on 0.11.3 in March 2023 31 | membrane_core = [github: "membraneframework/membrane_core", branch: "use-push-flow-0.11"] 32 | 33 | [ 34 | {:briefly, "~> 0.4.1"}, 35 | {:emporium_environment, in_umbrella: true}, 36 | {:emporium_inference_yolov5, in_umbrella: true}, 37 | {:membrane_core, membrane_core ++ [override: true]}, 38 | {:membrane_rtc_engine, 
"~> 0.11.0", override: true}
    ]
  end
end

# apps/emporium_proxy/config/config.exs
import Config

# The Proxy application should be configured from Umbrella root.
#
# - `:applications` should be a list of application names which will be dynamically
#   included as dependencies.
#
# - `:endpoints` should be a list of {endpoint_module, mount_point}.

config :emporium_proxy, EmporiumProxy,
  applications: [],
  endpoints: []

# apps/emporium_proxy/lib/emporium_proxy.ex
defmodule EmporiumProxy do
  @moduledoc """
  Configuration accessors for the proxy application.
  """

  @otp_app Mix.Project.config()[:app]

  @doc """
  Returns the TCP port the proxy listens on, read from the `PORT`
  environment variable.

  Uses `System.fetch_env!/1` so an unset `PORT` raises a descriptive
  `System.EnvError` instead of the opaque `FunctionClauseError` that
  `String.to_integer(nil)` produced. Still raises `ArgumentError` when
  `PORT` is set but not an integer.
  """
  def get_port do
    "PORT"
    |> System.fetch_env!()
    |> String.to_integer()
  end

  @doc "Returns the configured `{endpoint, mount}` pairs (empty when unconfigured)."
  def get_endpoints, do: Keyword.get(get_config(), :endpoints, [])

  defp get_config, do: Application.get_env(@otp_app, __MODULE__) || []
end

# apps/emporium_proxy/lib/emporium_proxy/application.ex
defmodule EmporiumProxy.Application do
  use Application

  def start(_type, _args) do
    children = build_children()
    opts = [strategy: :one_for_one, name: EmporiumProxy.Supervisor]
    Supervisor.start_link(children, opts)
  end

  # Only the WEB role runs the public Cowboy listener; other roles
  # (e.g. inference workers) start no proxy children.
  defp build_children do
    case System.get_env("ROLE") do
      "WEB" -> [EmporiumProxy.Cowboy.build_child_spec()]
      _ -> []
    end
  end
end

# apps/emporium_proxy/lib/emporium_proxy/cowboy.ex
1 | defmodule EmporiumProxy.Cowboy do 2 | def build_child_spec do 3 | Plug.Cowboy.child_spec( 4 | plug: nil, 5 | scheme: :http, 6 | options: [ 7 | port: EmporiumProxy.get_port(), 8 | dispatch: build_dispatch(), 9 | protocol_options: [ 10 | idle_timeout: 60000, 11 | inactivity_timeout: 60000 12 | ] 13 | ] 14 | ) 15 | end 16 | 17 | defp build_dispatch do 18 | [{:_, build_dispatch_websockets() ++ [build_dispatch_plug()]}] 19 | end 20 | 21 | defp build_dispatch_websockets do 22 | Enum.flat_map(EmporiumProxy.get_endpoints(), fn {endpoint, mount} -> 23 | Enum.map(endpoint.__sockets__(), fn {path, socket_module, options} -> 24 | path = Path.join([mount, path, "websocket"]) 25 | socket_options = build_dispatch_socket_options(endpoint, socket_module, options) 26 | handler_state = {{endpoint, mount}, {socket_module, socket_options}} 27 | {path, EmporiumProxy.WebsocketHandler, handler_state} 28 | end) 29 | end) 30 | end 31 | 32 | defp build_dispatch_plug do 33 | {:_, Plug.Cowboy.Handler, {EmporiumProxy.Router, []}} 34 | end 35 | 36 | defp build_dispatch_socket_options(_endpoint, _socket_module, _options) do 37 | :websocket 38 | end 39 | end 40 | -------------------------------------------------------------------------------- /apps/emporium_proxy/lib/emporium_proxy/router.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumProxy.Router do 2 | use Phoenix.Router 3 | 4 | for {endpoint, mount} <- EmporiumProxy.get_endpoints() do 5 | forward(mount, endpoint) 6 | end 7 | end 8 | -------------------------------------------------------------------------------- /apps/emporium_proxy/lib/emporium_proxy/websocket_handler.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumProxy.WebsocketHandler do 2 | @moduledoc """ 3 | Module responsible for handling all Websockets traffic. 
4 | 5 | Since Phoenix 1.7, the Cowboy2 handler for Websocket has been removed and replaced with 6 | an unified module Plug.Cowboy.Handler in plug_cowboy. Simply removing the path prefix 7 | (due to the script_name not having been fixed & configured) is however not enough for the 8 | workflow to work properly, as the upgrade function would be called with the wrong arguments. 9 | 10 | Hence the response from `Plug.Cowboy.Handler.init/2` is massaged slightly. 11 | """ 12 | 13 | @upstream Plug.Cowboy.Handler 14 | 15 | def init(request, {{endpoint, mount}, options}) do 16 | request_path = String.replace_prefix(request.path, mount, "") 17 | request = %{request | path: request_path} 18 | 19 | case @upstream.init(request, {endpoint, options}) do 20 | {@upstream, req, state, options} -> {__MODULE__, req, state, options} 21 | {:ok, req, {endpoint, opts}} -> {:ok, req, {endpoint, opts}} 22 | end 23 | end 24 | 25 | def upgrade(req, env, __MODULE__, state, options) do 26 | @upstream.upgrade(req, env, @upstream, state, options) 27 | end 28 | end 29 | -------------------------------------------------------------------------------- /apps/emporium_proxy/mix.exs: -------------------------------------------------------------------------------- 1 | defmodule EmporiumProxy.Mixfile do 2 | use Mix.Project 3 | 4 | def project do 5 | [ 6 | app: :emporium_proxy, 7 | version: "0.1.0", 8 | build_path: "../../_build", 9 | config_path: "../../config/config.exs", 10 | deps_path: "../../deps", 11 | lockfile: "../../mix.lock", 12 | elixirc_paths: elixirc_paths(Mix.env()), 13 | compilers: Mix.compilers(), 14 | start_permanent: Mix.env() == :prod, 15 | deps: proxy_deps() ++ deps() 16 | ] 17 | end 18 | 19 | def application do 20 | [ 21 | mod: {EmporiumProxy.Application, []}, 22 | extra_applications: [:logger, :runtime_tools] 23 | ] 24 | end 25 | 26 | defp elixirc_paths(:test), do: ["lib", "test/support"] 27 | defp elixirc_paths(_), do: ["lib"] 28 | 29 | defp proxy_deps do 30 | 
(Application.get_env(:emporium_proxy, EmporiumProxy) || []) 31 | |> Keyword.get(:applications, []) 32 | |> Enum.map(&{&1, in_umbrella: true}) 33 | end 34 | 35 | defp deps do 36 | [ 37 | {:emporium_environment, in_umbrella: true}, 38 | {:phoenix, "~> 1.7.2"}, 39 | {:plug_cowboy, "~> 2.6.1"} 40 | ] 41 | end 42 | end 43 | -------------------------------------------------------------------------------- /apps/emporium_web/.formatter.exs: -------------------------------------------------------------------------------- 1 | [ 2 | import_deps: [:phoenix], 3 | inputs: ["*.{ex,exs}", "{config,lib,test}/**/*.{ex,exs}"] 4 | ] 5 | -------------------------------------------------------------------------------- /apps/emporium_web/.gitignore: -------------------------------------------------------------------------------- 1 | # The directory Mix will write compiled artifacts to. 2 | /_build/ 3 | 4 | # If you run "mix test --cover", coverage assets end up here. 5 | /cover/ 6 | 7 | # The directory Mix downloads your dependencies sources to. 8 | /deps/ 9 | 10 | # Where 3rd-party dependencies like ExDoc output generated docs. 11 | /doc/ 12 | 13 | # Ignore .fetch files in case you like to edit your project deps locally. 14 | /.fetch 15 | 16 | # If the VM crashes, it generates a dump, let's ignore it too. 17 | erl_crash.dump 18 | 19 | # Also ignore archive artifacts (built via "mix archive.build"). 20 | *.ez 21 | 22 | # Ignore package tarball (built via "mix hex.build"). 23 | multi_snake_web-*.tar 24 | 25 | # Ignore assets that are produced by build tools. 26 | /priv/static/assets/ 27 | 28 | # Ignore digested assets cache. 29 | /priv/static/cache_manifest.json 30 | 31 | # In case you use Node.js/npm, you want to ignore these. 
32 | npm-debug.log 33 | /assets/node_modules/ 34 | 35 | -------------------------------------------------------------------------------- /apps/emporium_web/assets/css/screen-emporium.css: -------------------------------------------------------------------------------- 1 | .Emporium-container { 2 | width: 100%; 3 | height: 100%; 4 | background: gray; 5 | position: absolute; 6 | color: #ffffff; 7 | display: flex; 8 | flex-direction: column; 9 | align-items: center; 10 | } 11 | 12 | .Emporium-viewport { 13 | flex-grow: 1; 14 | flex-shrink: 1; 15 | width: 100%; 16 | max-height: 100%; 17 | overflow: hidden; 18 | display: flex; 19 | align-items: center; 20 | padding: 2em; 21 | } 22 | 23 | .Emporium-board { 24 | flex-grow: 1; 25 | flex-shrink: 1; 26 | max-height: 100%; 27 | max-width: 100%; 28 | } 29 | 30 | .Emporium-board-background { 31 | fill: black; 32 | } 33 | 34 | .Emporium-board-boundary { 35 | fill: white; 36 | } 37 | 38 | .Emporium-cell-cherry { 39 | fill: red; 40 | } 41 | 42 | .Emporium-cell-Emporium { 43 | fill: yellow; 44 | } 45 | 46 | .Emporium-cell-obstacle { 47 | fill: white; 48 | } 49 | -------------------------------------------------------------------------------- /apps/emporium_web/assets/css/screen-generic.css: -------------------------------------------------------------------------------- 1 | @import "bootstrap/dist/css/bootstrap.min.css"; 2 | @import "./screen-emporium.css"; 3 | 4 | .alert { 5 | padding: 15px; 6 | margin-bottom: 20px; 7 | border: 1px solid transparent; 8 | border-radius: 4px; 9 | } 10 | .alert-info { 11 | color: #31708f; 12 | background-color: #d9edf7; 13 | border-color: #bce8f1; 14 | } 15 | .alert-warning { 16 | color: #8a6d3b; 17 | background-color: #fcf8e3; 18 | border-color: #faebcc; 19 | } 20 | .alert-danger { 21 | color: #a94442; 22 | background-color: #f2dede; 23 | border-color: #ebccd1; 24 | } 25 | .alert p { 26 | margin-bottom: 0; 27 | } 28 | .alert:empty { 29 | display: none; 30 | } 31 | .invalid-feedback { 32 | color: 
#a94442; 33 | display: block; 34 | margin: -1rem 0 2rem; 35 | } 36 | 37 | /* LiveView specific classes for your customization */ 38 | .phx-no-feedback.invalid-feedback, 39 | .phx-no-feedback .invalid-feedback { 40 | display: none; 41 | } 42 | 43 | .phx-click-loading { 44 | opacity: 0.5; 45 | transition: opacity 1s ease-out; 46 | } 47 | 48 | .phx-loading{ 49 | cursor: wait; 50 | } 51 | 52 | .phx-modal { 53 | opacity: 1!important; 54 | position: fixed; 55 | z-index: 1; 56 | left: 0; 57 | top: 0; 58 | width: 100%; 59 | height: 100%; 60 | overflow: auto; 61 | background-color: rgba(0,0,0,0.4); 62 | } 63 | 64 | .phx-modal-content { 65 | background-color: #fefefe; 66 | margin: 15vh auto; 67 | padding: 20px; 68 | border: 1px solid #888; 69 | width: 80%; 70 | } 71 | 72 | .phx-modal-close { 73 | color: #aaa; 74 | float: right; 75 | font-size: 28px; 76 | font-weight: bold; 77 | } 78 | 79 | .phx-modal-close:hover, 80 | .phx-modal-close:focus { 81 | color: black; 82 | text-decoration: none; 83 | cursor: pointer; 84 | } 85 | 86 | body { 87 | background: black; 88 | min-height: 100vh; 89 | min-height: -webkit-fill-available; 90 | } 91 | 92 | [data-phx-main] { 93 | flex-grow: 1; 94 | } 95 | 96 | #main { 97 | display: flex; 98 | width: 100%; 99 | height: 100%; 100 | } 101 | 102 | video { 103 | width: 100%; 104 | height: 100%; 105 | object-fit: contain; 106 | } 107 | 108 | .list-detections { 109 | width: 100%; 110 | height: 100%; 111 | position: absolute; 112 | top: 0; 113 | right: 0; 114 | bottom: 0; 115 | left: 0; 116 | background: rgba(100, 0, 0, .25); 117 | } 118 | 119 | svg#webrtc-overlay { 120 | z-index: 5; 121 | position: absolute; 122 | top: 0; 123 | left: 0; 124 | width: 100%; 125 | height: 100%; 126 | } 127 | 128 | svg#webrtc-overlay .detection rect { 129 | fill: transparent; 130 | stroke: white; 131 | stroke-width: 2pt; 132 | vector-effect: non-scaling-stroke; 133 | } 134 | 135 | svg#webrtc-overlay .detection[data-class-name='hot dog'] rect { 136 | stroke: green; 137 | 
stroke-width: 4pt; 138 | } 139 | 140 | svg#webrtc-overlay .detection[data-class-name='hot dog'] text { 141 | fill: green; 142 | font-weight: bold; 143 | } 144 | 145 | #hotdog-overlay { 146 | z-index: 10; 147 | position: absolute; 148 | top: 0; 149 | left: 0; 150 | width: 100%; 151 | height: 100%; 152 | display: flex; 153 | flex-direction: column; 154 | align-items: center; 155 | justify-content: center; 156 | } 157 | 158 | .hotdog-message { 159 | border-radius: 4px; 160 | flex-basis: auto; 161 | font-size: 4em; 162 | padding: 0.5em; 163 | background: rgba(255, 255, 255, 0.4); 164 | } 165 | 166 | @supports (-webkit-backdrop-filter: none) or (backdrop-filter: none) { 167 | .hotdog-message { 168 | -webkit-backdrop-filter: blur(10px); 169 | backdrop-filter: blur(10px); 170 | } 171 | } 172 | -------------------------------------------------------------------------------- /apps/emporium_web/assets/esbuild.js: -------------------------------------------------------------------------------- 1 | // https://cloudless.studio/wrapping-your-head-around-assets-in-phoenix-1-6 2 | 3 | const esbuild = require('esbuild') 4 | 5 | let mode = 'build' 6 | let options = { 7 | entryPoints: ['js/screen-generic.js'], 8 | bundle: true, 9 | logLevel: 'info', 10 | target: 'es2016', 11 | outdir: '../priv/static/assets' 12 | } 13 | 14 | process.argv.slice(2).forEach((arg) => { 15 | if (arg === '--watch') { 16 | mode = 'watch' 17 | } else if (arg === '--deploy') { 18 | mode = 'deploy' 19 | } 20 | }) 21 | 22 | if (mode === 'watch') { 23 | options = {watch: true, sourcemap: 'inline', ...options} 24 | } else if (mode === 'deploy') { 25 | options = {minify: true, ...options} 26 | } 27 | 28 | esbuild.build(options).then((result) => { 29 | if (mode === 'watch') { 30 | process.stdin.pipe(process.stdout) 31 | process.stdin.on('end', () => { result.stop() }) 32 | } 33 | }).catch((error) => { 34 | process.exit(1) 35 | }) 36 | -------------------------------------------------------------------------------- 
/apps/emporium_web/assets/js/screen-generic.js: -------------------------------------------------------------------------------- 1 | import "../css/screen-generic.css" 2 | 3 | import "phoenix_html" 4 | import topbar from "topbar" 5 | import {Socket} from "phoenix" 6 | import {LiveSocket} from "phoenix_live_view" 7 | import {MembraneWebRTC, Peer, SerializedMediaEvent, TrackContext} from "@jellyfish-dev/membrane-webrtc-js"; 8 | 9 | let WebRTC = { 10 | async setupStream () { 11 | let originalMediaDevices = await navigator.mediaDevices.enumerateDevices() 12 | if (!(originalMediaDevices.some((device) => device.kind === "videoinput"))) { 13 | throw new Error("No video input") 14 | } 15 | await navigator.mediaDevices.getUserMedia({audio: false, video: true}) 16 | let mediaDevices = await navigator.mediaDevices.enumerateDevices() 17 | let videoDevices = mediaDevices.filter((device) => device.kind === "videoinput") 18 | let videoStream = null 19 | for (const videoDevice of videoDevices) { 20 | try { 21 | videoStream = await navigator.mediaDevices.getUserMedia({ 22 | video: { 23 | width: { max: 1280, ideal: 1280, min: 320 }, 24 | height: { max: 720, ideal: 720, min: 320 }, 25 | frameRate: { max: 30, ideal: 24 }, 26 | deviceId: { exact: videoDevice.deviceId }, 27 | facingMode: { exact: 'environment' } 28 | } 29 | }); 30 | console.log('got stream', videoStream) 31 | break 32 | } catch (error) { 33 | console.error("Error while getting local video stream", videoDevice, error) 34 | } 35 | } 36 | if (!videoStream) { 37 | // No environment cam, try looser constraints 38 | for (const videoDevice of videoDevices) { 39 | try { 40 | videoStream = await navigator.mediaDevices.getUserMedia({ 41 | video: { 42 | width: { max: 1280, ideal: 1280, min: 320 }, 43 | height: { max: 720, ideal: 720, min: 320 }, 44 | frameRate: { max: 30, ideal: 24 }, 45 | deviceId: { exact: videoDevice.deviceId } 46 | } 47 | }); 48 | break 49 | } catch (error) { 50 | console.error("Error while getting local video 
stream", videoDevice, error) 51 | } 52 | } 53 | } 54 | if (!videoStream) { 55 | throw new Error("Unable to acquire video stream") 56 | } 57 | return videoStream 58 | }, 59 | setupElement (element, stream) { 60 | element.autoplay = true 61 | element.playsInline = true 62 | element.muted = true 63 | element.srcObject = stream 64 | } 65 | } 66 | 67 | let WebRTCHook = { 68 | mounted () { 69 | window.addEventListener(`phx:acquire-streams`, (e) => { 70 | WebRTC.setupStream().then((videoStream) => { 71 | this.videoStream = videoStream 72 | this.videoElement = this.el.querySelector('video#webrtc-video'); 73 | WebRTC.setupElement(this.videoElement, this.videoStream) 74 | this.pushEvent("streams-acquired", {}) 75 | }).catch((error) => { 76 | this.pushEvent("error", {reason: error.message}) 77 | }) 78 | }) 79 | window.addEventListener(`phx:connect-session`, (e) => { 80 | this.peerID = e.detail.peer_id 81 | this.sessionID = e.detail.session_id 82 | this.membraneSession = new MembraneWebRTC({ 83 | callbacks: { 84 | onSendMediaEvent: (mediaEvent) => { 85 | this.pushEvent("mediaEvent", {data: mediaEvent}) 86 | }, 87 | onConnectionError: (message) => { 88 | this.pushEvent("error", {reason: message}) 89 | }, 90 | onJoinSuccess: (peerID, peers) => { 91 | let trackSettings = null; 92 | this.videoStream.getTracks().forEach((track) => { 93 | trackSettings = track.getSettings() 94 | this.videoTrackID = this.membraneSession.addTrack( 95 | track, 96 | this.videoStream, 97 | {}, 98 | { enabled: true, active_encodings: ["h"] } 99 | ) 100 | }) 101 | this.pushEvent("connected", { 102 | video: { 103 | width: trackSettings.width, 104 | height: trackSettings.height 105 | } 106 | }) 107 | }, 108 | onJoinError: (metadata) => { 109 | this.pushEvent("error", {reason: metadata}) 110 | } 111 | } 112 | }); 113 | this.membraneSession.join({ 114 | displayName: this.peerID 115 | }); 116 | }) 117 | window.addEventListener(`phx:handle-media-event`, (e) => { 118 | 
this.membraneSession.receiveMediaEvent(e.detail.data) 119 | }); 120 | window.addEventListener(`phx:handle-simulcast-config`, (e) => { 121 | return; 122 | }); 123 | } 124 | } 125 | 126 | topbar.config({barColors: {0: "#29d"}, shadowColor: "rgba(0, 0, 0, .3)"}) 127 | 128 | let liveSocket = new LiveSocket("/live", Socket, { 129 | params: { 130 | _csrf_token: document.querySelector("meta[name='csrf-token']").getAttribute("content") 131 | }, 132 | hooks: { 133 | WebRTC: WebRTCHook 134 | } 135 | }) 136 | 137 | liveSocket.connect() 138 | 139 | window.addEventListener("phx:page-loading-start", info => topbar.show()) 140 | window.addEventListener("phx:page-loading-stop", info => topbar.hide()) 141 | window.liveSocket = liveSocket 142 | -------------------------------------------------------------------------------- /apps/emporium_web/assets/package-lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "emporium_web_assets", 3 | "version": "0.1.0", 4 | "lockfileVersion": 2, 5 | "requires": true, 6 | "packages": { 7 | "": { 8 | "name": "emporium_web_assets", 9 | "version": "0.1.0", 10 | "dependencies": { 11 | "@jellyfish-dev/membrane-webrtc-js": "^0.4.5", 12 | "phoenix": "file:../../../deps/phoenix", 13 | "phoenix_html": "file:../../../deps/phoenix_html", 14 | "phoenix_live_view": "file:../../../deps/phoenix_live_view", 15 | "topbar": "^1.0.1" 16 | }, 17 | "devDependencies": { 18 | "bootstrap": "^5.1.3", 19 | "esbuild": "^0.12.24" 20 | } 21 | }, 22 | "../../../deps/phoenix": { 23 | "version": "1.7.2", 24 | "integrity": "sha512-YZ5mXU2cHd/VUuPSv5AELv/bT+0uJ94PQjiuDS7jVNBU8n75WzrpWzQ5OJ5qSuMVCqOtZK0GJd6WlFVUSmTKAQ==", 25 | "license": "MIT" 26 | }, 27 | "../../../deps/phoenix_html": { 28 | "version": "3.3.1", 29 | "integrity": "sha512-zv7PIZk0MPkF0ax8n465Q6w86+sGAy5cTem6KcbkUbdgxGc0y3WZmzkM2bSlYdSGbLEZfjXxos1G72xXsha6xA==" 30 | }, 31 | "../../../deps/phoenix_live_view": { 32 | "version": "0.18.18", 33 | "integrity": 
"sha512-bCA7+1T3IrUsPgvkQn/GFjppuvKlnZFzOi8bo1vRS/fz57Yky1MMaOux3mDv4LCBWRiXBN6Zycp0/6wDTvL8dg==", 34 | "license": "MIT" 35 | }, 36 | "node_modules/@jellyfish-dev/membrane-webrtc-js": { 37 | "version": "0.4.5", 38 | "resolved": "https://registry.npmjs.org/@jellyfish-dev/membrane-webrtc-js/-/membrane-webrtc-js-0.4.5.tgz", 39 | "integrity": "sha512-YwfecgZZt6K6Cmq7AAf6TqDM0whLZM6FpxuvxXgBKnAJi3DMg+m3GqYTklM3r1g2MneIVIsPiNtjYszGaBsAvg==", 40 | "dependencies": { 41 | "uuid": "^8.3.2" 42 | } 43 | }, 44 | "node_modules/@popperjs/core": { 45 | "version": "2.11.5", 46 | "resolved": "https://registry.npmjs.org/@popperjs/core/-/core-2.11.5.tgz", 47 | "integrity": "sha512-9X2obfABZuDVLCgPK9aX0a/x4jaOEweTTWE2+9sr0Qqqevj2Uv5XorvusThmc9XGYpS9yI+fhh8RTafBtGposw==", 48 | "dev": true, 49 | "peer": true, 50 | "funding": { 51 | "type": "opencollective", 52 | "url": "https://opencollective.com/popperjs" 53 | } 54 | }, 55 | "node_modules/bootstrap": { 56 | "version": "5.1.3", 57 | "resolved": "https://registry.npmjs.org/bootstrap/-/bootstrap-5.1.3.tgz", 58 | "integrity": "sha512-fcQztozJ8jToQWXxVuEyXWW+dSo8AiXWKwiSSrKWsRB/Qt+Ewwza+JWoLKiTuQLaEPhdNAJ7+Dosc9DOIqNy7Q==", 59 | "dev": true, 60 | "funding": { 61 | "type": "opencollective", 62 | "url": "https://opencollective.com/bootstrap" 63 | }, 64 | "peerDependencies": { 65 | "@popperjs/core": "^2.10.2" 66 | } 67 | }, 68 | "node_modules/esbuild": { 69 | "version": "0.12.29", 70 | "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.12.29.tgz", 71 | "integrity": "sha512-w/XuoBCSwepyiZtIRsKsetiLDUVGPVw1E/R3VTFSecIy8UR7Cq3SOtwKHJMFoVqqVG36aGkzh4e8BvpO1Fdc7g==", 72 | "dev": true, 73 | "hasInstallScript": true, 74 | "bin": { 75 | "esbuild": "bin/esbuild" 76 | } 77 | }, 78 | "node_modules/phoenix": { 79 | "resolved": "../../../deps/phoenix", 80 | "link": true 81 | }, 82 | "node_modules/phoenix_html": { 83 | "resolved": "../../../deps/phoenix_html", 84 | "link": true 85 | }, 86 | "node_modules/phoenix_live_view": { 87 | "resolved": 
"../../../deps/phoenix_live_view", 88 | "link": true 89 | }, 90 | "node_modules/topbar": { 91 | "version": "1.0.1", 92 | "resolved": "https://registry.npmjs.org/topbar/-/topbar-1.0.1.tgz", 93 | "integrity": "sha512-HZqQSMBiG29vcjOrqKCM9iGY/h69G5gQH7ae83ZCPz5uPmbQKwK0sMEqzVDBiu64tWHJ+kk9NApECrF+FAAvRA==" 94 | }, 95 | "node_modules/uuid": { 96 | "version": "8.3.2", 97 | "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", 98 | "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", 99 | "bin": { 100 | "uuid": "dist/bin/uuid" 101 | } 102 | } 103 | }, 104 | "dependencies": { 105 | "@jellyfish-dev/membrane-webrtc-js": { 106 | "version": "0.4.5", 107 | "resolved": "https://registry.npmjs.org/@jellyfish-dev/membrane-webrtc-js/-/membrane-webrtc-js-0.4.5.tgz", 108 | "integrity": "sha512-YwfecgZZt6K6Cmq7AAf6TqDM0whLZM6FpxuvxXgBKnAJi3DMg+m3GqYTklM3r1g2MneIVIsPiNtjYszGaBsAvg==", 109 | "requires": { 110 | "uuid": "^8.3.2" 111 | } 112 | }, 113 | "@popperjs/core": { 114 | "version": "2.11.5", 115 | "resolved": "https://registry.npmjs.org/@popperjs/core/-/core-2.11.5.tgz", 116 | "integrity": "sha512-9X2obfABZuDVLCgPK9aX0a/x4jaOEweTTWE2+9sr0Qqqevj2Uv5XorvusThmc9XGYpS9yI+fhh8RTafBtGposw==", 117 | "dev": true, 118 | "peer": true 119 | }, 120 | "bootstrap": { 121 | "version": "5.1.3", 122 | "resolved": "https://registry.npmjs.org/bootstrap/-/bootstrap-5.1.3.tgz", 123 | "integrity": "sha512-fcQztozJ8jToQWXxVuEyXWW+dSo8AiXWKwiSSrKWsRB/Qt+Ewwza+JWoLKiTuQLaEPhdNAJ7+Dosc9DOIqNy7Q==", 124 | "dev": true, 125 | "requires": {} 126 | }, 127 | "esbuild": { 128 | "version": "0.12.29", 129 | "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.12.29.tgz", 130 | "integrity": "sha512-w/XuoBCSwepyiZtIRsKsetiLDUVGPVw1E/R3VTFSecIy8UR7Cq3SOtwKHJMFoVqqVG36aGkzh4e8BvpO1Fdc7g==", 131 | "dev": true 132 | }, 133 | "phoenix": { 134 | "version": "file:../../../deps/phoenix" 135 | }, 136 | "phoenix_html": { 137 | "version": 
"file:../../../deps/phoenix_html" 138 | }, 139 | "phoenix_live_view": { 140 | "version": "file:../../../deps/phoenix_live_view" 141 | }, 142 | "topbar": { 143 | "version": "1.0.1", 144 | "resolved": "https://registry.npmjs.org/topbar/-/topbar-1.0.1.tgz", 145 | "integrity": "sha512-HZqQSMBiG29vcjOrqKCM9iGY/h69G5gQH7ae83ZCPz5uPmbQKwK0sMEqzVDBiu64tWHJ+kk9NApECrF+FAAvRA==" 146 | }, 147 | "uuid": { 148 | "version": "8.3.2", 149 | "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", 150 | "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==" 151 | } 152 | } 153 | } 154 | -------------------------------------------------------------------------------- /apps/emporium_web/assets/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "emporium_web_assets", 3 | "version": "0.1.0", 4 | "private": true, 5 | "scripts": { 6 | "build": "node esbuild.js", 7 | "deploy": "node esbuild.js --deploy", 8 | "watch": "node esbuild.js --watch" 9 | }, 10 | "dependencies": { 11 | "@jellyfish-dev/membrane-webrtc-js": "^0.4.5", 12 | "phoenix": "file:../../../deps/phoenix", 13 | "phoenix_html": "file:../../../deps/phoenix_html", 14 | "phoenix_live_view": "file:../../../deps/phoenix_live_view", 15 | "topbar": "^1.0.1" 16 | }, 17 | "devDependencies": { 18 | "bootstrap": "^5.1.3", 19 | "esbuild": "^0.12.24" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /apps/emporium_web/config/config.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | 3 | config :emporium_web, generators: [context_app: :emporium] 4 | 5 | config :emporium_web, EmporiumWeb.Endpoint, 6 | render_errors: [view: EmporiumWeb.ErrorView, accepts: ~w(html json)], 7 | pubsub_server: EmporiumWeb.PubSub 8 | 9 | import_config "#{config_env()}.exs" 10 | 
-------------------------------------------------------------------------------- /apps/emporium_web/config/dev.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | 3 | config :emporium_web, EmporiumWeb.Endpoint, 4 | debug_errors: true, 5 | code_reloader: true, 6 | check_origin: false, 7 | watchers: [ 8 | node: ["esbuild.js", "--watch", cd: Path.expand("../assets", __DIR__)] 9 | ], 10 | live_reload: [ 11 | patterns: [ 12 | ~r"priv/static/.*(js|css|png|jpeg|jpg|gif|svg)$", 13 | ~r"priv/gettext/.*(po)$", 14 | ~r"lib/emporium_web/(live|views)/.*(ex)$", 15 | ~r"lib/emporium_web/templates/.*(eex)$" 16 | ] 17 | ] 18 | -------------------------------------------------------------------------------- /apps/emporium_web/config/prod.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | 3 | config :emporium_web, EmporiumWeb.Endpoint, 4 | cache_static_manifest: "priv/static/cache_manifest.json", 5 | force_ssl: [rewrite_on: [:x_forwarded_proto]], 6 | server: true 7 | -------------------------------------------------------------------------------- /apps/emporium_web/config/test.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | 3 | config :emporium_web, EmporiumWeb.Endpoint, 4 | http: [port: {:system, "URL_PORT", 4010}], 5 | secret_key_base: :base64.encode(:crypto.strong_rand_bytes(128)), 6 | live_view: [signing_salt: :base64.encode(:crypto.strong_rand_bytes(128))] 7 | -------------------------------------------------------------------------------- /apps/emporium_web/lib/emporium_web.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumWeb do 2 | def controller do 3 | quote do 4 | use Phoenix.Controller, namespace: EmporiumWeb 5 | import Plug.Conn 6 | import EmporiumWeb.Gettext 7 | alias EmporiumWeb.Router.Helpers, as: Routes 8 | end 9 | end 10 | 11 | def view do 
12 | quote do 13 | use Phoenix.View, 14 | root: "lib/emporium_web/templates", 15 | namespace: EmporiumWeb 16 | 17 | import Phoenix.Controller, 18 | only: [get_flash: 1, get_flash: 2, view_module: 1, view_template: 1] 19 | 20 | import Phoenix.Component 21 | unquote(view_helpers()) 22 | end 23 | end 24 | 25 | def live_view do 26 | quote do 27 | use Phoenix.LiveView, layout: {EmporiumWeb.LayoutView, :live} 28 | unquote(view_helpers()) 29 | end 30 | end 31 | 32 | def live_component do 33 | quote do 34 | use Phoenix.LiveComponent 35 | unquote(view_helpers()) 36 | end 37 | end 38 | 39 | def component do 40 | quote do 41 | use Phoenix.Component 42 | unquote(view_helpers()) 43 | end 44 | end 45 | 46 | def router do 47 | quote do 48 | use Phoenix.Router 49 | import Plug.Conn 50 | import Phoenix.Controller 51 | import Phoenix.LiveView.Router 52 | end 53 | end 54 | 55 | def channel do 56 | quote do 57 | use Phoenix.Channel 58 | import EmporiumWeb.Gettext 59 | end 60 | end 61 | 62 | defp view_helpers do 63 | quote do 64 | use Phoenix.HTML 65 | import Phoenix.LiveView.Helpers 66 | import Phoenix.View 67 | import EmporiumWeb.ErrorHelpers 68 | import EmporiumWeb.Gettext 69 | alias EmporiumWeb.Router.Helpers, as: Routes 70 | end 71 | end 72 | 73 | @doc """ 74 | When used, dispatch to the appropriate controller/view/etc. 
75 | """ 76 | defmacro __using__(which) when is_atom(which) do 77 | apply(__MODULE__, which, []) 78 | end 79 | end 80 | -------------------------------------------------------------------------------- /apps/emporium_web/lib/emporium_web/application.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumWeb.Application do 2 | @moduledoc false 3 | use Application 4 | 5 | @impl true 6 | def start(_type, _args) do 7 | children = [ 8 | {Phoenix.PubSub, name: EmporiumWeb.PubSub}, 9 | EmporiumWeb.Presence, 10 | EmporiumWeb.Telemetry, 11 | EmporiumWeb.Endpoint 12 | ] 13 | 14 | opts = [strategy: :one_for_one, name: EmporiumWeb.Supervisor] 15 | Supervisor.start_link(children, opts) 16 | end 17 | 18 | @impl true 19 | def config_change(changed, _new, removed) do 20 | EmporiumWeb.Endpoint.config_change(changed, removed) 21 | :ok 22 | end 23 | end 24 | -------------------------------------------------------------------------------- /apps/emporium_web/lib/emporium_web/controllers/page_controller.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumWeb.PageController do 2 | use EmporiumWeb, :controller 3 | 4 | def index(conn, _params) do 5 | render(conn, "index.html") 6 | end 7 | end 8 | -------------------------------------------------------------------------------- /apps/emporium_web/lib/emporium_web/endpoint.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumWeb.Endpoint do 2 | @otp_app Mix.Project.config()[:app] 3 | use Phoenix.Endpoint, otp_app: @otp_app 4 | 5 | @session_options [ 6 | store: :cookie, 7 | key: "_emporium_web_key", 8 | signing_salt: "sRR3qZA6" 9 | ] 10 | 11 | socket "/live", Phoenix.LiveView.Socket, websocket: [connect_info: [session: @session_options]] 12 | 13 | plug Plug.Static, 14 | at: "/", 15 | from: :emporium_web, 16 | gzip: false, 17 | only: ~w(assets favicon.ico robots.txt) 18 | 19 | if 
code_reloading? do 20 | socket "/phoenix/live_reload/socket", Phoenix.LiveReloader.Socket 21 | plug Phoenix.LiveReloader 22 | plug Phoenix.CodeReloader 23 | end 24 | 25 | plug Phoenix.LiveDashboard.RequestLogger, 26 | param_key: "request_logger", 27 | cookie_key: "request_logger" 28 | 29 | plug Plug.RequestId 30 | plug Plug.Telemetry, event_prefix: [:phoenix, :endpoint] 31 | 32 | plug Plug.Parsers, 33 | parsers: [:urlencoded, :multipart, :json], 34 | pass: ["*/*"], 35 | json_decoder: Phoenix.json_library() 36 | 37 | plug Plug.MethodOverride 38 | plug Plug.Head 39 | plug Plug.Session, @session_options 40 | plug EmporiumWeb.Router 41 | 42 | def init(_, config) do 43 | EmporiumEnvironment.Endpoint.init(config) 44 | end 45 | end 46 | -------------------------------------------------------------------------------- /apps/emporium_web/lib/emporium_web/gettext.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumWeb.Gettext do 2 | @moduledoc """ 3 | A module providing Internationalization with a gettext-based API. 4 | 5 | By using [Gettext](https://hexdocs.pm/gettext), 6 | your module gains a set of macros for translations, for example: 7 | 8 | import EmporiumWeb.Gettext 9 | 10 | # Simple translation 11 | gettext("Here is the string to translate") 12 | 13 | # Plural translation 14 | ngettext("Here is the string to translate", 15 | "Here are the strings to translate", 16 | 3) 17 | 18 | # Domain-based translation 19 | dgettext("errors", "Here is the error message to translate") 20 | 21 | See the [Gettext Docs](https://hexdocs.pm/gettext) for detailed usage. 
22 | """ 23 | use Gettext, otp_app: :emporium_web 24 | end 25 | -------------------------------------------------------------------------------- /apps/emporium_web/lib/emporium_web/presence.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumWeb.Presence do 2 | use Phoenix.Presence, otp_app: :emporium_web, pubsub_server: EmporiumWeb.PubSub 3 | end 4 | -------------------------------------------------------------------------------- /apps/emporium_web/lib/emporium_web/router.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumWeb.Router do 2 | use EmporiumWeb, :router 3 | 4 | pipeline :browser do 5 | plug :accepts, ["html"] 6 | plug :fetch_session 7 | plug :fetch_live_flash 8 | plug :put_root_layout, {EmporiumWeb.LayoutView, :root} 9 | plug :protect_from_forgery 10 | plug :put_secure_browser_headers 11 | end 12 | 13 | pipeline :api do 14 | plug :accepts, ["json"] 15 | end 16 | 17 | scope "/", EmporiumWeb do 18 | pipe_through :browser 19 | live "/", SessionLive, :index 20 | end 21 | 22 | import Phoenix.LiveDashboard.Router 23 | 24 | scope "/" do 25 | pipe_through :browser 26 | 27 | live_dashboard "/dashboard", 28 | metrics: EmporiumWeb.Telemetry 29 | end 30 | end 31 | -------------------------------------------------------------------------------- /apps/emporium_web/lib/emporium_web/session_live.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumWeb.SessionLive do 2 | use Phoenix.LiveView 3 | alias EmporiumNexus.InferenceSession 4 | 5 | @typedoc """ 6 | Describes the status of the Session. 7 | 8 | - `:pending`: The LiveView was rendred initially; there is no WebRTC connection / media tracks. 9 | - `:acquiring`: The client has been instructed to acquire media tracks. 
The LiveView instructs 10 | the to acquire streams (event `"acquire-streams"`), and waits for `"streams-acquired"` event 11 | - `:connecting`: The client has been instructed to connect via WebRTC to the Inference Session 12 | - `:connected`: WebRTC connection is good 13 | - {:error, reason}: WebRTC initiation or connection issue, permission issue etc 14 | """ 15 | @type status :: :pending | :acquiring | :connecting | :connected | {:error, reason :: term()} 16 | 17 | alias EmporiumNexus.InferenceSession 18 | 19 | def mount(_params, _session, socket) do 20 | if connected?(socket) do 21 | socket 22 | |> assign(:status, :acquiring) 23 | |> push_event("acquire-streams", %{}) 24 | |> (&{:ok, &1}).() 25 | else 26 | socket 27 | |> assign(:status, :pending) 28 | |> (&{:ok, &1}).() 29 | end 30 | end 31 | 32 | def handle_event("streams-acquired", _params, %{assigns: %{status: :acquiring}} = socket) do 33 | session_id = to_string(:erlang.unique_integer([:positive])) 34 | peer_id = to_string(:erlang.unique_integer([:positive])) 35 | {:ok, session_pid} = InferenceSession.start_link(session_id: session_id, simulcast?: true) 36 | :ok = InferenceSession.add_peer_channel(session_pid, self(), peer_id) 37 | 38 | socket 39 | |> assign(:status, :connecting) 40 | |> assign(:session_id, session_id) 41 | |> assign(:session_pid, session_pid) 42 | |> assign(:peer_id, peer_id) 43 | |> push_event("connect-session", %{session_id: session_id, peer_id: peer_id}) 44 | |> (&{:noreply, &1}).() 45 | end 46 | 47 | def handle_event("connected", %{"video" => %{"width" => width, "height" => height}}, socket) do 48 | socket 49 | |> assign(:status, :connected) 50 | |> assign(:video_width, width) 51 | |> assign(:video_height, height) 52 | |> assign(:video_orientation, <<0>>) 53 | |> assign(:detections, []) 54 | |> assign(:detections_has_hotdog, nil) 55 | |> assign(:classifications, []) 56 | |> assign(:classifications_has_hotdog, nil) 57 | |> update_overlay_assigns() 58 | |> (&{:noreply, &1}).() 59 | end 60 
| 61 | def handle_event("mediaEvent", %{"data" => data}, socket) do 62 | send(socket.assigns.session_pid, {:media_event, socket.assigns.peer_id, data}) 63 | {:noreply, socket} 64 | end 65 | 66 | def handle_event("error", %{"reason" => reason}, socket) do 67 | socket 68 | |> assign(:status, {:error, reason}) 69 | |> (&{:noreply, &1}).() 70 | end 71 | 72 | def handle_info({:media_event, data}, socket) do 73 | socket 74 | |> push_event("handle-media-event", %{data: data}) 75 | |> (&{:noreply, &1}).() 76 | end 77 | 78 | def handle_info({:detections, _timestamp, detections}, socket) do 79 | socket 80 | |> assign(:detections, build_detections(detections)) 81 | |> assign(:detections_has_hotdog, build_detections_has_hotdog(detections)) 82 | |> (&{:noreply, &1}).() 83 | end 84 | 85 | def handle_info({:classifications, _timestamp, classifications}, socket) do 86 | socket 87 | |> assign(:classifications, build_classifications(classifications)) 88 | |> assign(:classifications_has_hotdog, build_classifications_has_hotdog(classifications)) 89 | |> (&{:noreply, &1}).() 90 | end 91 | 92 | def handle_info({:simulcast_config, data}, socket) do 93 | socket 94 | |> push_event("handle-simulcast-config", %{data: data}) 95 | |> (&{:noreply, &1}).() 96 | end 97 | 98 | def handle_info({:format_changed, %{width: width, height: height}}, socket) do 99 | socket 100 | |> assign(:video_width, width) 101 | |> assign(:video_height, height) 102 | |> update_overlay_assigns() 103 | |> (&{:noreply, &1}).() 104 | end 105 | 106 | def handle_info({:orientation_changed, data}, socket) do 107 | socket 108 | |> assign(:video_orientation, data) 109 | |> update_overlay_assigns() 110 | |> (&{:noreply, &1}).() 111 | end 112 | 113 | def handle_info(:endpoint_crashed, socket) do 114 | socket 115 | |> assign(:session_id, nil) 116 | |> assign(:session_pid, nil) 117 | |> assign(:peer_id, nil) 118 | |> assign(:status, {:error, "Backend process crashed"}) 119 | |> (&{:noreply, &1}).() 120 | end 121 | 122 | def 
handle_info(_message, socket) do 123 | {:noreply, socket} 124 | end 125 | 126 | def render( 127 | %{ 128 | status: :connected, 129 | video_orientation: orientation 130 | } = assigns 131 | ) 132 | when not is_nil(orientation) do 133 | ~H""" 134 |
135 | 137 | 141 | 147 | 148 | <%= for %{ 149 | rect_attributes: rect, 150 | text_attributes: text, 151 | group_attributes: group, 152 | class_name: class_name, 153 | } <- @detections do %> 154 | 155 | 156 | <%= class_name %> 157 | 158 | <% end %> 159 | 160 | 161 |
162 |
163 | <%= if @detections_has_hotdog || @classifications_has_hotdog do %> 164 | 🌭 Hotdog 165 | <% else %> 166 | ❌ Not Hotdog 167 | <% end %> 168 |
169 |
170 |
171 | """ 172 | end 173 | 174 | def render(%{status: {:error, reason}} = assigns) do 175 | assigns = assign(assigns, :error_reason, reason) 176 | 177 | ~H""" 178 |
179 | Error: <%= @error_reason %> 180 |
181 | """ 182 | end 183 | 184 | def render(%{status: _} = assigns) do 185 | ~H""" 186 |
187 | 189 |
190 | """ 191 | end 192 | 193 | defp build_detections(detections) do 194 | Enum.map(detections, fn detection -> 195 | Map.merge(detection, %{ 196 | group_attributes: %{ 197 | data_class_id: detection.class_id, 198 | data_class_name: detection.class_name 199 | }, 200 | rect_attributes: %{ 201 | x: detection.x1, 202 | y: detection.y1, 203 | opacity: detection.score, 204 | width: detection.x2 - detection.x1, 205 | height: detection.y2 - detection.y1 206 | }, 207 | text_attributes: %{ 208 | x: detection.x1, 209 | y: detection.y1 210 | } 211 | }) 212 | end) 213 | end 214 | 215 | defp build_detections_has_hotdog(detections) do 216 | Enum.any?(detections, &(&1.class_name == "hot dog")) 217 | end 218 | 219 | defp build_classifications(classifications) do 220 | classifications 221 | end 222 | 223 | defp build_classifications_has_hotdog(classifications) do 224 | Enum.any?(classifications, &(String.contains?(&1.label, "hot dog") && &1.score > 0.75)) 225 | end 226 | 227 | defp update_overlay_assigns(socket) do 228 | <<_::4, _type::1, _flip::1, orientation::2>> = socket.assigns.video_orientation 229 | # type: 0 = front, 1 = back 230 | # flip: 0 = no, 1 = horizontal 231 | # orientation: 0 = 0°, 1 = 270°, 2 = 180°, 3 = 90° 232 | width = socket.assigns.video_width 233 | height = socket.assigns.video_height 234 | transpose = Enum.member?([1, 3], orientation) 235 | 236 | assign(socket, 237 | overlay_container: %{ 238 | viewBox: (transpose && "0 0 #{height} #{width}") || "0 0 #{width} #{height}" 239 | }, 240 | overlay_viewport: %{ 241 | width: (transpose && height) || width, 242 | height: (transpose && width) || height 243 | } 244 | ) 245 | end 246 | end 247 | -------------------------------------------------------------------------------- /apps/emporium_web/lib/emporium_web/telemetry.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumWeb.Telemetry do 2 | use Supervisor 3 | import Telemetry.Metrics 4 | 5 | def start_link(arg) do 
6 | Supervisor.start_link(__MODULE__, arg, name: __MODULE__) 7 | end 8 | 9 | @impl true 10 | def init(_arg) do 11 | children = [ 12 | {:telemetry_poller, measurements: periodic_measurements(), period: 10_000} 13 | ] 14 | 15 | Supervisor.init(children, strategy: :one_for_one) 16 | end 17 | 18 | def metrics do 19 | [ 20 | summary("phoenix.endpoint.stop.duration", 21 | unit: {:native, :millisecond} 22 | ), 23 | summary("phoenix.router_dispatch.stop.duration", 24 | tags: [:route], 25 | unit: {:native, :millisecond} 26 | ), 27 | summary("vm.memory.total", unit: {:byte, :kilobyte}), 28 | summary("vm.total_run_queue_lengths.total"), 29 | summary("vm.total_run_queue_lengths.cpu"), 30 | summary("vm.total_run_queue_lengths.io") 31 | ] 32 | end 33 | 34 | defp periodic_measurements do 35 | [] 36 | end 37 | end 38 | -------------------------------------------------------------------------------- /apps/emporium_web/lib/emporium_web/templates/layout/app.html.heex: -------------------------------------------------------------------------------- 1 | <%= @inner_content %> -------------------------------------------------------------------------------- /apps/emporium_web/lib/emporium_web/templates/layout/live.html.heex: -------------------------------------------------------------------------------- 1 | 4 | 5 | 8 | 9 | <%= @inner_content %> 10 | -------------------------------------------------------------------------------- /apps/emporium_web/lib/emporium_web/templates/layout/root.html.heex: -------------------------------------------------------------------------------- 1 | <% payload = assigns[:payload] %> 2 | 3 | 4 | 5 | 6 | 7 | <%= get_title(@conn) %> 8 | <%= for stylesheet_source <- get_stylesheet_sources(@conn) do %> 9 | 10 | <% end %> 11 | 12 | <%= csrf_meta_tag() %> 13 | 14 | 15 |
16 |
17 | <%= @inner_content %> 18 |
19 |
20 | <%= if payload do %> 21 | 24 | <% end %> 25 | <%= for script_source <- get_script_sources(@conn) do %> 26 | 27 | <% end %> 28 | 29 | 30 | -------------------------------------------------------------------------------- /apps/emporium_web/lib/emporium_web/templates/page/index.html.heex: -------------------------------------------------------------------------------- 1 |
2 |
3 |

4 | video room 5 |

6 |
7 |
8 |
9 |
10 | 15 |
16 | -------------------------------------------------------------------------------- /apps/emporium_web/lib/emporium_web/views/error_helpers.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumWeb.ErrorHelpers do 2 | use Phoenix.HTML 3 | 4 | def error_tag(form, field) do 5 | Enum.map(Keyword.get_values(form.errors, field), fn error -> 6 | content_tag(:span, translate_error(error), 7 | class: "invalid-feedback", 8 | phx_feedback_for: input_name(form, field) 9 | ) 10 | end) 11 | end 12 | 13 | def translate_error({msg, opts}) do 14 | if count = opts[:count] do 15 | Gettext.dngettext(EmporiumWeb.Gettext, "errors", msg, msg, count, opts) 16 | else 17 | Gettext.dgettext(EmporiumWeb.Gettext, "errors", msg, opts) 18 | end 19 | end 20 | end 21 | -------------------------------------------------------------------------------- /apps/emporium_web/lib/emporium_web/views/error_view.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumWeb.ErrorView do 2 | use EmporiumWeb, :view 3 | 4 | def template_not_found(template, _assigns) do 5 | Phoenix.Controller.status_message_from_template(template) 6 | end 7 | end 8 | -------------------------------------------------------------------------------- /apps/emporium_web/lib/emporium_web/views/layout_view.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumWeb.LayoutView do 2 | use EmporiumWeb, :view 3 | alias Plug.Conn 4 | @compile {:no_warn_undefined, {Routes, :live_dashboard_path, 2}} 5 | 6 | defp get_title(%Conn{private: %{title: title}}), do: title 7 | defp get_title(%Conn{} = _), do: gettext("application.name") 8 | 9 | defp get_stylesheet_sources(%Conn{} = conn) do 10 | Enum.map(get_stylesheet_names(conn), &Routes.static_path(conn, &1)) 11 | end 12 | 13 | defp get_script_sources(%Conn{} = conn) do 14 | Enum.map(get_script_names(conn), &Routes.static_path(conn, &1)) 15 | end 
16 | 17 | defp get_stylesheet_names(%Conn{private: %{stylesheet_names: names}}), do: names 18 | defp get_stylesheet_names(_), do: ["/assets/screen-generic.css"] 19 | 20 | defp get_script_names(%Conn{private: %{script_names: names}}), do: names 21 | defp get_script_names(_), do: ["/assets/screen-generic.js"] 22 | end 23 | -------------------------------------------------------------------------------- /apps/emporium_web/lib/emporium_web/views/page_view.ex: -------------------------------------------------------------------------------- 1 | defmodule EmporiumWeb.PageView do 2 | use EmporiumWeb, :view 3 | end 4 | -------------------------------------------------------------------------------- /apps/emporium_web/mix.exs: -------------------------------------------------------------------------------- 1 | defmodule EmporiumWeb.MixProject do 2 | use Mix.Project 3 | 4 | def project do 5 | [ 6 | app: :emporium_web, 7 | version: "0.1.0", 8 | build_path: "../../_build", 9 | config_path: "../../config/config.exs", 10 | deps_path: "../../deps", 11 | lockfile: "../../mix.lock", 12 | elixir: "~> 1.12", 13 | elixirc_paths: elixirc_paths(Mix.env()), 14 | compilers: [:gettext] ++ Mix.compilers(), 15 | start_permanent: Mix.env() == :prod, 16 | deps: deps() 17 | ] 18 | end 19 | 20 | def application do 21 | [ 22 | mod: {EmporiumWeb.Application, []}, 23 | extra_applications: [:logger, :runtime_tools, :os_mon] 24 | ] 25 | end 26 | 27 | defp elixirc_paths(:test), do: ["lib", "test/support"] 28 | defp elixirc_paths(_), do: ["lib"] 29 | 30 | defp deps do 31 | [ 32 | {:emporium_environment, in_umbrella: true}, 33 | {:emporium_nexus, in_umbrella: true}, 34 | {:phoenix, "~> 1.7.2"}, 35 | {:phoenix_html, "~> 3.3.1"}, 36 | {:phoenix_live_reload, "~> 1.4.1", only: :dev}, 37 | {:phoenix_live_view, "~> 0.18.18"}, 38 | {:phoenix_view, "~> 2.0.2"}, 39 | {:floki, ">= 0.30.0", only: :test}, 40 | {:phoenix_live_dashboard, "~> 0.7.2"}, 41 | {:gettext, "~> 0.18"}, 42 | {:jason, "~> 1.4.0"}, 43 | 
{:plug_cowboy, "~> 2.5"} 44 | ] 45 | end 46 | end 47 | -------------------------------------------------------------------------------- /apps/emporium_web/priv/gettext/default.pot: -------------------------------------------------------------------------------- 1 | msgid "" 2 | msgstr "" 3 | 4 | msgid "Welcome to %{name}!" 5 | msgstr "" 6 | 7 | msgid "application.name" 8 | msgstr "" 9 | -------------------------------------------------------------------------------- /apps/emporium_web/priv/gettext/en/LC_MESSAGES/default.po: -------------------------------------------------------------------------------- 1 | msgid "Welcome to %{name}!" 2 | msgstr "Welcome to Emporium!" 3 | 4 | msgid "application.name" 5 | msgstr "Emporium" 6 | -------------------------------------------------------------------------------- /apps/emporium_web/priv/gettext/en/LC_MESSAGES/errors.po: -------------------------------------------------------------------------------- 1 | msgid "are still associated with this entry" 2 | msgstr "" 3 | 4 | msgid "can't be blank" 5 | msgstr "" 6 | 7 | msgid "does not match confirmation" 8 | msgstr "" 9 | 10 | msgid "has already been taken" 11 | msgstr "" 12 | 13 | msgid "has an invalid entry" 14 | msgstr "" 15 | 16 | msgid "has invalid format" 17 | msgstr "" 18 | 19 | msgid "is invalid" 20 | msgstr "" 21 | 22 | msgid "is reserved" 23 | msgstr "" 24 | 25 | msgid "is still associated with this entry" 26 | msgstr "" 27 | 28 | msgid "must be accepted" 29 | msgstr "" 30 | 31 | msgid "must be equal to %{number}" 32 | msgstr "" 33 | 34 | msgid "must be greater than %{number}" 35 | msgstr "" 36 | 37 | msgid "must be greater than or equal to %{number}" 38 | msgstr "" 39 | 40 | msgid "must be less than %{number}" 41 | msgstr "" 42 | 43 | msgid "must be less than or equal to %{number}" 44 | msgstr "" 45 | 46 | msgid "should be %{count} character(s)" 47 | msgid_plural "should be %{count} character(s)" 48 | msgstr[0] "" 49 | msgstr[1] "" 50 | 51 | msgid "should be at least 
%{count} character(s)" 52 | msgid_plural "should be at least %{count} character(s)" 53 | msgstr[0] "" 54 | msgstr[1] "" 55 | 56 | msgid "should be at most %{count} character(s)" 57 | msgid_plural "should be at most %{count} character(s)" 58 | msgstr[0] "" 59 | msgstr[1] "" 60 | 61 | msgid "should have %{count} item(s)" 62 | msgid_plural "should have %{count} item(s)" 63 | msgstr[0] "" 64 | msgstr[1] "" 65 | 66 | msgid "should have at least %{count} item(s)" 67 | msgid_plural "should have at least %{count} item(s)" 68 | msgstr[0] "" 69 | msgstr[1] "" 70 | 71 | msgid "should have at most %{count} item(s)" 72 | msgid_plural "should have at most %{count} item(s)" 73 | msgstr[0] "" 74 | msgstr[1] "" 75 | -------------------------------------------------------------------------------- /apps/emporium_web/priv/gettext/errors.pot: -------------------------------------------------------------------------------- 1 | msgid "are still associated with this entry" 2 | msgstr "" 3 | 4 | msgid "can't be blank" 5 | msgstr "" 6 | 7 | msgid "does not match confirmation" 8 | msgstr "" 9 | 10 | msgid "has already been taken" 11 | msgstr "" 12 | 13 | msgid "has an invalid entry" 14 | msgstr "" 15 | 16 | msgid "has invalid format" 17 | msgstr "" 18 | 19 | msgid "is invalid" 20 | msgstr "" 21 | 22 | msgid "is reserved" 23 | msgstr "" 24 | 25 | msgid "is still associated with this entry" 26 | msgstr "" 27 | 28 | msgid "must be accepted" 29 | msgstr "" 30 | 31 | msgid "must be equal to %{number}" 32 | msgstr "" 33 | 34 | msgid "must be greater than %{number}" 35 | msgstr "" 36 | 37 | msgid "must be greater than or equal to %{number}" 38 | msgstr "" 39 | 40 | msgid "must be less than %{number}" 41 | msgstr "" 42 | 43 | msgid "must be less than or equal to %{number}" 44 | msgstr "" 45 | 46 | msgid "should be %{count} character(s)" 47 | msgid_plural "should be %{count} character(s)" 48 | msgstr[0] "" 49 | msgstr[1] "" 50 | 51 | msgid "should be at least %{count} character(s)" 52 | msgid_plural 
"should be at least %{count} character(s)" 53 | msgstr[0] "" 54 | msgstr[1] "" 55 | 56 | msgid "should be at most %{count} character(s)" 57 | msgid_plural "should be at most %{count} character(s)" 58 | msgstr[0] "" 59 | msgstr[1] "" 60 | 61 | msgid "should have %{count} item(s)" 62 | msgid_plural "should have %{count} item(s)" 63 | msgstr[0] "" 64 | msgstr[1] "" 65 | 66 | msgid "should have at least %{count} item(s)" 67 | msgid_plural "should have at least %{count} item(s)" 68 | msgstr[0] "" 69 | msgstr[1] "" 70 | 71 | msgid "should have at most %{count} item(s)" 72 | msgid_plural "should have at most %{count} item(s)" 73 | msgstr[0] "" 74 | msgstr[1] "" 75 | -------------------------------------------------------------------------------- /bin/compile: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | projectRootPath=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd -P)/.. 4 | 5 | # Configure Compiler 6 | export CC=/usr/bin/clang 7 | export CXX=/usr/bin/clang++ 8 | 9 | # Work around FastTLS configuration issues with OpenSSL not being found 10 | export LDFLAGS="-L/usr/local/opt/openssl/lib" 11 | export CFLAGS="-I/usr/local/opt/openssl/include/" 12 | export CPPFLAGS="-I/usr/local/opt/openssl/include/" 13 | export PKG_CONFIG_PATH="/usr/local/opt/openssl@3/lib/pkgconfig:$PKG_CONFIG_PATH" 14 | 15 | # Add CUDA for Linux 16 | export CUDA_HOME=/usr/local/cuda 17 | export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64 18 | export PATH=$PATH:$CUDA_HOME/bin 19 | 20 | cd "$projectRootPath" && mix compile 21 | -------------------------------------------------------------------------------- /bin/console: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | projectRootPath=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd -P)/.. 
4 | configPath=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd -P)/../config/dev.env 5 | configTemplatePath=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd -P)/../config/dev.env.template 6 | consoleScriptPath=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd -P)/../infra/foreman-app/console.sh 7 | 8 | if [ -f $configPath ]; then 9 | source "$configPath" 10 | cd "$projectRootPath" && mix compile && bash $consoleScriptPath 11 | else 12 | cp "$configTemplatePath" "$configPath" 13 | echo "ERROR: configuration file not found for Development environment." 14 | echo " $configPath has been created and will require values to be set." 15 | fi 16 | -------------------------------------------------------------------------------- /bin/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | projectRootPath=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd -P)/.. 4 | configPath=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd -P)/../config/dev.env 5 | configTemplatePath=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd -P)/../config/dev.env.template 6 | 7 | if [ -f $configPath ]; then 8 | source "$configPath" 9 | cd "$projectRootPath" && mix compile && forego start -r -t 0 10 | else 11 | cp "$configTemplatePath" "$configPath" 12 | echo "ERROR: configuration file not found for Development environment." 13 | echo " $configPath has been created and will require values to be set." 
14 | fi 15 | -------------------------------------------------------------------------------- /bin/setup: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # libsrtp2-dev libavcodec-dev libavformat-dev libavutil-dev build-essential clang llvm cmake make 4 | brew install openssl srtp 5 | -------------------------------------------------------------------------------- /bin/test: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | projectRootPath=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd -P)/.. 4 | configPath=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd -P)/../config/test.env 5 | configTemplatePath=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd -P)/../config/test.env.template 6 | scriptPath=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd -P)/../infra/foreman-app/test.sh 7 | 8 | if [ -f $configPath ]; then 9 | source "$configPath" 10 | cd "$projectRootPath" && bash $scriptPath "$@" 11 | else 12 | cp "$configTemplatePath" "$configPath" 13 | echo "ERROR: configuration file not found for Test environment." 14 | echo "       $configPath has been created and will require values to be set."
15 | fi 16 | -------------------------------------------------------------------------------- /config/config.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | 3 | for config <- "../apps/*/config/config.exs" |> Path.expand(__DIR__) |> Path.wildcard() do 4 | import_config config 5 | end 6 | 7 | if config_env() != :test do 8 | routes = [ 9 | emporium_web: {EmporiumWeb.Endpoint, "/"} 10 | ] 11 | 12 | config :emporium_proxy, EmporiumProxy, 13 | applications: Enum.uniq(Enum.map(routes, &elem(&1, 0))), 14 | endpoints: Enum.map(routes, &elem(&1, 1)) 15 | 16 | for {app, {endpoint, mount}} <- routes do 17 | config(app, endpoint, url: [path: mount]) 18 | end 19 | end 20 | 21 | config :logger, 22 | compile_time_purge_matching: [ 23 | [level_lower_than: :info], 24 | [module: Membrane.SRTP.Encryptor, function: "handle_event/4", level_lower_than: :error] 25 | ] 26 | 27 | config :logger, :console, 28 | format: "$time $metadata[$level] $message\n", 29 | metadata: [:application, :request_id] 30 | 31 | config :phoenix, :json_library, Jason 32 | 33 | config :nx, default_backend: EXLA.Backend 34 | 35 | import_config "#{config_env()}.exs" 36 | -------------------------------------------------------------------------------- /config/dev.env.template: -------------------------------------------------------------------------------- 1 | export NGROK_SUBDOMAIN="" # such as Emporium- 2 | export SECRET_KEY_BASE="" # mix phx.gen.secret 3 | export GUARDIAN_JWK_ENCODED_KEY="" # mix guardian.gen.secret 4 | export TURN_IP="127.0.0.1" 5 | export TURN_MOCK_IP="127.0.0.1" 6 | export TURN_PORT_UDP_FROM="50000" 7 | export TURN_PORT_UDP_TO="65535" 8 | 9 | export HOST=${HOST:=localhost} 10 | export PORT=${PORT:=4000} 11 | -------------------------------------------------------------------------------- /config/dev.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | 3 | # config :logger, 
handle_sasl_reports: true 4 | config :logger, :console, format: "[$level] $message\n" 5 | config :phoenix, :stacktrace_depth, 20 6 | config :phoenix, :plug_init_mode, :runtime 7 | -------------------------------------------------------------------------------- /config/prod.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | 3 | config :logger, level: :info 4 | -------------------------------------------------------------------------------- /config/releases.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | -------------------------------------------------------------------------------- /config/runtime.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | -------------------------------------------------------------------------------- /config/test.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | 3 | config :logger, level: :info 4 | config :phoenix, :plug_init_mode, :runtime 5 | -------------------------------------------------------------------------------- /dialyzer-ignore-warnings.exs: -------------------------------------------------------------------------------- 1 | [] 2 | -------------------------------------------------------------------------------- /infra/foreman-app/console.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | nonce=$(cat /dev/urandom | LC_ALL=C tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1) 4 | export ROLE="CONSOLE" 5 | export NODE_NAME="$ROLE-$nonce@localhost" 6 | export NODE_COOKIE="emporium-local" 7 | 8 | cd "`dirname $0`/../.." 
&& iex --sname $NODE_NAME --cookie $NODE_COOKIE -S mix 9 | -------------------------------------------------------------------------------- /infra/foreman-app/start.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | nonce=$(cat /dev/urandom | LC_ALL=C tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1) 4 | export ROLE="WEB" 5 | export NODE_NAME="$ROLE-$nonce@localhost" 6 | export NODE_COOKIE="emporium-local" 7 | 8 | cd "`dirname $0`/../.." && elixir --sname $NODE_NAME --cookie $NODE_COOKIE -S mix phx.server 9 | -------------------------------------------------------------------------------- /infra/foreman-app/test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | nonce=$(cat /dev/urandom | LC_ALL=C tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1) 4 | export ROLE="TEST" 5 | export NODE_NAME="$ROLE-$nonce@localhost" 6 | export NODE_COOKIE="emporium-local" 7 | 8 | cd "`dirname $0`/../.." 
&& elixir --sname $NODE_NAME --cookie $NODE_COOKIE -S mix test $@ 9 | -------------------------------------------------------------------------------- /mix.exs: -------------------------------------------------------------------------------- 1 | defmodule Emporium.Umbrella.MixProject do 2 | use Mix.Project 3 | 4 | def project do 5 | [ 6 | apps_path: "apps", 7 | version: "0.1.0", 8 | start_permanent: Mix.env() == :prod, 9 | deps: deps(), 10 | dialyzer: dialyzer(), 11 | releases: releases() 12 | ] 13 | end 14 | 15 | defp deps do 16 | # elixir_make requires a patched release beyond 0.7.6 due to patch: 17 | # https://github.com/elixir-lang/elixir_make/commit/58fe5b705d451a9ddf13673a785a46cda07909dc 18 | [ 19 | {:dialyxir, "~> 1.1.0", only: [:dev, :test], runtime: false}, 20 | {:elixir_make, 21 | github: "elixir-lang/elixir_make", ref: "58fe5b7", runtime: false, override: true} 22 | ] 23 | end 24 | 25 | defp dialyzer do 26 | [ 27 | plt_add_apps: [:mix, :mnesia, :iex, :ex_unit], 28 | flags: ~w(error_handling no_opaque underspecs unmatched_returns)a, 29 | ignore_warnings: "dialyzer-ignore-warnings.exs", 30 | list_unused_filters: true 31 | ] 32 | end 33 | 34 | defp releases do 35 | [ 36 | emporium: [ 37 | version: "0.0.1", 38 | include_executables_for: [:unix], 39 | applications: [ 40 | emporium_environment: :permanent, 41 | emporium_inference: :permanent, 42 | emporium_inference_resnet: :permanent, 43 | emporium_inference_yolov5: :permanent, 44 | emporium_nexus: :permanent, 45 | emporium_proxy: :permanent, 46 | emporium_web: :permanent 47 | ] 48 | ] 49 | ] 50 | end 51 | end 52 | -------------------------------------------------------------------------------- /rel/env.sh.eex: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [ -z "${NODE_NAME+x}" ]; then 4 | echo "NODE_NAME not configured." 
5 | export RELEASE_DISTRIBUTION="none" 6 | else 7 | export RELEASE_DISTRIBUTION="name" 8 | export RELEASE_NODE="$NODE_NAME" 9 | 10 | if [ ! -z "${NODE_COOKIE+x}" ]; then 11 | export RELEASE_COOKIE="$NODE_COOKIE" 12 | fi 13 | fi 14 | -------------------------------------------------------------------------------- /rel/remote.vm.args.eex: -------------------------------------------------------------------------------- 1 | ## Customize flags given to the VM: https://erlang.org/doc/man/erl.html 2 | ## -mode/-name/-sname/-setcookie are configured via env vars, do not set them here 3 | 4 | ## Number of dirty schedulers doing IO work (file, sockets, and others) 5 | ##+SDio 5 6 | 7 | ## Increase number of concurrent ports/sockets 8 | ##+Q 65536 9 | 10 | ## Tweak GC to run more often 11 | ##-env ERL_FULLSWEEP_AFTER 10 12 | -------------------------------------------------------------------------------- /rel/vm.args.eex: -------------------------------------------------------------------------------- 1 | ## Customize flags given to the VM: https://erlang.org/doc/man/erl.html 2 | ## -mode/-name/-sname/-setcookie are configured via env vars, do not set them here 3 | 4 | ## Number of dirty schedulers doing IO work (file, sockets, and others) 5 | ##+SDio 5 6 | 7 | ## Increase number of concurrent ports/sockets 8 | ##+Q 65536 9 | 10 | ## Tweak GC to run more often 11 | ##-env ERL_FULLSWEEP_AFTER 10 12 | -------------------------------------------------------------------------------- /vendor/setup-clang.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euxo pipefail 3 | 4 | sudo apt-get install \ 5 | clang-format \ 6 | clang-tidy \ 7 | clang-tools \ 8 | clang \ 9 | clangd \ 10 | libc++-dev \ 11 | libc++1 \ 12 | libc++abi-dev \ 13 | libc++abi1 \ 14 | libclang-dev \ 15 | libclang1 \ 16 | liblldb-dev \ 17 | libllvm-ocaml-dev \ 18 | libomp-dev \ 19 | libomp5 \ 20 | lld \ 21 | lldb \ 22 | llvm-dev \ 23 | llvm-runtime \ 
24 | llvm 25 | -------------------------------------------------------------------------------- /vendor/setup-coco.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euxo pipefail 3 | 4 | if [ -z ${COCO_INSTALL_DIR:-} ]; then 5 | COCO_INSTALL_DIR="$(dirname $(realpath $0))/dataset/coco" 6 | fi 7 | 8 | if [ -z ${COCO_TYPE:-} ]; then 9 | COCO_TYPE="val2017" 10 | fi 11 | 12 | PackageURL="http://images.cocodataset.org/zips/$COCO_TYPE.zip" 13 | echo "Downloading: $PackageURL" 14 | mkdir -p $COCO_INSTALL_DIR 15 | 16 | TemporaryDirectory=$(mktemp -d) 17 | mkdir -p "$TemporaryDirectory/install" 18 | cd "$TemporaryDirectory" 19 | wget -q -O package.zip $PackageURL 20 | unzip package.zip -d "$TemporaryDirectory/install" 21 | mkdir -p "$COCO_INSTALL_DIR" 22 | mv $TemporaryDirectory/install/* "$COCO_INSTALL_DIR" 23 | rm -rf "$TemporaryDirectory" 24 | -------------------------------------------------------------------------------- /vendor/setup-cudnn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euxo pipefail 3 | 4 | # check https://developer.download.nvidia.com/compute/redist/cudnn/v8.8.0/local_installers/12.0/ 5 | CUDNN_PATH="$(dirname $(realpath $0))/cache/cudnn-linux-x86_64-8.4.1.50_cuda11.6-archive.tar.xz" 6 | 7 | TemporaryDirectory=$(mktemp -d) 8 | cd $TemporaryDirectory 9 | tar -xJf $CUDNN_PATH -C .
--strip-components=1 10 | cp include/cudnn*.h /usr/local/cuda/include 11 | cp -P lib/libcudnn* /usr/local/cuda/lib64 12 | chmod a+r /usr/local/cuda/include/cudnn*.h /usr/local/cuda/lib64/libcudnn* 13 | rm -rf "$TemporaryDirectory" 14 | -------------------------------------------------------------------------------- /vendor/setup-libtorch.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euxo pipefail 3 | 4 | # Sets up the correct LibTorch variant based on the pre-determined version. 5 | 6 | if [ -z ${LIBTORCH_INSTALL_DIR:-} ]; then 7 | LIBTORCH_INSTALL_DIR="$(dirname $(realpath $0))/libtorch" 8 | fi 9 | 10 | if [ -z ${LIBTORCH_VERSION:-} ]; then 11 | LIBTORCH_VERSION="2.0.0" 12 | fi 13 | 14 | if [ "$(uname -s)" = "Linux" ]; then 15 | if [ -z ${LIBTORCH_TARGET:-} ]; then 16 | if command -v nvidia-smi >/dev/null 2>&1; then 17 | LIBTORCH_TARGET="cu118" 18 | else 19 | LIBTORCH_TARGET="cpu" 20 | fi 21 | fi 22 | if [ "$LIBTORCH_VERSION" == "nightly" ]; then 23 | PackageURLPrefix="https://download.pytorch.org/libtorch/nightly/$LIBTORCH_TARGET" 24 | PackageURL="$PackageURLPrefix/libtorch-cxx11-abi-shared-with-deps-latest.zip" 25 | else 26 | PackageURLPrefix="https://download.pytorch.org/libtorch/$LIBTORCH_TARGET" 27 | PackageURL="$PackageURLPrefix/libtorch-cxx11-abi-shared-with-deps-$LIBTORCH_VERSION%2B$LIBTORCH_TARGET.zip" 28 | fi 29 | elif [ "$(uname -s)" = "Darwin" ]; then 30 | LIBTORCH_TARGET="cpu" 31 | PackageURLPrefix="https://download.pytorch.org/libtorch/$LIBTORCH_TARGET" 32 | PackageURL="$PackageURLPrefix/libtorch-macos-$LIBTORCH_VERSION.zip" 33 | else 34 | echo "Unsupported Architecture" 35 | exit 1; 36 | fi 37 | 38 | echo "Downloading: $PackageURL" 39 | mkdir -p $LIBTORCH_INSTALL_DIR 40 | 41 | TemporaryDirectory=$(mktemp -d) 42 | cd "$TemporaryDirectory" 43 | wget -q -O libtorch.zip "$PackageURL" 44 | mkdir -p "$LIBTORCH_INSTALL_DIR" 45 | unzip libtorch.zip -d "$TemporaryDirectory" 46 | mv 
$TemporaryDirectory/libtorch/* "$LIBTORCH_INSTALL_DIR" 47 | rm -rf "$TemporaryDirectory" 48 | 49 | # Workaround of https://github.com/pytorch/pytorch/issues/68980#issuecomment-1208054795 50 | # using cu118 / PyTorch 2.0.0 51 | 52 | if [ ! -f "$LIBTORCH_INSTALL_DIR/lib/libnvrtc-builtins.so.11.8" ]; then 53 | if compgen -G "$LIBTORCH_INSTALL_DIR/lib/libnvrtc-builtins-*.so.11.8" > /dev/null; then 54 | ls -1 $LIBTORCH_INSTALL_DIR/lib/libnvrtc-builtins-*.so.11.8 | \ 55 | xargs -I{} cp {} "$LIBTORCH_INSTALL_DIR/lib/libnvrtc-builtins.so.11.8" 56 | fi 57 | fi 58 | 59 | # Also affected: 60 | # libnvrtc-672ee683.so.11.2 61 | # libnvToolsExt-847d78f2.so.1 62 | -------------------------------------------------------------------------------- /vendor/setup-opencv.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euxo pipefail 3 | 4 | sudo apt install -y cmake g++ wget unzip 5 | PackageURL="https://github.com/opencv/opencv/archive/4.x.zip" 6 | PackageContribURL="https://github.com/opencv/opencv_contrib/archive/4.x.zip" 7 | TemporaryDirectory=$(mktemp -d) 8 | cd "$TemporaryDirectory" 9 | wget -O opencv.zip $PackageURL 10 | wget -O opencv_contrib.zip $PackageContribURL 11 | unzip opencv.zip 12 | unzip opencv_contrib.zip 13 | mkdir -p build && cd build 14 | cmake -DOPENCV_EXTRA_MODULES_PATH=../opencv_contrib-4.x/modules ../opencv-4.x 15 | cmake --build . -j $(nproc) 16 | sudo make install 17 | cd - 18 | rm -rf "$TemporaryDirectory" 19 | --------------------------------------------------------------------------------