├── .gitbook.yaml ├── .gitbook └── assets │ ├── image.png │ └── logo_bubbaloop.png ├── .gitignore ├── Cargo.toml ├── Cross.toml ├── LICENSE ├── README.md ├── build.rs ├── docker └── aarch64.Dockerfile ├── docs ├── .gitbook │ └── assets │ │ └── image.png ├── README.md ├── SUMMARY.md ├── examples │ ├── camera-recording.md │ └── hello-world.md ├── model-inference-experimental.md ├── pipelines.md ├── quickstart.md ├── tutorials │ └── security-camera.md └── usage.md ├── examples ├── python-inference │ ├── client.py │ └── requirements.txt ├── python-rerun-files │ ├── main.py │ └── requirements.txt ├── python-streaming │ ├── client.py │ └── requirements.txt └── react-app │ ├── .gitignore │ ├── README.md │ ├── eslint.config.js │ ├── index.html │ ├── package-lock.json │ ├── package.json │ ├── src │ ├── App.css │ ├── App.tsx │ ├── components │ │ ├── ConnectionSettings.css │ │ ├── ConnectionSettings.tsx │ │ ├── InferenceDisplay.css │ │ ├── InferenceDisplay.tsx │ │ ├── InferenceInstruction.css │ │ ├── InferenceInstruction.tsx │ │ ├── StreamViewerWebsocket.css │ │ └── StreamViewerWebsocket.tsx │ ├── index.css │ ├── main.tsx │ └── vite-env.d.ts │ ├── tsconfig.app.json │ ├── tsconfig.json │ ├── tsconfig.node.json │ └── vite.config.ts ├── justfile ├── package-lock.json ├── package.json ├── scripts ├── cross_deploy.sh ├── install_deps.sh ├── install_libssl1.1.sh ├── install_linux.sh ├── run_serve.sh └── uninstall_linux.sh └── src ├── api ├── handles │ ├── inference.rs │ ├── mod.rs │ ├── pipeline.rs │ ├── recording.rs │ ├── stats │ │ ├── mod.rs │ │ ├── sysinfo.rs │ │ └── whoami.rs │ └── streaming.rs ├── mod.rs ├── models │ ├── inference.rs │ ├── mod.rs │ ├── pipeline.rs │ ├── recording.rs │ └── streaming.rs └── server.rs ├── bin ├── bubbaloop.rs └── serve.rs ├── cu29 ├── mod.rs ├── msgs.rs ├── pipelines │ ├── cameras.rs │ ├── cameras_1.ron │ ├── cameras_2.ron │ ├── cameras_3.ron │ ├── cameras_4.ron │ ├── inference.ron │ ├── inference.rs │ └── mod.rs └── tasks │ ├── broadcast.rs │ 
├── image_encoder.rs │ ├── inference.rs │ ├── mod.rs │ ├── recorder.rs │ ├── video_capture.rs │ └── video_writer.rs ├── lib.rs └── pipeline.rs /.gitbook.yaml: -------------------------------------------------------------------------------- 1 | root: ./docs/ 2 | 3 | structure: 4 | readme: ./docs/README.md 5 | summary: ./docs/SUMMARY.md 6 | -------------------------------------------------------------------------------- /.gitbook/assets/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kornia/bubbaloop/ba29fcdd84dd71f40e842c10db6236929efdb086/.gitbook/assets/image.png -------------------------------------------------------------------------------- /.gitbook/assets/logo_bubbaloop.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kornia/bubbaloop/ba29fcdd84dd71f40e842c10db6236929efdb086/.gitbook/assets/logo_bubbaloop.png -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | debug/ 4 | target/ 5 | 6 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 7 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 8 | Cargo.lock 9 | 10 | # These are backup files generated by rustfmt 11 | **/*.rs.bk 12 | 13 | # MSVC Windows builds of rustc generate these, which store debugging information 14 | *.pdb 15 | 16 | # RustRover 17 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 18 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 19 | # and can be added to the global gitignore or merged into this file. 
For a more nuclear 20 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 21 | #.idea/ 22 | 23 | # venv files from python examples 24 | examples/python-*/.venv/ 25 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "bubbaloop" 3 | categories = ["computer-vision", "science::robotics"] 4 | description = "Serving library for computer vision and AI Robotics" 5 | edition = "2021" 6 | homepage = "http://kornia.org" 7 | include = ["Cargo.toml"] 8 | license = "Apache-2.0" 9 | repository = "https://github.com/kornia/bubbaloop" 10 | rust-version = "1.86" 11 | version = "0.0.1-rc.1" 12 | 13 | [dependencies] 14 | argh = "0.1" 15 | axum = { version = "0.8", features = ["ws"] } 16 | bincode = "2.0.0" 17 | env_logger = "0.11" 18 | once_cell = "1.21" 19 | log = "0.4" 20 | reqwest = { version = "0.12", features = ["json"] } 21 | rerun = "0.23.2" 22 | serde = { version = "1.0", features = ["derive"] } 23 | serde_json = "1.0" 24 | sysinfo = "0.35" 25 | tokio = { version = "1", features = ["full"] } 26 | tower-http = { version = "0.6", features = ["cors"] } 27 | whoami = "1.5" 28 | 29 | # message passing framework 30 | # cu29 = { version = "0.7.0" } 31 | # cu29-helpers = { version = "0.7.0" } 32 | # TODO: fixes ron file connections order issues 33 | cu29 = { git = "https://github.com/copper-project/copper-rs.git", branch = "master" } 34 | cu29-helpers = { git = "https://github.com/copper-project/copper-rs.git", branch = "master" } 35 | 36 | kornia-image = "0.1.9" 37 | kornia-io = { version = "0.1.9", features = ["gstreamer", "turbojpeg"] } 38 | kornia-paligemma = { git = "https://github.com/kornia/kornia-paligemma.git", tag = "v0.1.0", features = [] } 39 | kornia-infernum = { git = "https://github.com/kornia/kornia-infernum.git", tag = "v0.1.0" } 40 | 41 | [features] 42 | cuda = 
["kornia-paligemma/cuda"] -------------------------------------------------------------------------------- /Cross.toml: -------------------------------------------------------------------------------- 1 | [build] 2 | default-target = "aarch64-unknown-linux-gnu" 3 | 4 | [target.aarch64-unknown-linux-gnu] 5 | dockerfile = "docker/aarch64.Dockerfile" -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ./docs/README.md -------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | println!( 3 | "cargo:rustc-env=LOG_INDEX_DIR={}", 4 | std::env::var("OUT_DIR").unwrap() 5 | ); 6 | } 7 | -------------------------------------------------------------------------------- /docker/aarch64.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ghcr.io/cross-rs/aarch64-unknown-linux-gnu:edge 2 | 3 | RUN apt-get update && apt-get install --assume-yes \ 4 | cmake \ 5 | curl \ 6 | gdb \ 7 | pkg-config \ 8 | software-properties-common \ 9 | wget \ 10 | && \ 11 | apt-get clean 12 | 13 | ENV DEBIAN_FRONTEND=noninteractive 14 | 15 | RUN dpkg --add-architecture arm64 16 | 17 | # Install dependencies 18 | RUN apt-get update && apt-get install --assume-yes \ 19 | nasm \ 20 | 
libgstreamer1.0-dev:arm64 \ 21 | libgstreamer-plugins-base1.0-dev:arm64 \ 22 | libssl-dev:arm64 \ 23 | libglib2.0-dev:arm64 \ 24 | libudev-dev:arm64 \ 25 | && \ 26 | apt-get clean -------------------------------------------------------------------------------- /docs/.gitbook/assets/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kornia/bubbaloop/ba29fcdd84dd71f40e842c10db6236929efdb086/docs/.gitbook/assets/image.png -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # 🦄 Bubbaloop 2 | 3 | :earth\_africa: Serve local models for Spatial and AI Robotics Openly. 4 | 5 | * :robot: **AI & Robotics Ready**: Ideal for integrated AI, Robotics, and IoT applications. 6 | * :rocket: **REST API**: Effortlessly manage data pipelines for recording and inference. 7 | * :crab: **Rust Framework**: Offers both efficiency and safety for diverse applications. 8 | 9 | ### 👥 **Join Our Community** 10 | 11 | Connect with like-minded innovators and developers! 
12 | 13 | * 💬 Discord Server: [https://discord.com/invite/HfnywwpBnD](https://discord.com/invite/HfnywwpBnD) 14 | * :book: Checkout our live [documentation](https://kornia.gitbook.io/bubbaloop) 15 | * :woman\_technologist: Give me the code: [https://github.com/kornia/bubbaloop](https://github.com/kornia/bubbaloop) 16 | 17 | ## [📢](https://emojipedia.org/loudspeaker) News 18 | 19 | * \[2025-05-11] Added tutorial for Home Security App 20 | * \[2025-04-06] Added support to inference with Google Paligemma 21 | * \[2025-03-01] Initial prototype of Inference engine pipeline 22 | * \[2025-02-01] Added recording pipeline to store rerun files and viz 23 | * \[2025-01-02] Added RTSP and Webcam support with kornia-rs 24 | * \[2024-12-28] Initial push with the basics for pipeline management 25 | -------------------------------------------------------------------------------- /docs/SUMMARY.md: -------------------------------------------------------------------------------- 1 | # Table of contents 2 | 3 | * [🦄 Bubbaloop](README.md) 4 | * [🚀 Quickstart](quickstart.md) 5 | * [💊 Stats API](usage.md) 6 | * [🍰 Pipeline API](pipelines.md) 7 | 8 | ## Examples 9 | 10 | * [🌈 Hello World](examples/hello-world.md) 11 | * [📷 Camera Recording](examples/camera-recording.md) 12 | 13 | *** 14 | 15 | * [🍄 Model Inference (experimental)](model-inference-experimental.md) 16 | 17 | ## Tutorials 18 | 19 | * [Home Security App](tutorials/security-camera.md) 20 | -------------------------------------------------------------------------------- /docs/examples/camera-recording.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: Example showing how to stream data from cameras and log into disk 3 | --- 4 | 5 | # 📷 Camera Recording 6 | 7 | The Bubbaloop platform includes a `cameras` pipeline functionality which allows to stream and record data from multi camera streams and serialize in disk including the video frames metadata such as the timestamps. 
8 | 9 | ## Edit the pipeline file 10 | 11 | In order to customize the recording pipeline we need to follow the steps below, eg to adjust our RTSP streams configuration: 12 | 13 | {% stepper %} 14 | {% step %} 15 | #### Update the pipeline in[ cameras.rs](../../src/cu29/pipelines/cameras.rs) 16 | 17 | Go to [`cameras.rs`](../../src/cu29/pipelines/cameras.rs) an update the `config` parameter by specifying the path to the pipeline `ron` file that you want to use for the recording task. 18 | 19 | We provide as an example a couple of pipelines to record from one and multiple cameras. See: `cameras_1.ron` , `cameras_2.ron` , etc. 20 | 21 | ```rust 22 | #[copper_runtime(config = "src/cu29/pipelines/cameras_1.ron")] 23 | struct CamerasApp {} 24 | ``` 25 | {% endstep %} 26 | 27 | {% step %} 28 | #### Customize the pipeline file 29 | 30 | You can definitely customize the `ron` file e.g to update the camera parameters like the `source_uri` to point to your RTSP camera; or enable disable the broadcasting. 31 | 32 | {% hint style="info" %} 33 | The RTSP url it's expected to be as in the following format 34 | 35 |
"rtsp://<username>:<password>@<ip>:<port>/<stream>"
 36 | 
37 | {% endhint %} 38 | 39 | {% hint style="danger" %} 40 | The `channel_id` must be a valid `usize` number and must be not repeated. 41 | {% endhint %} 42 | {% endstep %} 43 | {% endstepper %} 44 | 45 | These are `ron` files examples to use with single and multicam with broadcasting included 46 | 47 | {% tabs %} 48 | {% tab title="RTSP (single)" %} 49 | ```json 50 | ( 51 | tasks: [ 52 | ( 53 | id: "cam0", 54 | type: "crate::cu29::tasks::VideoCapture", 55 | config: { 56 | "source_type": "rtsp", 57 | // URL of the RTSP camera 58 | // rtsp://:@:/ 59 | "source_uri": "rtsp://tapo_entrance:123456789@192.168.1.141:554/stream2", 60 | "channel_id": 0, 61 | } 62 | ), 63 | ( 64 | id: "enc0", 65 | type: "crate::cu29::tasks::ImageEncoder", 66 | ), 67 | ( 68 | id: "logger", 69 | type: "crate::cu29::tasks::RerunLoggerOne", 70 | config: { 71 | // Path to the directory where the recordings will be stored 72 | "path": "/tmp/", 73 | } 74 | ), 75 | ( 76 | id: "bcast0", 77 | type: "crate::cu29::tasks::ImageBroadcast", 78 | ), 79 | ], 80 | cnx: [ 81 | (src: "cam0", dst: "enc0", msg: "crate::cu29::msgs::ImageRgb8Msg"), 82 | (src: "enc0", dst: "logger", msg: "crate::cu29::msgs::EncodedImage"), 83 | (src: "enc0", dst: "bcast0", msg: "crate::cu29::msgs::EncodedImage"), 84 | ] 85 | , 86 | logging: ( 87 | slab_size_mib: 1024, // Preallocates 1GiB of memory map file at a time 88 | section_size_mib: 100, // Preallocates 100MiB of memory map per section for the main logger. 
89 | enable_task_logging: false, 90 | ), 91 | ) 92 | 93 | ``` 94 | {% endtab %} 95 | 96 | {% tab title="RTSP (multi)" %} 97 | ```json 98 | ( 99 | tasks: [ 100 | ( 101 | id: "cam0", 102 | type: "crate::cu29::tasks::VideoCapture", 103 | config: { 104 | "source_type": "rtsp", 105 | // URL of the RTSP camera 106 | // rtsp://:@:/ 107 | "source_uri": "rtsp://tapo_entrance:123456789@192.168.1.141:554/stream2", 108 | "channel_id": 0, 109 | } 110 | ), 111 | ( 112 | id: "cam1", 113 | type: "crate::cu29::tasks::VideoCapture", 114 | config: { 115 | "source_type": "rtsp", 116 | // URL of the RTSP camera 117 | // rtsp://:@:/ 118 | "source_uri": "rtsp://tapo_terrace:123456789@192.168.1.151:554/stream2", 119 | "channel_id": 1, 120 | } 121 | ), 122 | ( 123 | id: "enc0", 124 | type: "crate::cu29::tasks::ImageEncoder", 125 | ), 126 | ( 127 | id: "enc1", 128 | type: "crate::cu29::tasks::ImageEncoder", 129 | ), 130 | ( 131 | id: "bcast0", 132 | type: "crate::cu29::tasks::ImageBroadcast", 133 | ), 134 | ( 135 | id: "bcast1", 136 | type: "crate::cu29::tasks::ImageBroadcast", 137 | ), 138 | ( 139 | id: "logger", 140 | type: "crate::cu29::tasks::RerunLoggerTwo", 141 | config: { 142 | // Path to the directory where the logs will be stored 143 | "path": "/tmp/", 144 | } 145 | ), 146 | ], 147 | cnx: [ 148 | (src: "cam0", dst: "enc0", msg: "crate::cu29::msgs::ImageRgb8Msg"), 149 | (src: "cam1", dst: "enc1", msg: "crate::cu29::msgs::ImageRgb8Msg"), 150 | (src: "enc0", dst: "logger", msg: "crate::cu29::msgs::EncodedImage"), 151 | (src: "enc1", dst: "logger", msg: "crate::cu29::msgs::EncodedImage"), 152 | (src: "enc0", dst: "bcast0", msg: "crate::cu29::msgs::EncodedImage"), 153 | (src: "enc1", dst: "bcast1", msg: "crate::cu29::msgs::EncodedImage"), 154 | ] 155 | , 156 | logging: ( 157 | slab_size_mib: 1024, // Preallocates 1GiB of memory map file at a time 158 | section_size_mib: 100, // Preallocates 100MiB of memory map per section for the main logger. 
159 | enable_task_logging: false, 160 | ), 161 | ) 162 | 163 | ``` 164 | {% endtab %} 165 | 166 | {% tab title="Webcam" %} 167 | ```json 168 | ( 169 | tasks: [ 170 | ( 171 | id: "cam0", 172 | type: "crate::cu29::tasks::VideoCapture", 173 | config: { 174 | "source_type": "v4l2", 175 | "source_uri": "/dev/video0", 176 | "source_fps": 30, 177 | "image_cols": 640, 178 | "image_rows": 480, 179 | } 180 | ), 181 | ( 182 | id: "rerun", 183 | type: "crate::cu29::tasks::RerunLogger", 184 | config: { 185 | // Path to the directory where the logs will be stored 186 | "path": "/tmp/", 187 | // IP address of the rerun server 188 | "ip": "192.168.1.144", 189 | // Port of the rerun server 190 | "port": 9876, 191 | } 192 | ) 193 | ], 194 | cnx: [ 195 | (src: "cam0", dst: "rerun", msg: "crate::cu29::msgs::ImageRgb8Msg"), 196 | ] 197 | , 198 | logging: ( 199 | slab_size_mib: 1024, // Preallocates 1GiB of memory map file at a time 200 | section_size_mib: 100, // Preallocates 100MiB of memory map per section for the main logger. 201 | ), 202 | ) 203 | ``` 204 | {% endtab %} 205 | {% endtabs %} 206 | 207 | ## Start the server 208 | 209 | ``` 210 | just serve 211 | ``` 212 | 213 | ```bash 214 | [2025-04-13T12:22:53Z INFO bubbaloop::api::server] 🚀 Starting the server 215 | [2025-04-13T12:22:53Z INFO bubbaloop::api::server] 🔥 Listening on: 0.0.0.0:3000 216 | [2025-04-13T12:22:53Z INFO bubbaloop::api::server] 🔧 Press Ctrl+C to stop the server 217 | ``` 218 | 219 | ## Start streaming 220 | 221 | Start the camera pipeline and log using [rerun.io](https://www.rerun.io). 222 | 223 | ``` 224 | just start-pipeline cameras 0.0.0.0 3000 225 | ``` 226 | 227 | ```bash 228 | Result: { 229 | "message": "Pipeline recording started" 230 | } 231 | ``` 232 | 233 | ## Visualize the streaming 234 | 235 | You can use the example [`python-streaming`](https://github.com/kornia/bubbaloop/tree/main/examples/python-streaming) to visualize the streams in real-time using Rerun. 
236 | 237 | ```bash 238 | python examples/python-streaming/client.py \ 239 | --host 0.0.0.0 --port 3000 --cameras 0 # 1 (for multi cam) 240 | ``` 241 | 242 | {% tabs %} 243 | {% tab title="Single Camera" %} 244 |
245 | {% endtab %} 246 | 247 | {% tab title="Multi Camera" %} 248 |
 249 | {% endtab %} 250 | {% endtabs %} 251 | 252 | ## Start Recording 253 | 254 | Send a request to the server to start recording from the cameras 255 | 256 | ```bash 257 | just start-recording 0.0.0.0 3000 258 | ``` 259 | 260 | #### Client terminal 261 | 262 | ``` 263 | Result: { 264 | "message": "Pipeline recording started" 265 | } 266 | ``` 267 | 268 | ## Stop recording 269 | 270 | To stop the pipeline, use the `stop-pipeline` command: 271 | 272 | ```bash 273 | just stop-pipeline recording 0.0.0.0 3000 274 | ``` 275 | 276 | #### **Client terminal** 277 | 278 | ``` 279 | Result: { 280 | "message": "Pipeline recording stopped" 281 | } 282 | ``` 283 | 284 | #### **Server terminal** 285 | 286 | ```bash 287 | [2025-04-13T12:10:45Z DEBUG bubbaloop::api::handles::pipeline] Request to stop pipeline: recording 288 | [2025-04-13T12:10:45Z DEBUG bubbaloop::cu29::pipelines::recording] Recording pipeline stopped 289 | [2025-04-13T12:10:45Z DEBUG re_log_encoding::file_sink] Log stream written to /tmp/1744545975.rrd 290 | ``` 291 | 292 | ## Get the recorded data and Visualize 293 | 294 | You can copy the recorded files (e.g. via ssh) from the device to your computer. 
295 | 296 | ```bash 297 | scp bubbaloop777:/home/nvidia/1735941642.rrd ~/data 298 | ``` 299 | 300 | Open the file directly with rerun to introspect the recording 301 | 302 | ```bash 303 | rerun 1735941642.rrd 304 | ``` 305 | -------------------------------------------------------------------------------- /docs/examples/hello-world.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: Your first Bubbaloop service experience 3 | --- 4 | 5 | # 🌈 Hello World 6 | 7 | ## Start the server 8 | 9 | ``` 10 | just serve 11 | ``` 12 | 13 | ## Request to start the task 14 | 15 | Send an HTTP request to the server to start the background task 16 | 17 | ``` 18 | just start-pipeline bubbaloop 0.0.0.0 3000 19 | ``` 20 | 21 | From the server side you will see the following 22 | 23 | ```bash 24 | [2025-01-05T15:51:33Z DEBUG bubbaloop::pipeline] | Hello !! This is a Bubbaloop !!! 🎮 25 | [2025-01-05T15:51:34Z DEBUG bubbaloop::pipeline] / Hello !! This is a Bubbaloop !!! 🌈 26 | [2025-01-05T15:51:35Z DEBUG bubbaloop::pipeline] - Hello !! This is a Bubbaloop !!! 😊 27 | [2025-01-05T15:51:36Z DEBUG bubbaloop::pipeline] \ Hello !! This is a Bubbaloop !!! 🚀 28 | [2025-01-05T15:51:37Z DEBUG bubbaloop::pipeline] | Hello !! This is a Bubbaloop !!! 🦀 29 | [2025-01-05T15:51:38Z DEBUG bubbaloop::pipeline] / Hello !! This is a Bubbaloop !!! 
🎉 30 | ``` 31 | 32 | ## Stop the task 33 | 34 | To stop the pipeline, use the `stop-pipeline` command: 35 | 36 | ``` 37 | just stop-pipeline bubbaloop 0.0.0.0 3000 38 | ``` 39 | 40 | #### From client 41 | 42 | ``` 43 | Result: { 44 | "message": "Pipeline bubbaloop stopped" 45 | } 46 | ``` 47 | 48 | #### From server 49 | 50 | ```bash 51 | [2025-01-05T15:51:39Z DEBUG bubbaloop::pipeline] Request to stop pipeline: bubbaloop 52 | [2025-01-05T15:51:40Z DEBUG bubbaloop::pipeline] Pipeline bubbaloop stopped after 155 iterations 53 | ``` 54 | -------------------------------------------------------------------------------- /docs/model-inference-experimental.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: Example showing how to use the models inference functionality. 3 | --- 4 | 5 | # 🍄 Model Inference (experimental) 6 | 7 | The **Bubbaloop** server is able to run **inference** efficiently Visual Language Models (VLM) using directly the camera streams without any latency in the same process and broadcast the results. 8 | 9 | Supported models (via [Kornia](https://github.com/kornia/kornia-paligemma) / [Candle](https://github.com/huggingface/candle)) 10 | 11 | * PaliGemma: [https://ai.google.dev/gemma/docs/paligemma](https://ai.google.dev/gemma/docs/paligemma/prompt-system-instructions) 12 | 13 | ## Edit the pipeline 14 | 15 | Similar to the [Camera Recording ](examples/camera-recording.md)pipeline, we can customize the `inference.ron` pipeline to adjust to our system setup. This will require compiling every time you modify your config. 
16 | 17 | ```json 18 | ( 19 | tasks: [ 20 | // NOTE: Modify this block to customize 21 | ( 22 | id: "cam0", 23 | type: "crate::cu29::tasks::VideoCapture", 24 | config: { 25 | config: { 26 | // URL of the RTSP camera 27 | "source_type": "rtsp", 28 | "source_uri": "rtsp://:@:/" 29 | } 30 | ), 31 | ( 32 | id: "inference", 33 | type: "crate::cu29::tasks::Inference", 34 | ), 35 | ( 36 | id: "bcast_text", 37 | type: "crate::cu29::tasks::BroadcastChat", 38 | ), 39 | ( 40 | id: "bcast_image", 41 | type: "crate::cu29::tasks::BroadcastImage", 42 | ), 43 | ], 44 | cnx: [ 45 | (src: "cam0", dst: "inference", msg: "crate::cu29::msgs::ImageRgb8Msg"), 46 | (src: "cam0", dst: "bcast_image", msg: "crate::cu29::msgs::ImageRgb8Msg"), 47 | (src: "inference", dst: "bcast_text", msg: "crate::cu29::msgs::PromptResponseMsg"), 48 | ], 49 | logging: ( 50 | slab_size_mib: 1024, // Preallocates 1GiB of memory map file at a time 51 | section_size_mib: 100, // Preallocates 100MiB of memory map per section for the main logger. 52 | enable_task_logging: false, 53 | ), 54 | ) 55 | ``` 56 | 57 | ## Start the server 58 | 59 | ``` 60 | just serve 61 | ``` 62 | 63 | ## Start the inference 64 | 65 | ``` 66 | just start-pipeline inference 0.0.0.0 3000 67 | ``` 68 | 69 | By default, this command will start the inference engine using the prompt "cap en" — to generate a short capture from each frame. 
70 | 71 | {% hint style="info" %} 72 | Check the supported prompts: [https://ai.google.dev/gemma/docs/paligemma/prompt-system-instructions](https://ai.google.dev/gemma/docs/paligemma/prompt-system-instructions) 73 | {% endhint %} 74 | 75 | In your terminal you should be able to get somethin similar 76 | 77 | ``` 78 | [2025-04-06T14:20:13Z INFO bubbaloop::api::server] 🚀 Starting the server 79 | [2025-04-06T14:20:13Z INFO bubbaloop::api::server] 🔥 Listening on: 0.0.0.0:3000 80 | [2025-04-06T14:20:13Z INFO bubbaloop::api::server] 🔧 Press Ctrl+C to stop the server 81 | [2025-04-06T14:20:31Z DEBUG bubbaloop::cu29::tasks::inference] Received response from inference thread: PromptResponseMsg { prompt: "cap en", response: " Two people are sitting on the bed. In-front of them there is a table with some objects and other things on it. On top of them there is roof, light and we can see trees and sky in the background is sunny." } 82 | ``` 83 | 84 | ## Inference settings 85 | 86 | We expose some setting via a REST api to the following end point. 87 | 88 | ``` 89 | curl -X POST "http://localhost:3000/api/v0/inference/settings" \ 90 | -H "Content-Type: application/json" \ 91 | -d '{"prompt": "answer Is there any human?"}' 92 | ``` 93 | 94 | This will fix the prompt to run inference on to detect people 95 | 96 | ## Broadcast 97 | 98 | You can access also to the image streams and prompts results via the following API including their timestamps. 
99 | 100 | #### **Jpeg encoded images** 101 | 102 | ```html 103 | http://localhost:3000/api/v0/streaming/image 104 | ``` 105 | 106 | #### **Model inference results** 107 | 108 | ``` 109 | http://localhost:3000/api/v0/inference/results 110 | ``` 111 | 112 | #### Visualize streams with inference results 113 | 114 | We provide a small Python script that calls the above end points and visualize the results with [Rerun](https://rerun.io/) 115 | 116 | {% hint style="info" %} 117 | [https://github.com/kornia/bubbaloop/blob/main/examples/python-inference/client.py](../examples/python-inference/client.py) 118 | {% endhint %} 119 | 120 |
121 | 122 | ## Stop inference 123 | 124 | To stop the pipeline, use the `stop-pipeline` command: 125 | 126 | ``` 127 | just stop-pipeline inference 0.0.0.0 3000 128 | ``` 129 | -------------------------------------------------------------------------------- /docs/pipelines.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: Basic usage and pipeline management with Bubbaloop 3 | --- 4 | 5 | # 🍰 Pipeline API 6 | 7 | **Bubbaloop** is a Rust-based server application that orchestrates computational pipelines using the Cu29 ([copper-rs](https://github.com/copper-project/copper-rs)) framework. It provides both an HTTP API and CLI for managing these pipelines. 8 | 9 | ## Core Concepts 10 | 11 | * Pipeline Management: The system dynamically manages multiple pipeline types (bubbaloop, inference, recording, streaming) that process data through connected tasks. 12 | * Cu29/Copper Framework: Pipelines are built using the Cu29 framework ([copper-rs](https://github.com/copper-project/copper-rs)), which provides a task-based computation model with message passing between components. 
13 | * RON Configuration: Pipelines are defined in [RON](https://github.com/ron-rs/ron) (Rusty Object Notation) files that specify: 14 | * Tasks: Individual processing components with unique IDs and configurations 15 | * Connections: Message flows between tasks with specific message types 16 | 17 | ## Architecture 18 | 19 | * API Server: An Axum-based HTTP server that exposes endpoints for pipeline management 20 | * Pipeline Store: Central registry tracking all running pipelines with their statuses 21 | * Result Store: Maintains processing results and enables streaming of data between components 22 | 23 | ## Pipeline Types 24 | 25 | * `bubbaloop` — Our hello-world simple demo pipeline 26 | * `cameras` — Captures and records video streams form single or multiple camera 27 | * `inference` — Processes video streams for inference using computer vision models 28 | 29 | ## Available API 30 | 31 | * `POST /api/v0/pipeline/start` Start a pipeline with specified ID 32 | * `POST /api/v0/pipeline/stop` Stop a running pipeline 33 | * `GET /api/v0/pipeline/list` List all available pipelines with their statuses 34 | 35 | ## Usage 36 | 37 | ### Start pipeline 38 | 39 | Create and register a pipeline given its name. This will spawn a background task. 
40 | 41 | ``` 42 | just start-pipeline HOST IP PIPE_NAME 43 | ``` 44 | 45 | ```bash 46 | Result: { 47 | "message": "Pipeline 'PIPE_NAME' started" 48 | } 49 | ``` 50 | 51 | ### Stop pipeline 52 | 53 | To stop the pipeline, use the `stop-pipeline` command: 54 | 55 | ``` 56 | just stop-pipeline HOST IP PIPE_NAME 57 | ``` 58 | 59 | ```bash 60 | Result: { 61 | "message": "Pipeline 'PIPE_NAME' stopped" 62 | } 63 | ``` 64 | 65 | ### List pipelines 66 | 67 | To list all the registered pipelines and their status, use the `list-pipeline` command: 68 | 69 | ``` 70 | just pipeline-list HOST IP 71 | ``` 72 | 73 | ```bash 74 | Result: [ 75 | { 76 | "id": "bubbaloop", 77 | "status": "Running" 78 | } 79 | ] 80 | ``` 81 | -------------------------------------------------------------------------------- /docs/quickstart.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: Get started with serving Bubbaloop serving platform 3 | --- 4 | 5 | # 🚀 Quickstart 6 | 7 | ## Setup the project 8 | 9 | {% hint style="info" %} 10 | Windows users are recommeneded to use Windows Subsystems by running `wsl.exe --install Ubuntu-22.04` on a Powershell. 11 | {% endhint %} 12 | 13 | {% hint style="info" %} 14 | You may need to install [rust](https://www.rust-lang.org/tools/install) if you have not. 15 | {% endhint %} 16 | 17 | {% stepper %} 18 | {% step %} 19 | **Download the project** 20 | 21 | ``` 22 | git clone https://github.com/kornia/bubbaloop.git 23 | ``` 24 | {% endstep %} 25 | 26 | {% step %} 27 | **Install pre-requisites** 28 | 29 | {% hint style="info" %} 30 | you need to install `cargo` in order to fetch and build necessary packages. If you don't have `cargo`, you can install it by following the instructions on the [official Rust website](https://www.rust-lang.org/tools/install). 
31 | {% endhint %} 32 | 33 | Install **justfile**: [https://github.com/casey/just?tab=readme-ov-file#linux](https://github.com/casey/just?tab=readme-ov-file#linux) 34 | {% endstep %} 35 | 36 | {% step %} 37 | **Install Dependencies** 38 | 39 | To get started, ensure all necessary system dependencies 40 | 41 | ``` 42 | just install_deps 43 | ``` 44 | {% endstep %} 45 | {% endstepper %} 46 | 47 | ## Serve in local 48 | 49 | Launch the server via the terminal; it defaults to listening on `0.0.0.0:3000` 50 | 51 | ``` 52 | just serve 53 | ``` 54 | 55 | You might observe something like this: 56 | 57 | ```bash 58 | [2025-01-04T23:14:46Z INFO bubbaloop::api] 🚀 Starting the server 59 | [2025-01-04T23:14:46Z INFO bubbaloop::api] 🔥 Listening on: 0.0.0.0:3000 60 | [2025-01-04T23:14:46Z INFO bubbaloop::api] 🔧 Press Ctrl+C to stop the server 61 | ``` 62 | 63 | ## Serve remotely 64 | 65 | Repeat the process about in a remote machine (e.g. in Nvidia Jetson) and give a `HOST`and an `IP` to serve remotely. 
66 | 67 | ```bash 68 | just serve 192.168.1.154 3000 69 | ``` 70 | 71 | ## Use the Rust CLI :crab: 72 | 73 | ```bash 74 | just help 75 | ``` 76 | 77 | ```bash 78 | Usage: bubbaloop [-h ] [-p ] [] 79 | 80 | Bubbaloop CLI 81 | 82 | Options: 83 | -h, --host the host to listen on 84 | -p, --port the port to listen on 85 | --help, help display usage information 86 | 87 | Commands: 88 | inference Inference management commands 89 | pipeline Pipeline management commands 90 | recording Recording management commands 91 | stats Get stats about the server 92 | ``` 93 | -------------------------------------------------------------------------------- /docs/tutorials/security-camera.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: 'Bubbaloop 101: Turn Your Phone into a Smart Security Camera in 10 Minutes' 3 | icon: house-signal 4 | cover: >- 5 | https://images.unsplash.com/photo-1520697830682-bbb6e85e2b0b?crop=entropy&cs=srgb&fm=jpg&ixid=M3wxOTcwMjR8MHwxfHNlYXJjaHw4fHxzZWN1cml0eXxlbnwwfHx8fDE3NDY4OTI5ODF8MA&ixlib=rb-4.1.0&q=85 6 | coverY: 0 7 | --- 8 | 9 | # Home Security App 10 | 11 | **Why should you care?** 12 | 13 | * **You already own the hardware.** An old iPhone or Android device on your windowsill is now your first smart security feed. 14 | * **Privacy‑first.** Everything stays local on a $249 Jetson Orin Nano or your laptop – no cloud fees, no vendor lock‑in. 15 | * **Instant insight.** Live multi‑camera visualization and local video recording with spatial intelligence built in. 
16 | 17 | This guide walks you through setting up **Bubbaloop**, an open-source camera pipeline built with Rust and [kornia-rs](https://github.com/kornia/kornia-rs), to: 18 | 19 | * Ingest real-time video from your phone or IP cameras 20 | * Do high level vision tasks like question answering, object detection etc on frames 21 | * Visualize and interact with the results in real-time 22 | * All with high performance on low-cost edge hardware 23 | 24 | ⏱️ You’ll go from "unopened box" to live feed + local recording in 10–15 minutes. 25 | 26 | *** 27 | 28 | ## What You'll Need 29 | 30 | ### Your Phone or Any Camera 31 | 32 | * **iPhone** – use [RTSP Stream](https://apps.apple.com/us/app/rtsp-stream/id6474928937) or Larix Broadcaster 33 | * **Android** – use [WebCamPro](https://play.google.com/store/apps/details?id=com.shenyaocn.android.WebCamPro\&hl=en) 34 | * **Optional**: IP Cam (RTSP compatible) – e.g. TP-Link Tapo TC65 (\~£29) 35 | 36 |
37 | 38 | ### Hardware 39 | 40 | * **Jetson Orin Nano (8GB)** – [Buy here from Seeed Studio](https://www.seeedstudio.com/NVIDIAr-Jetson-Orintm-Nano-Developer-Kit-p-5617.html) (\~$249) 41 | * Or your **Linux laptop / PC** 42 | 43 |
44 | 45 | ### Software & Tools 46 | 47 | * Rust + Cargo — [https://www.rust-lang.org/](https://www.rust-lang.org/) 48 | * Kornia-rs: high-performance vision tools in Rust — [https://github.com/kornia/kornia-rs](https://github.com/kornia/kornia-rs) 49 | * Just: command runner — [https://just.systems/](https://just.systems/) 50 | * [Rerun.io](https://rerun.io/) for real-time visualization (optional but recommended) 51 | 52 | *** 53 | 54 | ## Set Up Camera Streaming First 55 | 56 | {% tabs %} 57 | {% tab title="iPhone" %} 58 | * Download [RTSP Stream](https://apps.apple.com/us/app/rtsp-stream/id6474928937) 59 | * Start a stream and take note of the RTSP URL (e.g. `rtsp://your-ip:8554/live`) 60 | {% endtab %} 61 | 62 | {% tab title="Android" %} 63 | * Install [WebCamPro](https://play.google.com/store/apps/details?id=com.shenyaocn.android.WebCamPro\&hl=en) 64 | * Enable RTSP streaming 65 | * Get your stream URL (e.g. `rtsp://192.168.1.x:8554/live`) 66 | {% endtab %} 67 | {% endtabs %} 68 | 69 | *** 70 | 71 | ## Step-by-Step Setup 72 | 73 | ### Clone the Repo 74 | 75 | ```bash 76 | git clone https://github.com/kornia/bubbaloop.git 77 | cd bubbaloop 78 | ``` 79 | 80 | ## Configure Your Camera 81 | 82 | Edit `src/cu29/pipelines/cameras_1.ron`: 83 | 84 | ```json 85 | ( 86 | tasks: [ 87 | ( 88 | id: "cam0", 89 | type: "crate::cu29::tasks::VideoCapture", 90 | config: { 91 | "source_type": "rtsp", 92 | // URL of the RTSP camera 93 | // rtsp://:@:/ 94 | "source_uri": "rtsp://tapo_entrance:123456789@192.168.1.141:554/stream2", 95 | "channel_id": 0, 96 | } 97 | ), 98 | ], 99 | ) 100 | ``` 101 | 102 | ### Install bubbaloop 103 | 104 | ```bash 105 | sudo ./scripts/install_linux.sh 106 | ``` 107 | 108 | This will install all the necessary dependencies including Rust (if not installed on your computer) and start the system process. 
You can check the status via 109 | 110 | ```bash 111 | systemctl status bubbaloop 112 | ``` 113 | 114 | for real time logs 115 | 116 | ```bash 117 | sudo journalctl -u bubbaloop.service -f 118 | ``` 119 | 120 | ## Start a Camera Pipeline 121 | 122 | ```bash 123 | bubbaloop pipeline start --name cameras 124 | ``` 125 | 126 | To stop: 127 | 128 | ```bash 129 | bubbaloop pipeline stop --name cameras 130 | ``` 131 | 132 | List all pipelines: 133 | 134 | ```bash 135 | bubbaloop pipeline list 136 | ``` 137 | 138 | *** 139 | 140 | ## Start a recording 141 | 142 | ```bash 143 | bubbaloop recording start 144 | ``` 145 | 146 | To stop: 147 | 148 | ```bash 149 | bubbaloop recording stop 150 | ``` 151 | 152 | *** 153 | 154 | ## Visualize with Rerun 155 | 156 | ```bash 157 | python examples/python-streaming/client.py --host 0.0.0.0 --port 3000 --cameras 0 158 | ``` 159 | 160 | Or view a recorded `.rrd` file: 161 | 162 | ```bash 163 | scp your-device:/tmp/1735941642.rrd ./ 164 | rerun 1735941642.rrd 165 | ``` 166 | 167 |
168 | 169 | *** 170 | 171 | ## Running Paligemma for Object Detection (Experimental) 172 | 173 | {% hint style="warning" %} 174 | For now the pipelines are mutually exclusive. This means that before starting the inference you need to stop any running pipeline. 175 | {% endhint %} 176 | 177 | Now you can start safely the inference engine 178 | 179 | ```bash 180 | bubbaloop pipeline start --name inference 181 | ``` 182 | 183 | ### Customise the prompt 184 | 185 | You can change the prompt online with the following command 186 | 187 | ```bash 188 | bubbaloop inference settings --prompt "Is there any human?" 189 | ``` 190 | 191 | ### Request the inference result 192 | 193 | The inference result can be obtained using the following command 194 | 195 | ```bash 196 | bubbaloop inference result 197 | ``` 198 | 199 | #### Client 200 | 201 | ```bash 202 | Result: { 203 | "Success": { 204 | "channel_id": 0, 205 | "prompt": "Is there any human?", 206 | "response": "no", 207 | "stamp_ns": 141281452950 208 | } 209 | } 210 | ``` 211 | 212 | *** 213 | 214 | ## Contribute / Feedback 215 | 216 | Join our [Discord server](https://discord.com/invite/HfnywwpBnD) or open issues on [GitHub](https://github.com/kornia/bubbaloop). 217 | -------------------------------------------------------------------------------- /docs/usage.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: Low level utilities with the Bubbaloop server to get system stats and metrics 3 | layout: 4 | title: 5 | visible: true 6 | description: 7 | visible: true 8 | tableOfContents: 9 | visible: true 10 | outline: 11 | visible: true 12 | pagination: 13 | visible: true 14 | --- 15 | 16 | # 💊 Stats API 17 | 18 | The **Bubbaloop** server provides a comprehensive REST API that allows users to retrieve detailed system information through the `/api/v0/stats` endpoint. 
This API leverages established Rust libraries to deliver accurate and extensive system data in a structured JSON format. 19 | 20 | We expose the following functionality 21 | 22 | * `whoami` [https://docs.rs/whoami/latest/whoami](https://docs.rs/whoami/latest/whoami/) 23 | * `sysinfo` [https://docss.rs/sysinfo/latest/sysinfo](https://docs.rs/sysinfo/latest/sysinfo/) 24 | 25 | ## Available API 26 | 27 | * `GET /api/v0/stats/whoami` — Provides detailed information about the system's identity 28 | * `GET /api/v0/stats/sysinfo` — Delivers comprehensive system resource metric 29 | 30 | ## Usage 31 | 32 | ### whoami 33 | 34 | ``` 35 | just whoami 0.0.0.0 3000 36 | ``` 37 | 38 | #### **Server terminal** 39 | 40 | ```bash 41 | [2025-04-13T15:03:20Z DEBUG bubbaloop::api::handles::stats::whoami] 🤖 Received request for whoami 42 | ``` 43 | 44 | #### **Client terminal** 45 | 46 | ```bash 47 | Result: { 48 | "arch": "Arm64", 49 | "desktop_env": "Unknown", 50 | "device_name": "nvidia-desktop", 51 | "distro": "Ubuntu 22.04.5 LTS", 52 | "hostname": "nvidia-desktop", 53 | "platform": "Linux", 54 | "realname": "nvidia", 55 | "username": "nvidia" 56 | } 57 | ``` 58 | 59 | ### sysinfo 60 | 61 | ``` 62 | just sysinfo 0.0.0.0 3000 63 | ``` 64 | 65 | #### **Server terminal** 66 | 67 | ```bash 68 | [2025-04-13T15:03:45Z DEBUG bubbaloop::api::handles::stats::sysinfo] 🤖 Received request for sysinfo 69 | ``` 70 | 71 | #### **Client terminal** 72 | 73 | ```json 74 | Result: { 75 | "available_memory": 7011606528, 76 | "cpus": [ 77 | { 78 | "brand": "Cortex-A78AE", 79 | "frequency": 1113, 80 | "name": "cpu0", 81 | "usage": 0.0 82 | }, 83 | { 84 | "brand": "Cortex-A78AE", 85 | "frequency": 1113, 86 | "name": "cpu1", 87 | "usage": 0.0 88 | }, 89 | { 90 | "brand": "Cortex-A78AE", 91 | "frequency": 1113, 92 | "name": "cpu2", 93 | "usage": 0.0 94 | }, 95 | { 96 | "brand": "Cortex-A78AE", 97 | "frequency": 1113, 98 | "name": "cpu3", 99 | "usage": 0.0 100 | }, 101 | { 102 | "brand": "Cortex-A78AE", 103 
| "frequency": 729, 104 | "name": "cpu4", 105 | "usage": 0.0 106 | }, 107 | { 108 | "brand": "Cortex-A78AE", 109 | "frequency": 729, 110 | "name": "cpu5", 111 | "usage": 0.0 112 | } 113 | ], 114 | "disks": [ 115 | { 116 | "available_space": 186810265600, 117 | "file_system": "ext4", 118 | "mount_point": "/", 119 | "name": "/dev/mmcblk0p1", 120 | "total_space": 250131267584 121 | }, 122 | { 123 | "available_space": 65946624, 124 | "file_system": "vfat", 125 | "mount_point": "/boot/efi", 126 | "name": "/dev/mmcblk0p10", 127 | "total_space": 66059264 128 | } 129 | ], 130 | "free_memory": 4320612352, 131 | "global_cpu_usage": 18.697363, 132 | "host_name": "nvidia-desktop", 133 | "kernel_version": "5.15.148-tegra", 134 | "name": "Ubuntu", 135 | "os_version": "22.04", 136 | "total_memory": 7990116352, 137 | "total_swap": 3995049984, 138 | "used_memory": 978509824 139 | } 140 | ``` 141 | -------------------------------------------------------------------------------- /examples/python-inference/client.py: -------------------------------------------------------------------------------- 1 | """Example of a client that requests the inference result from the server.""" 2 | 3 | import argparse 4 | import asyncio 5 | import httpx 6 | import rerun as rr 7 | import kornia_rs as kr 8 | import numpy as np 9 | 10 | 11 | async def get_api_response(client: httpx.AsyncClient, url: str) -> dict | None: 12 | try: 13 | response = await client.get(url) 14 | except httpx.HTTPError as _: 15 | print("The request timed out. 
Please try again.") 16 | return 17 | 18 | if response is None: 19 | return None 20 | 21 | json_response = response.json() 22 | return json_response 23 | 24 | 25 | def response_to_image(response: dict) -> rr.Image: 26 | # decode the image 27 | decoder = kr.ImageDecoder() 28 | data = decoder.decode(bytes(response["data"])) 29 | return rr.Image(data) 30 | 31 | 32 | def response_to_inference_result(response: dict) -> rr.TextLog: 33 | log_text = f"prompt: {response['prompt']} -- response: {response['response']}" 34 | return rr.TextLog(log_text, level=rr.TextLogLevel.INFO) 35 | 36 | 37 | async def poll_image(client: httpx.AsyncClient, url: str, rr): 38 | while True: 39 | # get the image from the server 40 | response = await get_api_response(client, url) 41 | 42 | if response is not None and "Success" in response: 43 | response = response["Success"] 44 | rr.set_time( 45 | "session", 46 | timestamp=np.datetime64(response["stamp_ns"], "ns"), 47 | ) 48 | rr.log(f"/cam/{response['channel_id']}", response_to_image(response)) 49 | 50 | 51 | async def poll_inference_result(client: httpx.AsyncClient, url: str, rr): 52 | while True: 53 | # get the inference result from the server 54 | response = await get_api_response(client, url) 55 | 56 | if response is not None and "Success" in response: 57 | response = response["Success"] 58 | rr.set_time( 59 | "session", 60 | timestamp=np.datetime64(response["stamp_ns"], "ns"), 61 | ) 62 | rr.log( 63 | f"/logs/{response['channel_id']}", 64 | response_to_inference_result(response), 65 | ) 66 | 67 | 68 | async def main() -> None: 69 | """Main function to receive the inference result from the server.""" 70 | parser = argparse.ArgumentParser() 71 | parser.add_argument("--host", type=str, default="0.0.0.0") 72 | parser.add_argument("--port", type=int, default=3000) 73 | args = parser.parse_args() 74 | 75 | rr.init("rerun_inference_client", spawn=True) 76 | 77 | async with httpx.AsyncClient(timeout=None) as client: 78 | image_task = 
asyncio.create_task( 79 | poll_image( 80 | client, 81 | url=f"http://{args.host}:{args.port}/api/v0/streaming/image/0", 82 | rr=rr, 83 | ) 84 | ) 85 | 86 | inference_task = asyncio.create_task( 87 | poll_inference_result( 88 | client, 89 | url=f"http://{args.host}:{args.port}/api/v0/inference/result", 90 | rr=rr, 91 | ) 92 | ) 93 | 94 | await asyncio.gather(image_task, inference_task) 95 | 96 | 97 | if __name__ == "__main__": 98 | asyncio.run(main()) 99 | -------------------------------------------------------------------------------- /examples/python-inference/requirements.txt: -------------------------------------------------------------------------------- 1 | httpx==0.27.0 2 | rerun-sdk 3 | kornia_rs -------------------------------------------------------------------------------- /examples/python-rerun-files/main.py: -------------------------------------------------------------------------------- 1 | """ 2 | This script reads a rerun file, decodes the images and logs them to rerun again. 
3 | """ 4 | 5 | import argparse 6 | from pathlib import Path 7 | import kornia_rs as kr 8 | import rerun as rr 9 | 10 | 11 | def main() -> None: 12 | parser = argparse.ArgumentParser( 13 | description="Read a rerun file and print the messages" 14 | ) 15 | parser.add_argument("--log-file", type=Path, required=True) 16 | args = parser.parse_args() 17 | 18 | rr.init("rerun_video_example", spawn=True) 19 | 20 | # load the recording 21 | recording = rr.dataframe.load_recording(args.log_file) 22 | # print(recording.schema().component_columns()) 23 | 24 | image_decoder = kr.ImageDecoder() 25 | 26 | for cam_topic in ["/cam/0", "/cam/1"]: 27 | print(f"Processing {cam_topic} ...") 28 | view = recording.view(index="log_time", contents=cam_topic) 29 | table = view.select().read_all() 30 | 31 | # convert the table to a pandas dataframe to iterate over the rows 32 | df = table.to_pandas() 33 | 34 | for _, row in df.iterrows(): 35 | _, time, blob, media_type = row 36 | if media_type is None: 37 | continue 38 | 39 | # decode the jpeg image to a numpy array HxWxC 40 | image = image_decoder.decode(blob[0].tobytes()) 41 | 42 | rr.set_time_nanos("timeline", time.nanosecond) 43 | rr.log(cam_topic, rr.Image(image)) 44 | 45 | 46 | if __name__ == "__main__": 47 | main() 48 | -------------------------------------------------------------------------------- /examples/python-rerun-files/requirements.txt: -------------------------------------------------------------------------------- 1 | rerun-sdk 2 | kornia_rs 3 | pandas -------------------------------------------------------------------------------- /examples/python-streaming/client.py: -------------------------------------------------------------------------------- 1 | """Example of a client that requests the streaming image from the server. 
2 | 3 | Usage: 4 | python examples/python-streaming/client.py --host 0.0.0.0 --port 3000 --cameras 0 1 5 | """ 6 | 7 | import argparse 8 | import asyncio 9 | import httpx 10 | import rerun as rr 11 | import kornia_rs as kr 12 | 13 | 14 | async def get_api_response(client: httpx.AsyncClient, url: str) -> dict | None: 15 | try: 16 | response = await client.get(url) 17 | except httpx.HTTPError as _: 18 | print("The request timed out. Please try again.") 19 | return 20 | 21 | if response is None: 22 | return None 23 | 24 | json_response = response.json() 25 | return json_response 26 | 27 | 28 | def response_to_image(response: dict) -> rr.Image: 29 | # decode the JPEG image 30 | decoder = kr.ImageDecoder() 31 | data = decoder.decode(bytes(response["data"])) 32 | return rr.Image(data) 33 | 34 | 35 | async def poll_image(client: httpx.AsyncClient, url: str, rr): 36 | while True: 37 | # get the image from the server 38 | response = await get_api_response(client, url) 39 | 40 | if response is not None and "Success" in response: 41 | response = response["Success"] 42 | rr.set_time_sequence("session", response["stamp_ns"]) 43 | rr.log(f"/cam/{response['channel_id']}", response_to_image(response)) 44 | 45 | 46 | async def main() -> None: 47 | """Main function to receive the streaming images from the server.""" 48 | parser = argparse.ArgumentParser() 49 | parser.add_argument("--host", type=str, default="0.0.0.0") 50 | parser.add_argument("--port", type=int, default=3000) 51 | parser.add_argument("--cameras", type=int, nargs="+", default=[0]) 52 | args = parser.parse_args() 53 | 54 | rr.init("rerun_streaming_client", spawn=True) 55 | 56 | async with httpx.AsyncClient(timeout=None) as client: 57 | image_tasks = [] 58 | for camera_id in args.cameras: 59 | image_tasks.append( 60 | asyncio.create_task( 61 | poll_image( 62 | client, 63 | url=f"http://{args.host}:{args.port}/api/v0/streaming/image/{camera_id}", 64 | rr=rr, 65 | ) 66 | ) 67 | ) 68 | await asyncio.gather(*image_tasks) 
69 | 70 | 71 | if __name__ == "__main__": 72 | asyncio.run(main()) 73 | -------------------------------------------------------------------------------- /examples/python-streaming/requirements.txt: -------------------------------------------------------------------------------- 1 | httpx==0.27.0 2 | rerun-sdk 3 | kornia_rs -------------------------------------------------------------------------------- /examples/react-app/.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | pnpm-debug.log* 8 | lerna-debug.log* 9 | 10 | node_modules/** 11 | dist 12 | dist-ssr 13 | *.local 14 | 15 | # Editor directories and files 16 | .vscode/* 17 | !.vscode/extensions.json 18 | .idea 19 | .DS_Store 20 | *.suo 21 | *.ntvs* 22 | *.njsproj 23 | *.sln 24 | *.sw? 25 | -------------------------------------------------------------------------------- /examples/react-app/README.md: -------------------------------------------------------------------------------- 1 | # Bubbaloop React App 2 | 3 | This is a React application for connecting to and displaying camera streams and inference results from a Bubbaloop backend. 4 | 5 | https://github.com/user-attachments/assets/2ba6575a-6a47-43d7-a8eb-0cdb76be1d43 6 | 7 | ## Prerequisites 8 | 9 | Before running the React app, you must have the Bubbaloop server running with the inference pipeline: 10 | 11 | 1. **Start the Bubbaloop server** 12 | ```bash 13 | # In the root of your bubbaloop project 14 | 15 | # First, start the server 16 | just serve 0.0.0.0 3000 17 | 18 | # Then, in another terminal, start the inference pipeline 19 | just start-pipeline inference 0.0.0.0 3000 20 | ``` 21 | 22 | 2. **Verify the server is running** 23 | The server should start the inference pipeline and be ready to accept connections. 24 | You should see logs indicating successful initialization. 
25 | 26 | ## Running the Application 27 | 28 | Once the server is running, follow these steps to run the React app: 29 | 30 | 1. **Install dependencies** 31 | ```bash 32 | cd examples/react-app 33 | npm install 34 | ``` 35 | 36 | 2. **Start the development server** 37 | ```bash 38 | npm run dev 39 | ``` 40 | 41 | 3. **Access the application** 42 | The app will be available at http://localhost:5173 (or another port if 5173 is in use) 43 | 44 | ## Using the Application 45 | 46 | 1. **Configure connection settings** 47 | - The app includes a connection settings panel to configure the host and port of your backend services 48 | - Default values are set to 0.0.0.0:3000 49 | - Update these values to match your backend server configuration 50 | - Click "Update Connection" to apply changes 51 | 52 | 2. **Features** 53 | - Stream viewer for camera feeds 54 | - Inference instruction panel for sending commands 55 | - Inference result display showing detection outcomes 56 | 57 | ## Requirements 58 | 59 | - Node.js (v16 or later recommended) 60 | - A running Bubbaloop backend service 61 | -------------------------------------------------------------------------------- /examples/react-app/eslint.config.js: -------------------------------------------------------------------------------- 1 | import js from '@eslint/js' 2 | import globals from 'globals' 3 | import reactHooks from 'eslint-plugin-react-hooks' 4 | import reactRefresh from 'eslint-plugin-react-refresh' 5 | import tseslint from 'typescript-eslint' 6 | 7 | export default tseslint.config( 8 | { ignores: ['dist'] }, 9 | { 10 | extends: [js.configs.recommended, ...tseslint.configs.recommended], 11 | files: ['**/*.{ts,tsx}'], 12 | languageOptions: { 13 | ecmaVersion: 2020, 14 | globals: globals.browser, 15 | }, 16 | plugins: { 17 | 'react-hooks': reactHooks, 18 | 'react-refresh': reactRefresh, 19 | }, 20 | rules: { 21 | ...reactHooks.configs.recommended.rules, 22 | 'react-refresh/only-export-components': [ 23 | 'warn', 24 | 
{ allowConstantExport: true }, 25 | ], 26 | }, 27 | }, 28 | ) 29 | -------------------------------------------------------------------------------- /examples/react-app/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 9 | Bubbaloop 10 | 11 | 12 | 13 |
14 | 15 | 16 | 17 | -------------------------------------------------------------------------------- /examples/react-app/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "react-app", 3 | "private": true, 4 | "version": "0.0.0", 5 | "type": "module", 6 | "scripts": { 7 | "dev": "vite", 8 | "build": "tsc -b && vite build", 9 | "lint": "eslint .", 10 | "preview": "vite preview" 11 | }, 12 | "dependencies": { 13 | "react": "^19.1.0", 14 | "react-dom": "^19.1.0", 15 | "react-json-tree": "^0.20.0" 16 | }, 17 | "devDependencies": { 18 | "@eslint/js": "^9.25.0", 19 | "@types/react": "^19.1.2", 20 | "@types/react-dom": "^19.1.2", 21 | "@vitejs/plugin-react": "^4.4.1", 22 | "eslint": "^9.25.0", 23 | "eslint-plugin-react-hooks": "^5.2.0", 24 | "eslint-plugin-react-refresh": "^0.4.19", 25 | "globals": "^16.0.0", 26 | "typescript": "~5.8.3", 27 | "typescript-eslint": "^8.30.1", 28 | "vite": "^6.3.5" 29 | } 30 | } -------------------------------------------------------------------------------- /examples/react-app/src/App.css: -------------------------------------------------------------------------------- 1 | /* App.css */ 2 | .App { 3 | max-width: 900px; 4 | margin: 0 auto; 5 | padding: 20px; 6 | font-family: Arial, sans-serif; 7 | } 8 | 9 | .App-header { 10 | text-align: center; 11 | margin-bottom: 30px; 12 | } 13 | 14 | .app-content { 15 | display: flex; 16 | flex-direction: column; 17 | gap: 20px; 18 | } 19 | 20 | .app-error-log { 21 | margin-top: 20px; 22 | padding: 15px; 23 | background-color: #f8f9fa; 24 | border: 1px solid #ddd; 25 | border-radius: 8px; 26 | } 27 | 28 | .app-error-log h3 { 29 | margin-top: 0; 30 | color: #721c24; 31 | } -------------------------------------------------------------------------------- /examples/react-app/src/App.tsx: -------------------------------------------------------------------------------- 1 | // App.tsx 2 | import React, { useState } from 'react'; 3 | import 
'./App.css'; 4 | import StreamViewerWebsocket from './components/StreamViewerWebsocket'; 5 | import InferenceResultDisplay from './components/InferenceDisplay'; 6 | import InferenceInstruction from './components/InferenceInstruction'; 7 | import ConnectionSettings from './components/ConnectionSettings'; 8 | 9 | const App: React.FC = () => { 10 | const [host, setHost] = useState('0.0.0.0'); 11 | const [port, setPort] = useState('3000'); 12 | const [key, setKey] = useState(0); 13 | 14 | const baseUrl = `http://${host}:${port}`; 15 | const wsUrl = `ws://${host}:${port}`; 16 | 17 | const handleConnectionUpdate = (newHost: string, newPort: string) => { 18 | setHost(newHost); 19 | setPort(newPort); 20 | // Increment key to force re-mounting the components 21 | setKey(prevKey => prevKey + 1); 22 | }; 23 | 24 | return ( 25 |
26 |
27 |

Bubbaloop

28 |
29 | 30 | 31 | 32 |
33 | 37 | 42 | 48 |
49 |
50 | ); 51 | }; 52 | 53 | export default App; -------------------------------------------------------------------------------- /examples/react-app/src/components/ConnectionSettings.css: -------------------------------------------------------------------------------- 1 | s.connection-settings { 2 | background-color: #f5f5f5; 3 | padding: 15px; 4 | margin-bottom: 20px; 5 | border-radius: 5px; 6 | box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1); 7 | } 8 | 9 | .connection-settings form { 10 | display: flex; 11 | flex-wrap: wrap; 12 | gap: 10px; 13 | align-items: center; 14 | } 15 | 16 | .connection-settings .form-group { 17 | display: flex; 18 | align-items: center; 19 | gap: 5px; 20 | } 21 | 22 | .connection-settings label { 23 | font-weight: bold; 24 | margin-right: 5px; 25 | } 26 | 27 | .connection-settings input { 28 | padding: 8px; 29 | border: 1px solid #ccc; 30 | border-radius: 4px; 31 | } 32 | 33 | .connection-settings button { 34 | background-color: #4a90e2; 35 | color: white; 36 | border: none; 37 | padding: 8px 16px; 38 | border-radius: 4px; 39 | cursor: pointer; 40 | font-weight: bold; 41 | } 42 | 43 | .connection-settings button:hover { 44 | background-color: #357ab8; 45 | } 46 | 47 | @media (max-width: 600px) { 48 | .connection-settings form { 49 | flex-direction: column; 50 | align-items: stretch; 51 | } 52 | } -------------------------------------------------------------------------------- /examples/react-app/src/components/ConnectionSettings.tsx: -------------------------------------------------------------------------------- 1 | import React, { useState } from 'react'; 2 | import './ConnectionSettings.css'; 3 | 4 | interface ConnectionSettingsProps { 5 | onUpdate: (host: string, port: string) => void; 6 | initialHost: string; 7 | initialPort: string; 8 | } 9 | 10 | const ConnectionSettings: React.FC = ({ 11 | onUpdate, 12 | initialHost, 13 | initialPort 14 | }) => { 15 | const [host, setHost] = useState(initialHost); 16 | const [port, setPort] = 
useState(initialPort); 17 | 18 | const handleSubmit = (e: React.FormEvent) => { 19 | e.preventDefault(); 20 | onUpdate(host, port); 21 | }; 22 | 23 | return ( 24 |
25 |
26 |
27 | 28 | setHost(e.target.value)} 33 | placeholder="Enter host (e.g., localhost or IP)" 34 | /> 35 |
36 |
37 | 38 | setPort(e.target.value)} 43 | placeholder="Enter port (e.g., 3000)" 44 | /> 45 |
46 | 47 |
48 |
49 | ); 50 | }; 51 | 52 | export default ConnectionSettings; -------------------------------------------------------------------------------- /examples/react-app/src/components/InferenceDisplay.css: -------------------------------------------------------------------------------- 1 | /* InferenceDisplay.css */ 2 | .inference-display { 3 | border: 1px solid #ddd; 4 | border-radius: 8px; 5 | overflow: hidden; 6 | background-color: #f8f9fa; 7 | box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1); 8 | margin: 10px 0; 9 | } 10 | 11 | .inference-header { 12 | display: flex; 13 | justify-content: space-between; 14 | align-items: center; 15 | padding: 10px 15px; 16 | background-color: #343a40; 17 | color: white; 18 | border-bottom: 1px solid #ddd; 19 | } 20 | 21 | .inference-header h3 { 22 | margin: 0; 23 | font-size: 16px; 24 | } 25 | 26 | .loading-indicator { 27 | color: #4CAF50; 28 | animation: pulse 1.5s infinite; 29 | font-size: 14px; 30 | } 31 | 32 | .inference-content { 33 | padding: 15px; 34 | max-height: 300px; 35 | overflow-y: auto; 36 | } 37 | 38 | .inference-content pre { 39 | margin: 0; 40 | font-family: monospace; 41 | white-space: pre-wrap; 42 | word-break: break-word; 43 | } 44 | 45 | .inference-error { 46 | color: #721c24; 47 | background-color: #f8d7da; 48 | padding: 10px; 49 | border-radius: 4px; 50 | text-align: center; 51 | } 52 | 53 | .retry-button { 54 | background-color: #007bff; 55 | color: white; 56 | border: none; 57 | border-radius: 4px; 58 | padding: 5px 10px; 59 | margin-top: 10px; 60 | cursor: pointer; 61 | } 62 | 63 | .retry-button:hover { 64 | background-color: #0056b3; 65 | } 66 | 67 | @keyframes pulse { 68 | 0% { 69 | opacity: 0.4; 70 | } 71 | 72 | 50% { 73 | opacity: 1; 74 | } 75 | 76 | 100% { 77 | opacity: 0.4; 78 | } 79 | } -------------------------------------------------------------------------------- /examples/react-app/src/components/InferenceDisplay.tsx: -------------------------------------------------------------------------------- 1 | // 
InferenceDisplay.tsx 2 | import React, { useState, useEffect, useRef } from 'react'; 3 | import { JSONTree } from 'react-json-tree'; 4 | import './InferenceDisplay.css'; 5 | 6 | interface InferenceDisplayProps { 7 | inferenceUrl: string; 8 | refreshRate?: number; 9 | width?: string | number; 10 | height?: string | number; 11 | title?: string; 12 | } 13 | 14 | // Monokai theme for direct application 15 | const monokai = { 16 | scheme: 'monokai', 17 | base00: '#272822', // background 18 | base01: '#383830', 19 | base02: '#49483e', 20 | base03: '#75715e', // comments 21 | base04: '#a59f85', 22 | base05: '#f8f8f2', // text 23 | base06: '#f5f4f1', 24 | base07: '#f9f8f5', 25 | base08: '#f92672', // red 26 | base09: '#fd971f', // orange 27 | base0A: '#f4bf75', // yellow 28 | base0B: '#a6e22e', // green 29 | base0C: '#a1efe4', // aqua 30 | base0D: '#66d9ef', // blue 31 | base0E: '#ae81ff', // purple 32 | base0F: '#cc6633' // brown 33 | }; 34 | 35 | const InferenceDisplay: React.FC = ({ 36 | inferenceUrl, 37 | refreshRate = 1000, 38 | width = '100%', 39 | height = 'auto', 40 | title = 'Inference Results', 41 | }) => { 42 | const [inferenceData, setInferenceData] = useState(null); 43 | const [error, setError] = useState(null); 44 | const [isLoading, setIsLoading] = useState(true); 45 | 46 | const intervalRef = useRef(null); 47 | const isMountedRef = useRef(true); 48 | 49 | useEffect(() => { 50 | startFetching(); 51 | isMountedRef.current = true; 52 | 53 | return () => { 54 | isMountedRef.current = false; 55 | stopFetching(); 56 | }; 57 | }, [inferenceUrl]); 58 | 59 | const startFetching = (): void => { 60 | stopFetching(); 61 | setIsLoading(true); 62 | fetchInferenceData(); 63 | intervalRef.current = window.setInterval(fetchInferenceData, refreshRate); 64 | }; 65 | 66 | const stopFetching = (): void => { 67 | if (intervalRef.current) { 68 | clearInterval(intervalRef.current); 69 | intervalRef.current = null; 70 | } 71 | }; 72 | 73 | const fetchInferenceData = async (): 
Promise => { 74 | try { 75 | const response = await fetch(inferenceUrl, { 76 | cache: 'no-store', 77 | }); 78 | 79 | if (!isMountedRef.current) return; 80 | 81 | if (!response.ok) { 82 | throw new Error(`HTTP error! Status: ${response.status}`); 83 | } 84 | 85 | const jsonData = await response.json(); 86 | setInferenceData(jsonData); 87 | setError(null); 88 | setIsLoading(false); 89 | } catch (err) { 90 | if (isMountedRef.current) { 91 | const message = err instanceof Error ? err.message : 'Failed to load inference data'; 92 | setError(message); 93 | setIsLoading(false); 94 | console.error('Inference data fetch error:', message); 95 | } 96 | } 97 | }; 98 | 99 | // Render error or loading states conditionally 100 | if (error) { 101 | return ( 102 |
103 |

{error}

104 | 105 |
106 | ); 107 | } 108 | 109 | if (isLoading && !inferenceData) { 110 | return
Loading...
; 111 | } 112 | 113 | if (!inferenceData) { 114 | return
No data available
; 115 | } 116 | 117 | return ( 118 |
119 |

Inference Response

120 | true} 126 | /> 127 |
128 | ); 129 | }; 130 | 131 | export default InferenceDisplay; -------------------------------------------------------------------------------- /examples/react-app/src/components/InferenceInstruction.css: -------------------------------------------------------------------------------- 1 | /* InferenceInstruction.css */ 2 | .inference-instruction { 3 | border: 1px solid #ddd; 4 | border-radius: 8px; 5 | overflow: hidden; 6 | background-color: #272822; 7 | margin: 10px 0; 8 | padding: 15px; 9 | display: flex; 10 | flex-direction: column; 11 | gap: 10px; 12 | } 13 | 14 | .instruction-input { 15 | width: 100%; 16 | padding: 10px; 17 | border: 1px solid #383830; 18 | border-radius: 4px; 19 | background-color: #1e1f1c; 20 | color: #f8f8f2; 21 | font-family: inherit; 22 | font-size: 14px; 23 | resize: vertical; 24 | } 25 | 26 | .instruction-input:focus { 27 | outline: none; 28 | border-color: #66d9ef; 29 | } 30 | 31 | .instruction-controls { 32 | display: flex; 33 | justify-content: space-between; 34 | align-items: center; 35 | } 36 | 37 | .instruction-error { 38 | color: #f92672; 39 | font-size: 14px; 40 | flex-grow: 1; 41 | } 42 | 43 | .instruction-button { 44 | padding: 8px 16px; 45 | border: none; 46 | border-radius: 4px; 47 | background-color: #a6e22e; 48 | color: #272822; 49 | font-weight: 500; 50 | cursor: pointer; 51 | min-width: 100px; 52 | } 53 | 54 | .instruction-button:hover:not(:disabled) { 55 | background-color: #c3f53e; 56 | } 57 | 58 | .instruction-button:disabled { 59 | background-color: #75715e; 60 | cursor: not-allowed; 61 | opacity: 0.7; 62 | } -------------------------------------------------------------------------------- /examples/react-app/src/components/InferenceInstruction.tsx: -------------------------------------------------------------------------------- 1 | // InferenceInstruction.tsx 2 | import React, { useState } from 'react'; 3 | import './InferenceInstruction.css'; 4 | 5 | interface InferenceInstructionProps { 6 | settingsUrl: string; 7 | 
placeholder?: string; 8 | buttonText?: string; 9 | onSettingsSubmitted?: (success: boolean, response?: any) => void; 10 | } 11 | 12 | const InferenceInstruction: React.FC = ({ 13 | settingsUrl, 14 | placeholder = 'cap en', 15 | buttonText = 'Apply', 16 | onSettingsSubmitted 17 | }) => { 18 | const [prompt, setPrompt] = useState(''); 19 | const [isSubmitting, setIsSubmitting] = useState(false); 20 | const [error, setError] = useState(null); 21 | 22 | const handleInputChange = (e: React.ChangeEvent) => { 23 | setPrompt(e.target.value); 24 | if (error) setError(null); 25 | }; 26 | 27 | const handleSubmit = async () => { 28 | if (!prompt.trim()) { 29 | setError('Please enter prompt text'); 30 | return; 31 | } 32 | 33 | setIsSubmitting(true); 34 | setError(null); 35 | 36 | try { 37 | const response = await fetch(settingsUrl, { 38 | method: 'POST', 39 | headers: { 40 | 'Content-Type': 'application/json', 41 | }, 42 | body: JSON.stringify({ prompt: prompt.trim() + '\n' }), 43 | }); 44 | 45 | if (!response.ok) { 46 | throw new Error(`HTTP error! Status: ${response.status}`); 47 | } 48 | 49 | const result = await response.json(); 50 | if (onSettingsSubmitted) { 51 | onSettingsSubmitted(true, result); 52 | } 53 | 54 | // Optional: clear the input after successful submission 55 | // setInstruction(''); 56 | } catch (err) { 57 | const message = err instanceof Error ? err.message : 'Failed to submit instructions'; 58 | setError(message); 59 | console.error('Instruction submission error:', message); 60 | if (onSettingsSubmitted) { 61 | onSettingsSubmitted(false); 62 | } 63 | } finally { 64 | setIsSubmitting(false); 65 | } 66 | }; 67 | 68 | return ( 69 |
70 |