├── LICENSE
├── README.md
├── assets
│   └── logo.png
├── gallery
│   ├── gallery.yaml
│   └── templates
│       ├── chroma.yaml
│       ├── cloudflare_tunnel.yaml
│       ├── helix-runner.yaml
│       ├── llama3.yaml
│       ├── llocal-search.yaml
│       ├── lorax.yaml
│       ├── n8n.yaml
│       ├── nodered.yaml
│       ├── nvidia-nim-llm.yaml
│       ├── nvidia-triton.yaml
│       ├── ollama.yaml
│       ├── open-webui.yaml
│       ├── opendevin.yaml
│       ├── replicate-cog.yaml
│       ├── uptime-kuma.yaml
│       ├── vllm.yaml
│       ├── webhookrelay_forward.yaml
│       └── webhookrelay_tunnel.yaml
└── samples
    ├── calendso
    │   ├── README.md
    │   └── calendso-synpse-caddy.yaml
    ├── clickhouse
    │   ├── README.md
    │   ├── clickhouse-synpse.yaml
    │   └── config.xml
    ├── collab
    │   └── collab-gpu.yaml
    ├── drone
    │   ├── README.md
    │   ├── drone-runner-synpse.yaml
    │   └── drone-synpse.yaml
    ├── firefox
    │   ├── README.md
    │   └── firefox.yaml
    ├── gladys-assistant
    │   ├── README.md
    │   └── gladys.yaml
    ├── grafana
    │   ├── README.md
    │   ├── grafana-synpse-webhookrelay.yaml
    │   ├── grafana-synpse.yaml
    │   └── grafana.ini
    ├── home-assistant
    │   ├── README.md
    │   ├── ha-synpse-webhookrelay.yaml
    │   └── ha-synpse.yaml
    ├── jupyterlab
    │   ├── README.md
    │   ├── jupyterlab-gpu.yaml
    │   └── jupyterlab.yaml
    ├── nfs-server
    │   ├── nfs-client.yaml
    │   └── nfs-server.yaml
    ├── nginx
    │   └── nginx.yaml
    ├── node-red
    │   ├── README.md
    │   └── nodered-synpse.yaml
    ├── ollama
    │   └── ollama.yaml
    ├── owncloud
    │   └── owncloud-synpse.yaml
    ├── pihole
    │   └── pihole-synpse.yaml
    ├── prometheus
    │   ├── README.md
    │   ├── prometheus-config.yml
    │   ├── prometheus-synpse-webhookrelay.yaml
    │   └── prometheus-synpse.yaml
    ├── uptime-kuma
    │   ├── README.md
    │   ├── uptime-kuma-caddy.yaml
    │   ├── uptime-kuma-webhookrelay.yaml
    │   ├── uptime-kuma.png
    │   └── uptime-kuma.yaml
    ├── vlllm
    │   └── vllm-mistral.yaml
    └── webhookrelay
        └── webhookrelay-synpse.yaml
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | **The easiest way to bootstrap your devices and deploy applications.
7 | Synpse manages OTA deployments & updates and provides SSH and network access.**
8 |
9 | ---
10 |
11 |
12 | Website •
13 | Samples •
14 | Discussions •
15 | Docs •
16 | Discord •
17 | Cloud •
18 | Buy us a COFFEE
19 |
20 |
21 |
22 |
23 |
24 | ## Synpse.NET - device orchestration for the rest of us
25 |
26 | Synpse provides device fleet management, application deployment and configuration. The whole process is simple, with a very low learning curve.
27 |
28 | ## Key features
29 |
30 | - Device inventory management: each of your devices registers as an entry in our database and is visible via the UI/CLI/Dashboard.
31 | - SSH/TCP connections to your devices via tunnels: your device doesn't need a public IP for you to access it.
32 | - Declarative application deployment: store your manifests in GitHub, GitLab or any other SCM repository and deploy applications via the UI or CLI (see the example manifest below).
33 | - Device filtering for grouping and application scheduling: use labels and selectors to deploy applications to a subset of your devices, for example for A/B testing.
34 | - Secret management: Synpse provides an encrypted secret store for passing sensitive configuration to your applications.
35 | - Namespaces: separate applications and secrets on the same device using namespaces.
36 |
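37 | For example, a minimal application manifest looks like this (a sketch modelled
38 | on the gallery templates in this repository; the nginx container and the
39 | `env: production` label selector are illustrative):
40 | 
41 | ```yaml
42 | name: nginx
43 | scheduling:
44 |   type: Conditional
45 |   selectors:
46 |     env: production   # only devices labelled env=production run this app
47 | spec:
48 |   containers:
49 |   - name: nginx
50 |     image: nginx:latest
51 |     ports:
52 |     - 8080:80
53 |   proxy:
54 |     tunnels:
55 |     - localPort: 8080 # optional: remote access to the service via a Synpse tunnel
56 | ```
57 | 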
37 | ## Supported platforms
38 |
39 | Synpse currently supports all Linux-based distributions. It's possible to run it on Darwin (macOS) systems too, but you will need to install the agent as a daemon yourself.
40 |
41 | Windows support (as a binary executable) is planned, but it's not a prioritized feature yet. If you would like to see Windows support implemented sooner, please contact us.
42 |
43 | 
44 | | Platform | Architecture | Status |
45 | |----------|--------------|--------|
46 | | Linux    | amd64        | ✅     |
47 | | Linux    | aarch64      | ✅     |
48 | | Linux    | arm32        | ✅     |
49 | | Darwin   | amd64        | ⏳     |
50 | | Darwin   | aarch64      | ⏳     |
51 | | Windows  | amd64        | ⏳     |
52 | 
82 | ## Samples
83 |
84 | You can view samples of applications deployed on Synpse in the [samples/](https://github.com/synpse-hq/synpse/tree/main/samples) directory. Feel free to submit a pull request with your favorite app!
85 |
86 | - [Cal.com](samples/calendso) - easy meeting scheduling
87 | - [Grafana](samples/grafana) - monitoring/metrics stack
88 | - [Clickhouse](samples/clickhouse) - column-oriented database management system (DBMS)
89 | - [Prometheus](samples/prometheus) - metrics collector, database and query engine
90 | - [Home Assistant](samples/home-assistant) - self-hosted home automation hub that supports thousands of integrations
91 | - [Gladys Home Assistant](samples/gladys-assistant) - a lightweight and privacy focused home assistant
92 | - [Node-RED](samples/node-red) - no-code automation solution for anything from home automation to industrial applications
93 | - [ownCloud](samples/owncloud) - privacy focused essential business tool
94 | - [Firefox](samples/firefox) - web browser
95 | - [Drone CI/CD](samples/drone) - self-hosted CI/CD solution
96 | - [Jupyter Labs](samples/jupyterlab/) - web-based interactive development environment for Jupyter notebooks
97 | - [Pi-hole](samples/pihole) - network-wide ad blocking
98 | - [Uptime Kuma](samples/uptime-kuma) - self-hosted monitoring solution for your websites (UptimeRobot/Pingdom alternative)
99 | - [Webhook Relay](samples/webhookrelay) - integration with Webhook Relay
100 | - [NFS server](samples/nfs-server) - NFS server in a container
101 |
102 | ## Community
103 |
104 | Synpse is a young project and our community is constantly growing. Join our [Discord channel](https://discord.gg/dkgN4vVNdm) or participate in [GitHub Discussions](https://github.com/synpse-hq/synpse/discussions).
105 |
106 | ## Bug reporting/getting help
107 |
108 | If you get stuck, are unsure how to achieve something, or want to request a new feature, you can:
109 |
110 | 1. Read the docs: https://docs.synpse.net
111 | 2. Submit an issue here: https://github.com/synpse-hq/synpse/issues
112 |
--------------------------------------------------------------------------------
/assets/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/synpse-hq/synpse/85f9092fdb43007ce2d9ed7d9c3e9322b3992446/assets/logo.png
--------------------------------------------------------------------------------
/gallery/gallery.yaml:
--------------------------------------------------------------------------------
1 | featured:
2 | - ollama
3 | - open-webui
4 | - vllm-mistral-7b
5 | - vllm-gemma-7b
6 | - incredibly-fast-whisper
7 | - sdxl
8 | - clarity-upscaler
9 | - llava-13b
10 | - google-research/maxim
11 | - helix-runner
12 |
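13 | # Each template below points, via specUrl, to a Go-templated manifest under
14 | # gallery/templates/; the parameters declared for an entry are substituted
15 | # into that manifest as {{ .Name }} placeholders when the application is created.
16 | 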
13 | templates:
14 | - id: ollama
15 | organization: Ollama
16 | name: Ollama
17 | categories:
18 | - AI
19 | - Text
20 | - Inference
21 | # imageUrl: 'https://avatars.githubusercontent.com/u/151674099?s=200&v=4'
22 | imageUrl: 'https://storage.googleapis.com/downloads.synpse.net/gallery-assets/ollama.png'
23 | description: 'Run Llama 2, Code Llama, and other models on CPU or GPU machines.'
24 | specUrl: https://raw.githubusercontent.com/synpse-hq/synpse/main/gallery/templates/ollama.yaml
25 | gpu: optional
26 | parameters:
27 | - name: Port
28 | description: "Port to expose Ollama API on"
29 | type: int
30 | default: "11434"
31 | - name: ModelDirectory
32 | description: "Directory to store model files"
33 | type: text
34 | default: "/data/ollama"
35 |
36 | - id: nvidia-nim-llama3-8b-instruct
37 | organization: Nvidia
38 | name: NIM llama-3-8b-instruct
39 | categories:
40 | - AI
41 | - Text
42 | - Inference
43 | - NIM
44 | imageUrl: 'https://storage.googleapis.com/downloads.synpse.net/gallery-assets/nim-llama3.webp'
45 | description: 'Run llama3 8b instruct model using NVIDIA NIM on a GPU machine.'
46 | specUrl: https://raw.githubusercontent.com/synpse-hq/synpse/main/gallery/templates/nvidia-nim-llm.yaml
47 | gpu: required
48 | parameters:
49 | - name: Port
50 | description: "Port to expose NVIDIA NIM API server on"
51 | type: int
52 | default: "8000"
53 | - name: ModelImage
54 | description: "Model image to use from the nvcr image registry"
55 | type: text
56 | default: "llama3-8b-instruct:1.0.0"
57 | - name: ModelName
58 | description: "Model name to use in the API calls"
59 | type: text
60 | default: "meta/llama3-8b-instruct"
61 | - name: ModelDirectory
62 | description: "Directory to store model files"
63 | type: text
64 | default: "/data/nvidia"
65 | - name: NgcApiKey
66 | description: "NGC API key, get yours from https://org.ngc.nvidia.com/setup/api-key"
67 | type: text
68 | default: ""
69 |
70 | - id: ollama-llama3-8b
71 | organization: Ollama
72 | name: llama3 8b
73 | categories:
74 | - AI
75 | - Text
76 | - Inference
77 | imageUrl: 'https://storage.googleapis.com/downloads.synpse.net/gallery-assets/llama3.png'
78 | description: 'Run Llama 3 on-prem.'
79 | specUrl: https://raw.githubusercontent.com/synpse-hq/synpse/main/gallery/templates/llama3.yaml
80 | gpu: required
81 | parameters:
82 | - name: Port
83 | description: "Port to expose Ollama API on"
84 | type: int
85 | default: "11434"
86 | - name: Tag
87 | description: "Model variant. Check https://ollama.com/library/llama3/tags for additional versions."
88 | type: text
89 | default: "instruct"
90 | - name: ModelDirectory
91 | description: "Directory to store model files"
92 | type: text
93 | default: "/data/ollama"
94 |
95 | - id: opendevin
96 | organization: OpenDevin
97 | name: OpenDevin
98 | categories:
99 | - AI
100 | imageUrl: https://storage.googleapis.com/downloads.synpse.net/gallery-assets/opendevin_cover.png
101 | description: OpenDevin is an autonomous AI software engineer capable of executing complex engineering tasks.
102 | specUrl: https://raw.githubusercontent.com/synpse-hq/synpse/main/gallery/templates/opendevin.yaml
103 | gpu: optional
104 | parameters:
105 | - name: Port
106 |     description: "Port to expose the OpenDevin web UI on"
107 | type: int
108 | default: "3000"
109 | - name: Tag
110 | description: "OpenDevin version. Check https://github.com/OpenDevin/OpenDevin/releases for other versions"
111 | type: text
112 | default: "0.6.2"
113 | - name: WorkspaceBase
114 | description: "Workspace directory"
115 | type: text
116 | default: "/data/opendevin-workspace"
117 | - name: SshPassword
118 | description: "SSH password"
119 | type: text
120 | default: "verysecret"
121 |
122 | - id: ollama-llama3-70b
123 | organization: Ollama
124 | name: llama3 70b
125 | categories:
126 | - AI
127 | - Text
128 | - Inference
129 | imageUrl: 'https://storage.googleapis.com/downloads.synpse.net/gallery-assets/llama3.png'
130 |   description: 'Run quantized Llama 3 70B on-prem.'
131 | specUrl: https://raw.githubusercontent.com/synpse-hq/synpse/main/gallery/templates/llama3.yaml
132 | gpu: required
133 | parameters:
134 | - name: Port
135 | description: "Port to expose Ollama API on"
136 | type: int
137 | default: "11434"
138 | - name: Tag
139 | description: "Model variant. Check https://ollama.com/library/llama3/tags for additional versions."
140 | type: text
141 | default: "70b-instruct-q5_1"
142 | - name: ModelDirectory
143 | description: "Directory to store model files"
144 | type: text
145 | default: "/data/ollama"
146 |
147 | - id: open-webui
148 | organization: open-webui
149 | name: Open WebUI
150 | categories:
151 | - AI
152 | - Text
153 | - Chat
154 | # imageUrl: 'https://avatars.githubusercontent.com/u/151674099?s=200&v=4'
155 | imageUrl: 'https://storage.googleapis.com/downloads.synpse.net/gallery-assets/open-webui.png'
156 | description: 'Open WebUI is an extensible, feature-rich, and user-friendly self-hosted WebUI designed to operate entirely offline.'
157 | specUrl: https://raw.githubusercontent.com/synpse-hq/synpse/main/gallery/templates/open-webui.yaml
158 | gpu: optional
159 | parameters:
160 | - name: Port
161 | description: "Port to expose web UI on"
162 | type: int
163 | default: "8844"
164 | - name: ModelDirectory
165 | description: "Directory to store model files and configuration in"
166 | type: text
167 | default: "/data/open-webui"
168 |
169 | - id: llocal-search
170 | organization: nilsherzig
171 | name: LLocalSearch
172 | categories:
173 | - AI
174 | - Text
175 | - Chat
176 | imageUrl: 'https://storage.googleapis.com/downloads.synpse.net/gallery-assets/llocalsearch.png'
177 | description: 'LLocalSearch is a completely locally running search aggregator using LLM Agents. Ask a question and the system will use a chain of LLMs to find the answer. No OpenAI or Google API keys are needed.'
178 | specUrl: https://raw.githubusercontent.com/synpse-hq/synpse/main/gallery/templates/llocal-search.yaml
179 | gpu: required
180 | parameters:
181 | - name: Port
182 | description: "Port to expose web UI on"
183 | type: int
184 | default: "4173"
185 | - name: ChromaDataDir
186 | description: "Directory to store Chroma data in"
187 | type: text
188 | default: "/data/chroma"
189 | - name: OllamaDataDir
190 | description: "Directory to store Ollama models data in"
191 | type: text
192 | default: "/data/ollama"
193 | - name: RedisDataDir
194 | description: "Directory to store Redis data in"
195 | type: text
196 | default: "/data/redis"
197 | - name: SearxngDataDir
198 | description: "Directory to store searxng data in"
199 | type: text
200 | default: "/data/searxng"
201 |
202 | - id: lorax
203 | organization: Predibase
204 | name: Lorax
205 | categories:
206 | - AI
207 | - Text
208 | - Inference
209 | imageUrl: 'https://synpse.com/cdn-cgi/image/width=338,quality=75/https://storage.googleapis.com/downloads.synpse.net/gallery-assets/lorax.png'
210 | description: 'LoRAX (LoRA eXchange) is a framework that allows users to serve thousands of fine-tuned models on a single GPU, dramatically reducing the cost of serving without compromising on throughput or latency.'
211 | specUrl: https://raw.githubusercontent.com/synpse-hq/synpse/main/gallery/templates/lorax.yaml
212 | gpu: required
213 | parameters:
214 | - name: Port
215 | description: "Port to expose Lorax API on"
216 | type: int
217 | default: "8080"
218 | - name: Model
219 | description: "Model ID to serve (base model https://loraexchange.ai/models/base_models/)"
220 | type: text
221 | default: "mistralai/Mistral-7B-Instruct-v0.1"
222 | - name: HuggingFaceToken
223 |     description: "HuggingFace token, used to access private or gated base models"
224 | type: text
225 | default: ""
226 | - name: ModelDirectory
227 | description: "Directory to store model files"
228 | type: text
229 | default: "/data/lorax"
230 |
231 | - id: vllm-mistral-7b
232 | organization: Mistral AI
233 | name: Mistral 7B (vLLM)
234 | categories:
235 | - AI
236 | - Text
237 | - vLLM
238 | - Inference
239 | imageUrl: 'https://storage.googleapis.com/downloads.synpse.net/gallery-assets/mistral-7b-cover.png'
240 | description: 'Run Mistral 7B model on a GPU machine. Exposes OpenAI compatible API.'
241 | specUrl: https://raw.githubusercontent.com/synpse-hq/synpse/main/gallery/templates/vllm.yaml
242 | gpu: required
243 | parameters:
244 | - name: Port
245 | description: "Port to expose API on"
246 | type: int
247 | default: "8000"
248 | - name: ModelDirectory
249 | description: "Directory to store model files"
250 | type: text
251 | default: "/data/vllm"
252 | - name: Model
253 | description: "Model name (supported models: https://docs.vllm.ai/en/latest/models/supported_models.html)"
254 | type: text
255 | readonly: true
256 | default: "mistralai/Mistral-7B-v0.1"
257 | - name: HuggingFaceToken
258 | description: "Hugging Face API token, get yours here https://huggingface.co/docs/hub/en/security-tokens"
259 | type: text
260 | default: ""
261 | #
262 | # Google Gemma models
263 | #
264 | - id: vllm-gemma-7b
265 | organization: Google
266 | name: Gemma 7B (vLLM)
267 | categories:
268 | - AI
269 | - Text
270 | - Inference
271 | imageUrl: 'https://storage.googleapis.com/gweb-uniblog-publish-prod/images/gemma-header.width-1600.format-webp.webp'
272 | description: 'Run Google Gemma 7B model on a GPU machine. Exposes OpenAI compatible API.'
273 | specUrl: https://raw.githubusercontent.com/synpse-hq/synpse/main/gallery/templates/vllm.yaml
274 | gpu: required
275 | parameters:
276 | - name: Port
277 | description: "Port to expose API on"
278 | type: int
279 | default: "8000"
280 | - name: ModelDirectory
281 | description: "Directory to store model files"
282 | type: text
283 | default: "/data/vllm"
284 | - name: Model
285 | description: "Model name (supported models: https://docs.vllm.ai/en/latest/models/supported_models.html)"
286 | type: text
287 | readonly: true
288 |     default: "google/gemma-7b"
289 | - name: HuggingFaceToken
290 | description: "Hugging Face API token, get yours here https://huggingface.co/docs/hub/en/security-tokens"
291 | type: text
292 | default: ""
293 | - id: vllm-gemma-2b
294 | organization: Google
295 | name: Gemma 2B (vLLM)
296 | categories:
297 | - AI
298 | - Text
299 | - Inference
300 | imageUrl: 'https://storage.googleapis.com/gweb-uniblog-publish-prod/images/gemma-header.width-1600.format-webp.webp'
301 | description: 'Run Google Gemma 2B model on a GPU machine. Exposes OpenAI compatible API.'
302 | specUrl: https://raw.githubusercontent.com/synpse-hq/synpse/main/gallery/templates/vllm.yaml
303 | gpu: required
304 | parameters:
305 | - name: Port
306 | description: "Port to expose API on"
307 | type: int
308 | default: "8000"
309 | - name: ModelDirectory
310 | description: "Directory to store model files"
311 | type: text
312 | default: "/data/vllm"
313 | - name: Model
314 | description: "Model name (supported models: https://docs.vllm.ai/en/latest/models/supported_models.html)"
315 | type: text
316 | default: "google/gemma-2b"
317 | readonly: true
318 | - name: HuggingFaceToken
319 | description: "Hugging Face API token, get yours here https://huggingface.co/docs/hub/en/security-tokens"
320 | type: text
321 | default: ""
322 | #
323 | # Replicate Cogs
324 | #
325 | - id: incredibly-fast-whisper
326 | organization: vaibhavs10
327 | name: Incredibly Fast Whisper
328 | categories:
329 | - AI
330 | - Audio
331 | - Inference
332 | imageUrl: 'https://tjzk.replicate.delivery/models_models_featured_image/4c5d637c-c441-4857-9791-7c11111b38b4/52ebbd85-50a7-4741-b398-30e31.webp'
333 | description: 'whisper-large-v3, incredibly fast, powered by Hugging Face Transformers!'
334 | specUrl: https://raw.githubusercontent.com/synpse-hq/synpse/main/gallery/templates/replicate-cog.yaml
335 | gpu: required
336 | parameters:
337 | - name: Port
338 | description: "Port to expose API on"
339 | type: int
340 | default: "5000"
341 | - name: CogImage
342 | description: "Model - https://replicate.com/vaibhavs10/incredibly-fast-whisper?input=docker"
343 | type: text
344 | default: "r8.im/vaibhavs10/incredibly-fast-whisper@sha256:3ab86df6c8f54c11309d4d1f930ac292bad43ace52d10c80d87eb258b3c9f79c"
345 | readonly: true
346 | - name: Usage
347 | readonly: true
348 | default: 'This model runs on Nvidia A40 (Large) GPU hardware. Predictions typically complete within 47 seconds. The predict time for this model varies significantly based on the inputs.'
349 | - name: InputExample
350 | readonly: true
351 | default:
352 | |
353 | "input": {
354 | "task": "transcribe",
355 | "audio": "https://replicate.delivery/pbxt/Js2Fgx9MSOCzdTnzHQLJXj7abLp3JLIG3iqdsYXV24tHIdk8/OSR_uk_000_0050_8k.wav",
356 | "language": "None",
357 | "timestamp": "chunk",
358 | "batch_size": 64,
359 | "diarise_audio": false
360 | }
361 |
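362 | # Note: the replicate-cog template wraps models served with Cog, which exposes
363 | # an HTTP API; an InputExample payload like the one above is POSTed to the
364 | # /predictions endpoint (sketch; host and port depend on the Port parameter):
365 | #   curl http://localhost:5000/predictions -X POST \
366 | #     -H 'Content-Type: application/json' \
367 | #     -d '{"input": {"task": "transcribe", "audio": "https://..."}}'
368 | 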
362 | - id: sdxl
363 | organization: Stability AI
364 | name: Stable Diffusion XL
365 | categories:
366 | - AI
367 | - Images
368 | - Inference
369 | imageUrl: 'https://tjzk.replicate.delivery/models_models_featured_image/9065f9e3-40da-4742-8cb8-adfa8e794c0d/sdxl_cover.jpg'
370 | description: 'A text-to-image generative AI model that creates beautiful images'
371 | specUrl: https://raw.githubusercontent.com/synpse-hq/synpse/main/gallery/templates/replicate-cog.yaml
372 | gpu: required
373 | parameters:
374 | - name: Port
375 | description: "Port to expose API on"
376 | type: int
377 | default: "5000"
378 | - name: CogImage
379 | description: "Model - https://replicate.com/stability-ai/sdxl?input=docker"
380 | type: text
381 | default: "r8.im/stability-ai/sdxl@sha256:39ed52f2a78e934b3ba6e2a89f5b1c712de7dfea535525255b1aa35c5565e08b"
382 | readonly: true
383 | - name: Usage
384 | readonly: true
385 | default: 'This model runs on Nvidia A40 (Large) GPU hardware. Predictions typically complete within 12 seconds.'
386 | - name: InputExample
387 | readonly: true
388 | default:
389 | |
390 | "input": {
391 | "width": 768,
392 | "height": 768,
393 | "prompt": "An astronaut riding a rainbow unicorn, cinematic, dramatic",
394 | "refine": "expert_ensemble_refiner",
395 | "scheduler": "K_EULER",
396 | "lora_scale": 0.6,
397 | "num_outputs": 1,
398 | "guidance_scale": 7.5,
399 | "apply_watermark": false,
400 | "high_noise_frac": 0.8,
401 | "negative_prompt": "",
402 | "prompt_strength": 0.8,
403 | "num_inference_steps": 25
404 | }
405 |
406 | - id: clarity-upscaler
407 | organization: philz1337x
408 | name: Clarity Upscaler
409 | categories:
410 | - AI
411 | - Images
412 | - Inference
413 | imageUrl: 'https://tjzk.replicate.delivery/models_models_cover_image/bc4eb965-f228-456f-96e2-430af00ca582/Bildschirmfoto_2024-03-20_um_07.2.png'
414 |   description: 'High-resolution image upscaler and enhancer'
415 | specUrl: https://raw.githubusercontent.com/synpse-hq/synpse/main/gallery/templates/replicate-cog.yaml
416 | gpu: required
417 | parameters:
418 | - name: Port
419 | description: "Port to expose API on"
420 | type: int
421 | default: "5000"
422 | - name: CogImage
423 | description: "Model - https://replicate.com/philz1337x/clarity-upscaler?input=docker"
424 | type: text
425 | default: "r8.im/philz1337x/clarity-upscaler@sha256:abd484acb51ad450b06f42f76940fa5c1b37511dbf70ac8594fdacd5c3302307"
426 | readonly: true
427 | - name: Usage
428 | readonly: true
429 | default: 'This model runs on Nvidia A100 (80GB) GPU hardware. Predictions typically complete within 39 seconds. The predict time for this model varies significantly based on the inputs.'
430 | - name: InputExample
431 | readonly: true
432 | default:
433 | |
434 | "input": {
435 | "seed": 1337,
436 | "image": "https://replicate.delivery/pbxt/KZVIDUcU15XCjloQMdqitfzi6pau7rO70IuGgdRAyHgku70q/13_before.png",
437 | "prompt": "masterpiece, best quality, highres, ",
438 | "dynamic": 6,
439 | "sd_model": "juggernaut_reborn.safetensors [338b85bc4f]",
440 | "scheduler": "DPM++ 3M SDE Karras",
441 | "creativity": 0.35,
442 | "lora_links": "",
443 | "downscaling": false,
444 | "resemblance": 0.6,
445 | "scale_factor": 2,
446 | "tiling_width": 112,
447 | "tiling_height": 144,
448 | "negative_prompt": "(worst quality, low quality, normal quality:2) JuggernautNegative-neg",
449 | "num_inference_steps": 18,
450 | "downscaling_resolution": 768
451 | }
452 |
453 | - id: llava-13b
454 | organization: yorickvp
455 | name: LLAVA 13B
456 | categories:
457 | - AI
458 | - Images
459 | - Inference
460 | imageUrl: 'https://tjzk.replicate.delivery/models_models_featured_image/454548d6-4978-4d85-bca3-d067dfc031bf/llava.png'
461 | description: 'Visual instruction tuning towards large language and vision models with GPT-4 level capabilities'
462 | specUrl: https://raw.githubusercontent.com/synpse-hq/synpse/main/gallery/templates/replicate-cog.yaml
463 | gpu: required
464 | parameters:
465 | - name: Port
466 | description: "Port to expose API on"
467 | type: int
468 | default: "5000"
469 | - name: CogImage
470 | description: "Model - https://replicate.com/yorickvp/llava-13b?input=docker"
471 | type: text
472 | default: "r8.im/yorickvp/llava-13b@sha256:01359160a4cff57c6b7d4dc625d0019d390c7c46f553714069f114b392f4a726"
473 | readonly: true
474 | - name: Usage
475 | readonly: true
476 | default: 'This model runs on Nvidia A40 (Large) GPU hardware. Predictions typically complete within 5 seconds.'
477 | - name: InputExample
478 | readonly: true
479 | default:
480 | |
481 | "input": {
482 | "image": "https://replicate.delivery/pbxt/KRULC43USWlEx4ZNkXltJqvYaHpEx2uJ4IyUQPRPwYb8SzPf/view.jpg",
483 | "top_p": 1,
484 | "prompt": "Are you allowed to swim here?",
485 | "max_tokens": 1024,
486 | "temperature": 0.2
487 | }
488 |
489 |
490 | - id: google-research/maxim
491 | organization: Google
492 | name: Multi-Axis MLP for Image Processing
493 | categories:
494 | - AI
495 | imageUrl: 'https://tjzk.replicate.delivery/models_models_featured_image/df5769aa-0908-4a2e-9378-c582838461db/1fromGOPR0950.png'
496 |   description: 'MAXIM: a multi-axis MLP architecture for image processing tasks such as deblurring, denoising, deraining, and dehazing'
497 | specUrl: https://raw.githubusercontent.com/synpse-hq/synpse/main/gallery/templates/replicate-cog.yaml
498 | gpu: required
499 | parameters:
500 | - name: Port
501 | description: "Port to expose API on"
502 | type: int
503 | default: "5000"
504 | - name: CogImage
505 | description: "Model - https://replicate.com/google-research/maxim?input=docker"
506 | type: text
507 | default: "r8.im/google-research/maxim@sha256:494ca4d578293b4b93945115601b6a38190519da18467556ca223d219c3af9f9"
508 | readonly: true
509 | - name: Usage
510 | readonly: true
511 | default: 'This model runs on Nvidia T4 GPU hardware. Predictions typically complete within 4 minutes. The predict time for this model varies significantly based on the inputs.'
512 | - name: InputExample
513 | readonly: true
514 | default:
515 | |
516 | "input": {
517 | "image": "https://replicate.delivery/mgxm/6707a57f-4957-4047-b020-2160aed1d27a/1fromGOPR0950.png",
518 | "model": "Image Deblurring (GoPro)"
519 | }
520 |
521 | # Helix
522 |
523 | - id: helix-runner
524 | organization: Helix ML
525 | name: Runner
526 | categories:
527 | - Finetuning
528 | - Chat
529 | - APIs
530 | imageUrl: 'https://storage.googleapis.com/downloads.synpse.net/gallery-assets/helix-resized.png'
531 | description: 'Attach a runner to your self-hosted Helix ML platform (https://tryhelix.ai)'
532 | specUrl: https://raw.githubusercontent.com/synpse-hq/synpse/main/gallery/templates/helix-runner.yaml
533 | gpu: required
534 | parameters:
535 | - name: ApiHost
536 | description: "API host of your Helix ML platform where controlplane is running"
537 | type: text
538 | default: ""
539 | - name: ModelDirectory
540 | description: "Directory to store model files"
541 | type: text
542 | default: "/data/helix"
543 | - name: ApiToken
544 | description: "Helix controlplane API token for the runners"
545 | type: text
546 | default: ""
547 |
548 | # Nvidia
549 |
550 | - id: nvidia-triton-inference-server
551 | organization: Nvidia
552 | name: Triton Inference Server
553 | imageUrl: 'https://storage.googleapis.com/downloads.synpse.net/gallery-assets/og-gtc-22-triton-web-100.jpg'
554 | description: 'Triton Inference Server provides a cloud and edge inferencing solution'
555 | specUrl: https://raw.githubusercontent.com/synpse-hq/synpse/main/gallery/templates/nvidia-triton.yaml
556 | gpu: optional
557 | parameters:
558 | - name: ModelDirectory
559 | description: "Model repository, populate this directory with your models on the host machine"
560 | type: text
561 | default: ""
562 |
563 | # Uptime kuma
564 |
565 | - id: uptime-monitoring
566 | categories:
567 | - Monitoring
568 | organization: kuma
569 | name: Uptime Kuma
570 | imageUrl: 'https://storage.googleapis.com/downloads.synpse.net/gallery-assets/kuma.jpg'
571 | description: 'Uptime Kuma is an easy-to-use self-hosted monitoring tool. Integrates with popular services like Slack and Discord and provides a nice looking status page for your services.'
572 | specUrl: https://raw.githubusercontent.com/synpse-hq/synpse/main/gallery/templates/uptime-kuma.yaml
573 | parameters:
574 | - name: Port
575 | description: "Port on which the web UI will be exposed"
576 | type: text
577 | default: "3001"
578 | - name: DataDir
579 | description: "Directory to store Uptime Kuma data"
580 | type: text
581 | default: "/data/uptime-kuma"
582 |
583 | # Node-RED
584 |
585 | - id: nodered
586 | categories:
587 | - IoT
588 | - Automation
589 | organization: nodered
590 | name: Node-RED
591 | imageUrl: 'https://storage.googleapis.com/downloads.synpse.net/gallery-assets/nodered.png'
592 | description: 'Node-RED is a programming tool for wiring together hardware devices, APIs and online services in new and interesting ways.'
593 | specUrl: https://raw.githubusercontent.com/synpse-hq/synpse/main/gallery/templates/nodered.yaml
594 | parameters:
595 | - name: Port
596 | description: "Port on which the web UI will be exposed"
597 | type: text
598 | default: "1880"
599 | - name: DataDir
600 | description: "Directory to store Node-RED data (flows, user data, settings)"
601 | type: text
602 | default: "/data/nodered"
603 |
604 | # n8n
605 |
606 | - id: n8n
607 | categories:
608 | - IoT
609 | - Automation
610 | organization: n8n
611 | name: n8n
612 | imageUrl: 'https://synpse.com/cdn-cgi/image/width=338,quality=75/https://storage.googleapis.com/downloads.synpse.net/gallery-assets/n8n.png'
613 | description: 'Build powerful workflows, really fast. N8n is a self-hosted Zapier/Make alternative'
614 | specUrl: https://raw.githubusercontent.com/synpse-hq/synpse/main/gallery/templates/n8n.yaml
615 | parameters:
616 | - name: Port
617 | description: "Port on which the web UI will be exposed"
618 | type: text
619 |     default: "5678"
620 | - name: DataDir
621 | description: "Directory to store N8n data"
622 | type: text
623 | default: "/data/n8n"
624 |
625 | # Databases
626 |
627 | - id: chroma
628 | categories:
629 | - Database
630 | - Embeddings
631 | organization: chroma
632 | name: Chroma
633 | imageUrl: 'https://storage.googleapis.com/downloads.synpse.net/gallery-assets/chroma.png'
634 | description: 'Chroma is a self-hosted database for embeddings and AI models. It is designed to be fast, scalable, and easy to use.'
635 | specUrl: https://raw.githubusercontent.com/synpse-hq/synpse/main/gallery/templates/chroma.yaml
636 | parameters:
637 | - name: Port
638 | description: "Port on which the API will be exposed"
639 | type: text
640 | default: "8000"
641 | - name: DataDir
642 | description: "Directory to store Chroma data"
643 | type: text
644 | default: "/data/chroma"
645 |
646 | # Networks
647 |
648 | - id: webhookrelay-tunnel
649 | categories:
650 | - Networking
651 | organization: webhookrelay
652 | name: Webhook Relay Tunnel
653 | imageUrl: 'https://synpse.com/cdn-cgi/image/width=338,quality=75/https://storage.googleapis.com/downloads.synpse.net/gallery-assets/whr_tunnel_cover.png'
654 |   description: 'Webhook Relay tunnels can expose your local web servers to the internet. Use them to expose APIs and websites straight from your laptop.'
655 | specUrl: https://raw.githubusercontent.com/synpse-hq/synpse/main/gallery/templates/webhookrelay_tunnel.yaml
656 | parameters:
657 | - name: RelayKey
658 | description: "Get your API token and secret from https://my.webhookrelay.com/tokens"
659 | type: text
660 | default: ""
661 | - name: RelaySecret
662 | description: "Get your API token and secret from https://my.webhookrelay.com/tokens"
663 | type: text
664 | default: ""
665 | - name: Tunnels
666 | description: "One or more tunnel names separated by comma, create them in https://my.webhookrelay.com/tunnels"
667 | type: text
668 | default: ""
669 |
670 | - id: webhookrelay-forwarding
671 | categories:
672 | - Networking
673 | organization: webhookrelay
674 | name: Webhook Relay Forwarding
675 | imageUrl: 'https://synpse.com/cdn-cgi/image/width=338,quality=75/https://storage.googleapis.com/downloads.synpse.net/gallery-assets/whr-landing2.png'
676 | description: 'Webhook Relay forwarding allows you to receive webhooks in CI/CD systems and developer machines.'
677 | specUrl: https://raw.githubusercontent.com/synpse-hq/synpse/main/gallery/templates/webhookrelay_forward.yaml
678 | parameters:
679 | - name: RelayKey
680 | description: "Get your API token and secret from https://my.webhookrelay.com/tokens"
681 | type: text
682 | default: ""
683 | - name: RelaySecret
684 | description: "Get your API token and secret from https://my.webhookrelay.com/tokens"
685 | type: text
686 | default: ""
687 | - name: Buckets
688 | description: "One or more bucket names separated by comma, create them in https://my.webhookrelay.com/buckets"
689 | type: text
690 | default: ""
691 |
692 | - id: cloudflare
693 | categories:
694 | - Networking
695 | organization: Cloudflare
696 | name: Cloudflare Tunnel
697 | imageUrl: 'https://synpse.com/cdn-cgi/image/width=338,quality=75/https://storage.googleapis.com/downloads.synpse.net/gallery-assets/cloudflare.png'
698 | description: 'Cloudflare Tunnel provides you with a secure way to connect your resources to Cloudflare without a publicly routable IP address'
699 | specUrl: https://raw.githubusercontent.com/synpse-hq/synpse/main/gallery/templates/cloudflare_tunnel.yaml
700 | parameters:
701 | - name: Token
702 | description: "Create a remotely managed tunnel here https://developers.cloudflare.com/cloudflare-one/connections/connect-networks/get-started/create-remote-tunnel/"
703 | type: text
704 | default: ""
705 |
706 |
--------------------------------------------------------------------------------
/gallery/templates/chroma.yaml:
--------------------------------------------------------------------------------
1 | usage: |
2 | {{ .Usage }}
3 | examples:
4 | - title: Accessing Chroma database
5 | description: |
6 |     You can access the API on {{ .AppURL | default "http://localhost:8000" }}. For additional settings,
7 |     check out https://docs.trychroma.com/deployment; you can continue modifying the deployment
8 |     once you have created the application.
9 | code: |
10 | import chromadb
11 | chroma_client = chromadb.HttpClient(host='localhost', port=8000)
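12 | - title: Storing and querying documents
13 |   description: |
14 |     A short sketch using the chromadb Python client (the collection name and
15 |     documents are illustrative):
16 |   code: |
17 |     collection = chroma_client.get_or_create_collection("demo")
18 |     collection.add(ids=["1", "2"], documents=["hello synpse", "hello chroma"])
19 |     print(collection.query(query_texts=["synpse"], n_results=1))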
12 | ---
13 |
14 | name: chromadb
15 | scheduling:
16 | type: Conditional
17 | selectors: {}
18 | spec:
19 | containers:
20 | - name: chroma
21 | image: ghcr.io/chroma-core/chroma:latest
22 | env:
23 | - name: IS_PERSISTENT
24 | value: "TRUE"
25 | volumes:
26 | - {{ .DataDir | default "/data/chroma" }}:/chroma/chroma/
27 | ports:
28 | - {{ .Port | default "8000" }}:8000
29 |
--------------------------------------------------------------------------------
/gallery/templates/cloudflare_tunnel.yaml:
--------------------------------------------------------------------------------
1 | usage: |
2 | Cloudflare Tunnel provides you with a secure way to connect your resources to
3 | Cloudflare without a publicly routable IP address. With Tunnel, you do not send
4 | traffic to an external IP — instead, a lightweight daemon in your infrastructure
5 | (‘cloudflared’) creates outbound-only connections to Cloudflare’s global network.
6 |
7 | More info: https://developers.cloudflare.com/cloudflare-one/connections/connect-networks/get-started/.
8 |
9 |
10 | ---
11 | name: {{ .SynpseTemplateID }}
12 | scheduling:
13 | type: Conditional
14 | selectors: {}
15 | spec:
16 | containers:
17 | - name: cloudflared
18 | image: cloudflare/cloudflared:latest
19 | networkMode: host
20 | args:
21 | - tunnel
22 | - run
23 | - --no-autoupdate
24 | env:
25 | - name: TUNNEL_TOKEN
26 | value: "{{ .Token }}"
27 | restartPolicy: {}
28 |
29 |
--------------------------------------------------------------------------------
/gallery/templates/helix-runner.yaml:
--------------------------------------------------------------------------------
1 | usage: |
2 | Once you have your Helix controlplane running (https://docs.helix.ml/helix/private-deployment/)
3 | you can attach a Helix runner to it.
4 |
5 | After attaching the runner, you should see it in your runners dashboard.
6 | ---
7 | name: helix-runner
8 | scheduling:
9 | type: Conditional
10 | spec:
11 | containers:
12 | - name: runner
13 | image: europe-docker.pkg.dev/helixml/helix/runner:latest
14 | args:
15 | - --api-host
16 | - {{ .ApiHost | default "https://app.tryhelix.ai" }}
17 | - --api-token
18 |     - {{ .ApiToken }}
19 | - --runner-id
20 | - ${SYNPSE_DEVICE_NAME}
21 | - --memory
22 | - 24GB
23 | - --allow-multiple-copies
24 | gpus: all
25 | ipcMode: host
26 | ulimit:
27 | memlock: "-1"
28 | stack: "67108864"
29 | volumes:
30 | - {{ .ModelDirectory | default "/data/helix" }}:/root/.cache/huggingface
31 | restartPolicy: {}
32 |
--------------------------------------------------------------------------------
/gallery/templates/llama3.yaml:
--------------------------------------------------------------------------------
1 | usage:
2 | |
3 | Run Llama3 as an API server. To interact with the API
4 | refer to https://github.com/ollama/ollama/blob/main/docs/api.md
5 | examples:
6 | - title: Generate a chat completion
7 | description: "To interact with the model use the chat endpoint:"
8 | code: |
9 | curl {{ .AppURL | default "http://localhost:11434" }}/api/chat -d '{
10 | "model": "llama3",
11 | "messages": [
12 | {
13 | "role": "user",
14 | "content": "why is the sky blue?"
15 | }
16 | ]
17 | }'
18 |
19 | ---
20 | name: {{ .SynpseTemplateID }}
21 | scheduling:
22 | type: Conditional
23 | # selectors:
24 | # key: "value" # For selecting devices by labels
25 | # devices: # For selecting devices by name or ID
26 | # - device_name
27 | spec:
28 | containers:
29 | - name: ollama
30 | image: ollama/ollama:latest
31 | gpus: all
32 | entrypoint:
33 | - /bin/bash
34 | args:
35 | - -ec
36 | - |
37 | #!/usr/bin/env bash
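38 |         # Start the Ollama server in the background, give it a moment to come up,
39 |         # then pull/run the requested model tag and block on the server process.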
38 | ollama serve & sleep 1 && ollama run {{ .Tag | default "llama3" }} && wait
39 | ports:
40 | - {{ .Port | default "11434" }}:11434
41 | volumes:
42 | - {{ .ModelDirectory | default "/data/ollama" }}:/root/.ollama
43 | restartPolicy: {}
44 | proxy:
45 | tunnels:
46 | - localPort: {{ .Port | default "11434" }}
47 |
--------------------------------------------------------------------------------
/gallery/templates/llocal-search.yaml:
--------------------------------------------------------------------------------
1 | usage:
2 | |
3 |   Run an on-premise search engine with Ollama, Chroma, Redis and Searxng.
4 | ---
5 | name: llocal-search
6 | scheduling:
7 | type: Conditional
8 | selectors: {}
9 | spec:
10 | containers:
11 | - name: backend
12 | image: nilsherzig/llocalsearch-backend:latest
13 | env:
14 | - name: OLLAMA_HOST
15 | value: http://ollama:11434
16 | - name: CHROMA_DB_URL
17 | value: http://chromadb:8000
18 | - name: SEARXNG_DOMAIN
19 | value: http://searxng:8080
20 | - name: MAX_ITERATIONS
21 | value: "30"
22 | - name: EMBEDDINGS_MODEL_NAME
23 | value: "nomic-embed-text:v1.5"
24 | # Frontend
25 | - name: frontend
26 | image: nilsherzig/llocalsearch-frontend:latest
27 | ports:
28 | - {{ .Port | default "4173" }}:4173
29 | # Ollama, running your models
30 | - name: ollama
31 | image: ollama/ollama:latest
32 | gpus: all
33 | # ports:
34 | # - {{ .Port | default "11434" }}:11434
35 | volumes:
36 | - {{ .OllamaDataDir | default "/data/ollama" }}:/root/.ollama
37 | # Chroma for embeddings
38 | - name: chromadb
39 | image: chromadb/chroma:latest
40 | env:
41 | - name: IS_PERSISTENT
42 | value: "TRUE"
43 | volumes:
44 | - {{ .ChromaDataDir | default "/data/chroma" }}:/chroma/chroma/
45 | # Redis - cache KV store
46 | - name: redis
47 | image: redis:latest
48 | args:
49 | - redis-server
50 | - --save
51 | - '30'
52 | - '1'
53 | - --loglevel
54 | - warning
55 | volumes:
56 | - {{ .RedisDataDir | default "/data/redis" }}:/data
57 | # Search engine
58 | - name: searxng
59 | image: docker.io/searxng/searxng:latest
60 | env:
61 | - name: SEARXNG_BASE_URL
62 | value: http://localhost/
63 | files:
64 | - filepath: /etc/searxng/limiter.toml
65 | contents: |
66 | [botdetection.ip_limit]
67 | # activate link_token method in the ip_limit method
68 | link_token = true
69 | - filepath: /etc/searxng/settings.yml
70 | contents: |
71 | # see https://docs.searxng.org/admin/settings/settings.html#settings-use-default-settings
72 | use_default_settings: true
73 | server:
74 | # base_url is defined in the SEARXNG_BASE_URL environment variable, see .env and docker-compose.yml
75 | secret_key: "51b30631d62b441ec1715009d96cf324f89993401cb2ce8301c4170d2fe6ed13" # change this!
76 | limiter: false # can be disabled for a private instance
77 | image_proxy: true
78 | ui:
79 | static_use_hash: true
80 | redis:
81 | url: redis://redis:6379/0
82 | search:
83 | safe_search: 0
84 | autocomplete: ""
85 | default_lang: ""
86 | ban_time_on_fail: 5
87 | max_ban_time_on_fail: 120
88 | suspended_times:
89 | SearxEngineAccessDenied: 86400
90 | SearxEngineCaptcha: 86400
91 | SearxEngineTooManyRequests: 3600
92 | cf_SearxEngineCaptcha: 1296000
93 | cf_SearxEngineAccessDenied: 86400
94 | recaptcha_SearxEngineCaptcha: 604800
95 | formats:
96 | - html
97 | - json
98 | - filepath: /etc/searxng/uwsgi.ini
99 | contents: |
100 | [uwsgi]
101 | # Who will run the code
102 | uid = searxng
103 | gid = searxng
104 |
105 | # Number of workers (usually CPU count)
106 | # default value: %k (= number of CPU core, see Dockerfile)
107 | workers = %k
108 |
109 | # Number of threads per worker
110 | # default value: 4 (see Dockerfile)
111 | threads = 4
112 |
113 | # The right granted on the created socket
114 | chmod-socket = 666
115 |
116 | # Plugin to use and interpreter config
117 | single-interpreter = true
118 | master = true
119 | plugin = python3
120 | lazy-apps = true
121 | enable-threads = 4
122 |
123 | # Module to import
124 | module = searx.webapp
125 |
126 | # Virtualenv and python path
127 | pythonpath = /usr/local/searxng/
128 | chdir = /usr/local/searxng/searx/
129 |
130 | # automatically set processes name to something meaningful
131 | auto-procname = true
132 |
133 | # Disable request logging for privacy
134 | disable-logging = true
135 | log-5xx = true
136 |
137 | # Set the max size of a request (request-body excluded)
138 | buffer-size = 8192
139 |
140 | # No keep alive
141 | # See https://github.com/searx/searx-docker/issues/24
142 | add-header = Connection: close
143 |
144 | # uwsgi serves the static files
145 | static-map = /static=/usr/local/searxng/searx/static
146 | # expires set to one day
147 | static-expires = /* 86400
148 | static-gzip-all = True
149 | offload-threads = 4
150 | capDrop:
151 | - ALL
152 | capAdd:
153 | - CHOWN
154 | - SETGID
155 | - SETUID
156 | volumes:
157 | - {{ .SearxngDataDir | default "/data/searxng" }}:/data
158 |
159 |
--------------------------------------------------------------------------------
/gallery/templates/lorax.yaml:
--------------------------------------------------------------------------------
1 | usage: |
2 | LoRAX supports multi-turn chat conversations combined with dynamic adapter loading through an OpenAI compatible API. Just specify any adapter as the model parameter.
3 | examples:
4 | - title: Generate a chat completion
5 | description: "LoRAX supports multi-turn chat conversations combined with dynamic adapter loading through an OpenAI compatible API. Just specify any adapter as the model parameter:"
6 | code: |
7 |     curl -H 'Content-Type: application/json' {{ .AppURL | default "http://localhost" }}:{{ .Port | default "8080" }}/v1/chat/completions -d '{
8 | "model": "vineetsharma/qlora-adapter-Mistral-7B-Instruct-v0.1-gsm8k",
9 | "messages": [{"role": "user", "content": "Say this is a test!"}]
10 | }'
11 | - title: Quantization
12 | description: |
13 | LoRAX supports loading the base model with quantization to reduce memory overhead,
14 | while loading adapters in full (fp32) or half precision (fp16, bf16), similar to the approach described in QLoRA.
15 |
16 | See https://loraexchange.ai/guides/quantization/ for more info.
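17 |   code: |
18 |     # Sketch (assumption): LoRAX follows the launcher convention of a
19 |     # --quantize flag; see the guide linked above for supported values.
20 |     args:
21 |       - --model-id
22 |       - {{ .Model }}
23 |       - --quantize
24 |       - bitsandbytes-nf4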
17 | ---
18 | name: {{ .SynpseTemplateID }}
19 | scheduling:
20 | type: Conditional
21 | selectors: {}
22 | spec:
23 | containers:
24 | - name: lorax
25 | image: ghcr.io/predibase/lorax:main
26 | args:
27 | - --model-id
28 | - {{ .Model }}
29 | gpus: all
30 | ipcMode: host
31 | ports:
32 | - {{ .Port | default "8080" }}:80
33 | volumes:
34 | - {{ .ModelDirectory | default "/data/lorax" }}:/data
35 | env:
36 | - name: HUGGING_FACE_HUB_TOKEN
37 | value: "{{ .HuggingFaceToken | default "" }}"
38 | restartPolicy: {}
39 | proxy:
40 | tunnels:
41 | - localPort: {{ .Port | default "8080" }}
42 |
--------------------------------------------------------------------------------
/gallery/templates/n8n.yaml:
--------------------------------------------------------------------------------
1 | usage:
2 | |
3 | Dockerized n8n. You can find documentation here
4 | https://docs.n8n.io/hosting/installation/docker/ on available settings.
5 | Once n8n starts, open the web UI and start creating workflows. Data is
6 | persisted in the mounted volume.
7 |
8 | ---
9 | name: {{ .SynpseTemplateID }}
10 | scheduling:
11 | type: Conditional
12 | spec:
13 | containers:
14 | - name: n8n
15 | image: docker.n8n.io/n8nio/n8n
16 | user: root
17 | ports:
18 | - {{ .Port | default "5678" }}:5678
19 | volumes:
20 |     - {{ .DataDir | default "/data/n8n" }}:/home/node/
21 | env:
22 | - name: TZ
23 | value: {{ .Tz | default "Europe/London" }}
24 | restartPolicy: {}
25 | proxy:
26 | tunnels:
27 | - localPort: {{ .Port | default "5678" }}
--------------------------------------------------------------------------------
/gallery/templates/nodered.yaml:
--------------------------------------------------------------------------------
1 | usage: |
2 | Check out documentation at https://nodered.org/docs/ to get started with Node-RED.
3 | Once you have deployed it, you can access the Node-RED editor by visiting {{ .AppURL | default "http://localhost:1880" }}.
4 |
5 | ---
6 | name: node-red
7 | scheduling:
8 | type: Conditional
9 | selectors: {}
10 | spec:
11 | containers:
12 | - name: nodered
13 | image: nodered/node-red:latest
14 | user: root
15 | volumes:
16 | - {{ .DataDir | default "/data/nodered" }}:/data
17 | - /etc/localtime:/etc/localtime
18 | - /root/.ssh:/root/.ssh
19 | - /etc/ssl/certs:/etc/ssl/certs
20 | ports:
21 | - {{ .Port | default "1880" }}:1880
22 | proxy:
23 | tunnels:
24 | - localPort: {{ .Port | default "1880" }}
--------------------------------------------------------------------------------
/gallery/templates/nvidia-nim-llm.yaml:
--------------------------------------------------------------------------------
1 | usage: |
2 | NVIDIA NIM is an easy-to-use inference microservice. You can use popular open source libraries to interact with the model.
3 |
4 |   Get your NGC API key from https://ngc.nvidia.com/setup/api-key and set it as the value for the NGC API key parameter.
5 |
6 | examples:
7 | - title: 'Make an API call to the server'
8 |     description: Once the NIM is running, wait until the weights have finished downloading. You can then interact with the model using the chat endpoint.
9 | code: |
10 | curl -X 'POST' \
11 | '{{ .AppURL | default "http://localhost:8080" }}/v1/chat/completions' \
12 | -H 'accept: application/json' \
13 | -H 'Content-Type: application/json' \
14 | -d '{
15 | "model": "{{ .ModelName | default "meta/llama3-8b-instruct" }}",
16 | "messages": [{"role":"user", "content":"Write a limerick about the wonders of GPU computing."}],
17 | "max_tokens": 64
18 | }'
19 | - title: 'Use the model from your Python application'
20 |     description: The NIM exposes an OpenAI-compatible API, so you can use it directly from your Python app.
21 | code: |
22 | from openai import OpenAI
23 |
24 | client = OpenAI(
25 | base_url = "{{ .AppURL | default "http://localhost:8080" }}/v1",
26 | api_key = "local"
27 | )
28 |
29 | completion = client.chat.completions.create(
30 | model="{{ .ModelName | default "meta/llama3-8b-instruct" }}",
31 | messages=[{"role":"user","content":"Write a limerick about the wonders of GPU computing."}],
32 | temperature=0.5,
33 | top_p=1,
34 | max_tokens=1024,
35 | stream=True
36 | )
37 |
38 | for chunk in completion:
39 | if chunk.choices[0].delta.content is not None:
40 | print(chunk.choices[0].delta.content, end="")
41 |
42 |
43 | ---
44 | name: {{ .SynpseTemplateID }}
45 | scheduling:
46 | type: Conditional
47 | spec:
48 | containers:
49 | - name: nvidia-nim
50 | image: nvcr.io/nim/meta/{{ .ModelImage | default "llama3-8b-instruct:1.0.0" }}
51 | auth:
52 | username: $oauthtoken
53 |         password: {{ .NgcApiKey }}
54 | gpus: all
55 | user: root
56 | ports:
57 | - {{ .Port | default "8080" }}:8000
58 | volumes:
59 | - {{ .ModelDirectory | default "/data/nvidia" }}:/opt/nim/.cache
60 | env:
61 | - name: NGC_API_KEY
62 |           value: {{ .NgcApiKey }}
63 | - name: LOCAL_NIM_CACHE
64 | value: /opt/nim/
65 | restartPolicy: {}
66 | proxy:
67 | tunnels:
68 |       - localPort: {{ .Port | default "8080" }}
69 |
--------------------------------------------------------------------------------
/gallery/templates/nvidia-triton.yaml:
--------------------------------------------------------------------------------
1 | usage: |
2 | Triton Inference Server is an open source inference serving software that streamlines AI inferencing.
3 | Triton enables teams to deploy any AI model from multiple deep learning and machine learning frameworks,
4 | including TensorRT, TensorFlow, PyTorch, ONNX, OpenVINO, Python, RAPIDS FIL, and more. Triton Inference
5 | Server supports inference across cloud, data center, edge and embedded devices on NVIDIA GPUs, x86 and ARM CPU,
6 | or AWS Inferentia. Triton Inference Server delivers optimized performance for many query types, including real time,
7 | batched, ensembles, and audio/video streaming.
8 | examples:
9 | - title: 'Step 1: Initialize model repository on your machine'
10 | description: First, SSH into the machine where you want to deploy this template and fetch the models
11 | code: |
12 | git clone -b r24.02 https://github.com/triton-inference-server/server.git
13 | cd server/docs/examples
14 | ./fetch_models.sh
15 | - title: 'Step 2: Deploy Triton Inference Server'
16 | description: |
17 | Deploy Triton Inference Server with the models fetched in the previous step.
18 | Set ModelDirectory parameter to the directory where the models are stored.
19 | To check whether it's running:
20 | code: |
21 | curl -6 --verbose "http://[::1]:8000/v2/health/ready"
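22 |   - title: 'Step 3: Send an inference request'
23 |     description: |
24 |       A sketch based on the Triton quickstart: it runs the client SDK container and
25 |       assumes the example model repository fetched in Step 1.
26 |     code: |
27 |       docker run -it --rm --net=host nvcr.io/nvidia/tritonserver:24.02-py3-sdk \
28 |         /workspace/install/bin/image_client -m densenet_onnx -c 3 -s INCEPTION /workspace/images/mug.jpg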
22 |
23 | ---
24 | name: nvidia-triton-inference-server
25 | scheduling:
26 | type: Conditional
27 | spec:
28 | containers:
29 | - name: runner
30 | image: nvcr.io/nvidia/tritonserver:24.02-py3
31 | args:
32 | - tritonserver
33 | - --model-repository=/models
34 | # gpus: all
35 | networkMode: host
36 | ipcMode: host
37 | ulimit:
38 | memlock: "-1"
39 | stack: "67108864"
40 | volumes:
41 | - {{ .ModelDirectory | default "/data/models" }}:/models
42 | restartPolicy: {}
43 |
--------------------------------------------------------------------------------
/gallery/templates/ollama.yaml:
--------------------------------------------------------------------------------
1 | usage:
2 | |
3 | Run Ollama as an API server. To interact with the API
4 | refer to https://github.com/ollama/ollama/blob/main/docs/api.md
5 | examples:
6 | - title: Pull a model
7 |     description: "By default Ollama doesn't come with model weights, you need to pull them before using the model:"
8 | code: |
9 | curl {{ .AppURL | default "http://localhost:11434" }}/api/pull -d '{
10 | "name": "llama2"
11 | }'
12 | - title: Generate a chat completion
13 | description: "To interact with the model use the chat endpoint:"
14 | code: |
15 | curl {{ .AppURL | default "http://localhost:11434" }}/api/chat -d '{
16 | "model": "llama2",
17 | "messages": [
18 | {
19 | "role": "user",
20 | "content": "why is the sky blue?"
21 | }
22 | ]
23 | }'
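24 |   - title: Generate a completion
25 |     description: "For single prompts without chat history, the generate endpoint from the API docs linked above can be used:"
26 |     code: |
27 |       curl {{ .AppURL | default "http://localhost:11434" }}/api/generate -d '{
28 |         "model": "llama2",
29 |         "prompt": "Why is the sky blue?"
30 |       }'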
24 |
25 | ---
26 | name: {{ .SynpseTemplateID }}
27 | scheduling:
28 | type: Conditional
29 | # selectors:
30 | # key: "value" # For selecting devices by labels
31 | # devices: # For selecting devices by name or ID
32 | # - device_name
33 | spec:
34 | containers:
35 | - name: ollama
36 | image: ollama/ollama:latest
37 | ports:
38 | - {{ .Port | default "11434" }}:11434
39 | volumes:
40 | - {{ .ModelDirectory | default "/data/ollama" }}:/root/.ollama
41 | restartPolicy: {}
42 | proxy:
43 | tunnels:
44 | - localPort: {{ .Port | default "11434" }}
--------------------------------------------------------------------------------
/gallery/templates/open-webui.yaml:
--------------------------------------------------------------------------------
1 | usage: |
2 |   Once running, open {{ .AppURL | default "http://localhost:8844" }} in your browser and create an account or log in to an existing one.
3 |
4 | You can read more about the project here: https://github.com/open-webui/open-webui
5 | ---
6 | name: {{ .SynpseTemplateID | default "open-webui" }}
7 | scheduling:
8 | type: Conditional
9 |   selectors: {}
10 | spec:
11 | containers:
12 | - name: open-webui
13 | image: ghcr.io/open-webui/open-webui:main
14 | ports:
15 | - "{{ .Port | default "8844" }}:8080"
16 | volumes:
17 | - {{ .ModelDirectory | default "/data/open-webui" }}:/app/backend/data
18 | env:
19 | - name: OLLAMA_BASE_URL
20 | value: http://ollama:11434
21 | restartPolicy: {}
22 | - name: ollama
23 | image: ollama/ollama:latest
24 | volumes:
25 | - {{ .ModelDirectory | default "/data/open-webui" }}:/root/.ollama
26 | restartPolicy: {}
27 | proxy:
28 | tunnels:
29 | - localPort: {{ .Port | default "8844" }}
30 |
--------------------------------------------------------------------------------
/gallery/templates/opendevin.yaml:
--------------------------------------------------------------------------------
1 | usage: |
2 | OpenDevin is a platform for autonomous software engineers, powered by AI and LLMs.
3 |
4 | examples:
5 | - title: Accessing OpenDevin
6 | description: |
7 |       Once running, you will be able to access the web UI through either the public Synpse URL or, if the proxy
8 |       is disabled, localhost ({{ .AppURL | default "http://localhost:3000" }}). The first thing to do is set up
9 |       your LLM backend.
10 |
11 | ---
12 |
13 | name: {{ .SynpseTemplateID }}
14 | scheduling:
15 | type: Conditional
16 | selectors: {}
17 | spec:
18 | containers:
19 | - name: opendevin
20 | extraHosts:
21 | - host.docker.internal:host-gateway
22 | image: ghcr.io/opendevin/opendevin:{{ .Tag | default "0.6.2" }}
23 | privileged: true
24 | volumes:
25 | - {{ .WorkspaceBase | default "/data/opendevin-workspace" }}:/opt/workspace_base
26 | - /var/run/docker.sock:/var/run/docker.sock
27 | ports:
28 | - {{ .Port | default "3000" }}:3000
29 | env:
30 | - name: WORKSPACE_MOUNT_PATH
31 | value: {{ .WorkspaceBase | default "/data/opendevin-workspace" }}
32 | - name: PERSIST_SANDBOX
33 | value: "false"
34 | - name: SANDBOX_USER_ID
35 | value: "1000"
36 | - name: SSH_PASSWORD
37 | value: {{ .SshPassword | default "verysecret" }}
38 | proxy:
39 | tunnels:
40 | - localPort: {{ .Port | default "3000" }}
--------------------------------------------------------------------------------
/gallery/templates/replicate-cog.yaml:
--------------------------------------------------------------------------------
1 | usage: |
2 | {{ .Usage }}
3 | examples:
4 | - title: Using the model
5 | code: |
6 | curl -s -X POST \
7 | -H "Content-Type: application/json" \
8 | -d $'{
9 | {{- .InputExample | nindent 12 -}}
10 | }' \
11 | {{ .AppURL | default "http://localhost:5000" }}/predictions
12 |
13 | ---
14 | name: {{ .SynpseTemplateID }}
15 | scheduling:
16 | type: Conditional
17 | selectors: {}
18 | spec:
19 | containers:
20 | - name: cog
21 | image: {{ .CogImage }}
22 | gpus: all
23 | ports:
24 | - {{ .Port | default "5000" }}:5000
25 | proxy:
26 | tunnels:
27 | - localPort: {{ .Port | default "5000" }}
28 |
--------------------------------------------------------------------------------
/gallery/templates/uptime-kuma.yaml:
--------------------------------------------------------------------------------
1 | usage: |
2 |   You can check out the demo server at https://demo.kuma.pet/start-demo. For status page
3 |   documentation, visit https://github.com/louislam/uptime-kuma/wiki/Status-Page
4 | examples:
5 | - title: Accessing Uptime Kuma
6 | description: |
7 |       Once running, open {{ .AppURL | default "http://localhost:3001" }} to access the dashboard and set up your monitoring targets.
8 |
9 | ---
10 |
11 | name: uptime-monitoring
12 | scheduling:
13 | type: Conditional
14 | selectors: {}
15 | spec:
16 | containers:
17 | - name: uptime-kuma
18 | image: louislam/uptime-kuma:1
19 | volumes:
20 | - {{ .DataDir | default "/data/uptime-kuma" }}:/app/data
21 | ports:
22 | - {{ .Port | default "3001" }}:3001
23 | proxy:
24 | tunnels:
25 | - localPort: {{ .Port | default "3001" }}
--------------------------------------------------------------------------------
/gallery/templates/vllm.yaml:
--------------------------------------------------------------------------------
1 | usage: |
2 | vLLM provides an HTTP server that implements OpenAI's Completions and Chat API (https://platform.openai.com/docs/api-reference/completions).
3 | examples:
4 | - title: Generate a chat completion
5 | description: "To interact with the model use the chat endpoint:"
6 | code: |
7 | curl http://{{ .SynpseDeviceIP | default "localhost" }}:{{ .Port | default "8000" }}/v1/chat/completions -d '{
8 | "model": "{{ .Model }}",
9 | "messages": [{"role": "user", "content": "Say this is a test!"}]
10 | }'
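11 |   - title: List served models
12 |     description: "To verify the server is up, query the models endpoint; it should report the model the server was started with:"
13 |     code: |
14 |       curl http://{{ .SynpseDeviceIP | default "localhost" }}:{{ .Port | default "8000" }}/v1/models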
11 | ---
12 | name: {{ .SynpseTemplateID }}
13 | scheduling:
14 | type: Conditional
15 | selectors: {}
16 | spec:
17 | containers:
18 | - name: vllm
19 | image: vllm/vllm-openai:latest
20 | args:
21 | - --model
22 | - {{ .Model }}
23 | gpus: all
24 | ports:
25 | - {{ .Port | default "8000" }}:8000
26 | volumes:
27 | - {{ .ModelDirectory | default "/data/vllm" }}:/root/.cache/huggingface
28 | env:
29 | - name: HUGGING_FACE_HUB_TOKEN
30 | value: "{{ .HuggingFaceToken }}"
31 | restartPolicy: {}
32 | proxy:
33 | tunnels:
34 | - localPort: {{ .Port | default "8000" }}
35 |
36 |
--------------------------------------------------------------------------------
/gallery/templates/webhookrelay_forward.yaml:
--------------------------------------------------------------------------------
1 | usage: |
2 | Starts forwarding webhooks through Webhook Relay (https://webhookrelay.com/).
3 | This is useful for integrating CI/CD pipelines, webhooks, and other services that
4 | require a public endpoint.
5 |
6 | Enhanced security through uni-directional forwarding.
7 |
8 | ---
9 | name: {{ .SynpseTemplateID }}
10 | scheduling:
11 | type: Conditional
12 | selectors: {}
13 | spec:
14 | containers:
15 | - name: webhookrelay
16 | image: webhookrelay/webhookrelayd:latest
17 | networkMode: host
18 | env:
19 | - name: RELAY_KEY
20 | value: "{{ .RelayKey }}"
21 | - name: RELAY_SECRET
22 | value: "{{ .RelaySecret }}"
23 | - name: BUCKETS
24 | value: "{{ .Buckets }}"
25 | restartPolicy: {}
26 |
27 |
--------------------------------------------------------------------------------
/gallery/templates/webhookrelay_tunnel.yaml:
--------------------------------------------------------------------------------
1 | usage: |
2 | Opens a Webhook Relay (https://webhookrelay.com/) bidirectional tunnel that
3 | allows you to expose your local services to the internet. This is useful for
4 | testing websites, APIs, and other services that require a public endpoint.
5 |
6 | ---
7 | name: {{ .SynpseTemplateID }}
8 | scheduling:
9 | type: Conditional
10 | selectors: {}
11 | spec:
12 | containers:
13 | - name: webhookrelay
14 | image: webhookrelay/webhookrelayd:latest
15 | networkMode: host
16 | args:
17 | - --mode
18 | - tunnel
19 | env:
20 | - name: RELAY_KEY
21 | value: "{{ .RelayKey }}"
22 | - name: RELAY_SECRET
23 | value: "{{ .RelaySecret }}"
24 | - name: TUNNELS
25 | value: "{{ .Tunnels }}"
26 | restartPolicy: {}
27 |
28 |
--------------------------------------------------------------------------------
/samples/calendso/README.md:
--------------------------------------------------------------------------------
1 | # Deploy personal calendar booking
2 |
3 | Calendso (now known as [Cal.Com](https://cal.com/)) is an open source Calendly alternative. You are in charge of your own data, workflow and appearance. Synpse makes it easy to host on your own hardware.
4 |
5 | ## Prerequisites
6 |
7 | - Account in [Synpse](https://cloud.synpse.net)
8 | - Ensure you have a project and namespace ready (we create them by default once you log in)
9 | - DNS name (you will want to access it remotely)
10 |
11 | ## Deployment
12 |
13 | This deployment sample will:
14 | - Create a calendso container
15 | - Create a Postgres instance (data will be persisted on the host's `/data/calendso-postgres` path)
16 | - Create a [Prisma](https://www.prisma.io/studio) container through which you will be able to manage the data
17 |
18 |
19 |
20 |
21 |
22 |
23 | ## Next steps
24 |
25 | You will need to create a user through Prisma and configure Google authentication to sync with your calendar. You can find a more detailed guide here: https://synpse.net/blog/self-hosting-calendso-caddy.
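26 |
27 | The manifest references a few secrets (`calendsoPostgres`, `calendsoPostgresConnString`, `calendsoGoogleApiCredentials`). As a rough sketch (adjust names and values to your setup), they can be created like this:
28 |
29 | ```
30 | synpse secret create calendsoPostgres -v YOUR_POSTGRES_PASSWORD
31 | synpse secret create calendsoPostgresConnString -v postgresql://calendso:YOUR_POSTGRES_PASSWORD@postgres:5432/calendso
32 | synpse secret create calendsoGoogleApiCredentials -f credentials.json
33 | ```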
--------------------------------------------------------------------------------
/samples/calendso/calendso-synpse-caddy.yaml:
--------------------------------------------------------------------------------
1 | name: synpse-calendso
2 | scheduling:
3 | type: Conditional
4 | selectors:
5 | type: controller
6 | spec:
7 | containers:
8 | - name: calendso
9 | image: ctadeu/calendso:0.0.17-1
10 | env:
11 | - name: BASE_URL
12 | value: # Add your own URL, e.g. https://calendar.synpse.net
13 | - name: NEXTAUTH_URL
14 | value: # Add your own URL, e.g. https://calendar.synpse.net
15 | - name: DATABASE_URL
16 | fromSecret: calendsoPostgresConnString
17 | - name: CALENDSO_ENCRYPTION_KEY
18 | value: #
19 | - name: GOOGLE_API_CREDENTIALS
20 | fromSecret: calendsoGoogleApiCredentials # Create secret
21 | restartPolicy: {}
22 | - name: prisma
23 | image: codejamninja/prisma-studio:latest
24 | ports:
25 | - 5555:5555 # Access admin dashboard locally
26 | env:
27 | - name: POSTGRES_URL
28 | fromSecret: calendsoPostgresConnString
29 | restartPolicy: {}
30 | - name: postgres
31 | image: postgres:latest
32 | volumes:
33 | - /data/calendso-postgres:/var/lib/postgresql/data
34 | env:
35 | - name: PGDATA
36 | value: /var/lib/postgresql/data/pgdata
37 | - name: POSTGRES_USER
38 | value: calendso
39 | - name: POSTGRES_DB
40 | value: calendso
41 | - name: POSTGRES_PASSWORD
42 | fromSecret: calendsoPostgres
43 | restartPolicy: {}
44 | - name: caddy
45 | image: caddy:latest
46 | args:
47 | - caddy
48 | - reverse-proxy
49 | - --from
50 | - # Where your router routes to e.g. calendar.synpse.net:7300
51 | - --to
52 | - calendso:3000
53 | ports:
54 | - 7300:7300
55 | volumes:
56 | - /data/calendso-caddy:/data
57 | - /data/calendso-caddy-cfg:/config
58 | restartPolicy: {}
59 |
--------------------------------------------------------------------------------
/samples/clickhouse/README.md:
--------------------------------------------------------------------------------
1 | # ClickHouse
2 |
3 | ClickHouse® is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).
4 |
5 | ## Deploy
6 |
7 | ```
8 | synpse secret create clickhouse-config -f samples/clickhouse/config.xml
9 | synpse deploy -f samples/clickhouse/clickhouse-synpse.yaml
10 | ```
11 |
12 | An easy, small dataset to try :)
13 | https://clickhouse.tech/docs/en/getting-started/example-datasets/recipes/
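14 |
15 | Once running, you can sanity-check the ClickHouse HTTP interface with a simple query (a sketch, assuming the default 8123 port mapping; use the credentials from your `config.xml` if required):
16 |
17 | ```
18 | curl 'http://[device IP]:8123/' -d 'SELECT 1'
19 | ```
20 |
21 | The deployment also runs the [Tabix](https://tabix.io/) web client on port 8888.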
--------------------------------------------------------------------------------
/samples/clickhouse/clickhouse-synpse.yaml:
--------------------------------------------------------------------------------
1 | name: ClickHouse
2 | description: ClickHouse
3 | scheduling:
4 |   type: NoDevices
5 | spec:
6 | containers:
7 | - name: clickhouse
8 | image: yandex/clickhouse-server
9 | ports:
10 | - 8123:8123
11 | - 9000:9000
12 | ulimit:
13 | nofile: 262144:262144
14 | volumes:
15 |       - /synpse/clickhouse:/var/lib/clickhouse
16 | secrets:
17 | - name: clickhouse-config
18 | filepath: /etc/clickhouse-server/users.d/synpse.xml
19 | restartPolicy: {}
20 | - name: tabix
21 | image: spoonest/clickhouse-tabix-web-client
22 | ports:
23 | - 8888:80
24 | restartPolicy: {}
25 |
--------------------------------------------------------------------------------
/samples/clickhouse/config.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <yandex>
3 |     <profiles>
4 |         <synpse>
5 |             <!-- Maximum memory usage for processing a single query, in bytes -->
6 |             <max_memory_usage>10000000000</max_memory_usage>
7 |             <!-- How to choose between replicas during distributed query processing -->
8 |             <load_balancing>random</load_balancing>
9 |         </synpse>
10 |     </profiles>
11 |     <users>
12 |         <synpse>
13 |             <password>synpse_secure</password>
14 |             <networks>
15 |                 <ip>::/0</ip>
16 |             </networks>
17 |             <profile>synpse</profile>
18 |             <quota>synpse</quota>
19 |         </synpse>
20 |     </users>
21 |     <quotas>
22 |         <synpse>
23 |             <interval>
24 |                 <duration>3600</duration>
25 |                 <queries>0</queries>
26 |                 <errors>0</errors>
27 |                 <result_rows>0</result_rows>
28 |                 <read_rows>0</read_rows>
29 |                 <execution_time>0</execution_time>
30 |             </interval>
31 |         </synpse>
32 |     </quotas>
33 | </yandex>
--------------------------------------------------------------------------------
/samples/collab/collab-gpu.yaml:
--------------------------------------------------------------------------------
1 | name: collab-gpu
2 | scheduling:
3 | type: AllDevices
4 | spec:
5 | containers:
6 | - name: jupyter
7 | # Ref: https://www.tensorflow.org/install/docker
8 | image: tensorflow/tensorflow:latest-gpu-jupyter
9 | args:
10 | - notebook
11 | - --notebook-dir=/tf
12 | - --ip
13 | - 0.0.0.0
14 | - --no-browser
15 | - --allow-root
16 | - --NotebookApp.allow_origin='https://colab.research.google.com'
17 | - --port=8888
18 | - --NotebookApp.port_retries=0
19 | command: jupyter
20 | gpus: all
21 | ports:
22 | - 8888:8888
23 |
--------------------------------------------------------------------------------
/samples/drone/README.md:
--------------------------------------------------------------------------------
1 | # Drone server Synpse
2 |
3 | How to run [Drone](https://readme.drone.io/) on Synpse
4 |
5 | See the [webhookrelay sample](../webhookrelay/) for the Webhook Relay part of the deployment.
6 |
7 | 1. Create secrets with your GitHub OAuth app credentials.
8 |    See the [GitHub docs](https://docs.github.com/en/developers/apps/getting-started-with-apps/differences-between-github-apps-and-oauth-apps) for how to create one.
9 |
10 | ```
11 | synpse secret create droneClientID -v DRONE_GITHUB_CLIENT_ID
12 | synpse secret create droneSecret -v DRONE_GITHUB_CLIENT_SECRET
13 | ```
14 |
15 | To expose your Drone you can use [Webhookrelay](https://webhookrelay.com/)
16 |
17 | 1. Register and log in to WHR
18 |
19 | 1. Create bidirectional tunnel with custom domain. Set destination to `http://drone:80`
20 |
21 | 1. Create token to configure your tunnel
22 |
23 | 1. Create secrets `relaySecret` and `relayKey`:
24 |
25 | ```
26 | synpse secret create relaySecret -v RELAYSECRET
27 | synpse secret create relayKey -v RELAYKEY
28 | ```
29 |
30 | 1. Change `drone-synpse.yaml` to point to your tunnel:
31 | ```
32 | - name: relayd
33 | image: webhookrelay/webhookrelayd-aarch64:1
34 | args:
35 | - --mode
36 | - tunnel
37 | - -t
38 | -
39 | ```
40 |
41 | 1. Deploy Drone to Synpse!
42 |
43 | 1. Create Drone deployments:
44 | ```
45 | synpse deploy -f samples/drone/drone-synpse.yaml
46 | synpse deploy -f samples/drone/drone-runner-synpse.yaml
47 | ```
48 |
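49 | 1. Verify the setup with a minimal pipeline. A sketch `.drone.yml` for one of your repositories (names are illustrative):
50 |
51 | ```
52 | kind: pipeline
53 | type: docker
54 | name: hello
55 |
56 | steps:
57 | - name: greet
58 |   image: alpine
59 |   commands:
60 |   - echo "hello from drone on synpse"
61 | ```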
--------------------------------------------------------------------------------
/samples/drone/drone-runner-synpse.yaml:
--------------------------------------------------------------------------------
1 | name: Drone
2 | description: Drone runner deployment
3 | scheduling:
4 | type: AllDevices
5 | spec:
6 | containers:
7 | - name: drone-runner
8 | image: drone/drone-runner-docker:1
9 | volumes:
10 | - /var/run/docker.sock:/var/run/docker.sock
11 | env:
12 | - name: DRONE_RPC_PROTO
13 | value: https
14 | - name: DRONE_RPC_HOST
15 | value: drone-mj.webhookrelay.io
16 | - name: DRONE_RPC_SECRET
17 | value: 1bbf84ee3ae57cxxxxxxxxxx
18 | - name: DRONE_RUNNER_CAPACITY
19 |         value: "1"
20 | - name: DRONE_RUNNER_NAME
21 | value: ${HOSTNAME}
22 |
--------------------------------------------------------------------------------
/samples/drone/drone-synpse.yaml:
--------------------------------------------------------------------------------
1 | name: drone-ci
2 | description: Drone CI
3 | scheduling:
4 | type: Conditional
5 | selectors:
6 | app: drone
7 | spec:
8 | containers:
9 | - name: drone
10 | image: drone/drone:latest
11 | volumes:
12 | - /data/drone:/data
13 | env:
14 | - name: DRONE_RPC_SECRET
15 | value: 1bbf84ee3ae57cxxxxxxxxxx
16 | - name: DRONE_SERVER_HOST
17 | value: drone-mj.webrelay.io
18 | - name: DRONE_SERVER_PROTO
19 | value: http
20 | - name: DRONE_USER_CREATE
21 | value: username:mjudeikis,admin:true
22 | - name: DRONE_LOGS_DEBUG
23 | value: "true"
24 | restartPolicy: {}
25 | - name: relayd
26 | image: webhookrelay/webhookrelayd:1
27 | args:
28 | - --mode
29 | - tunnel
30 | - -t
31 | - drone-mj
32 | env:
33 | - name: RELAY_KEY
34 | fromSecret: relayKey
35 | - name: RELAY_SECRET
36 | fromSecret: relaySecret
37 | - name: DRONE_GITHUB_CLIENT_ID
38 | fromSecret: droneClientID
39 | - name: DRONE_GITHUB_CLIENT_SECRET
40 | fromSecret: droneSecret
41 | restartPolicy: {}
42 |
--------------------------------------------------------------------------------
/samples/firefox/README.md:
--------------------------------------------------------------------------------
1 | # Firefox Kiosk on Synpse
2 |
3 | How to run [Firefox](https://www.mozilla.org/en-US/firefox/new/) on Synpse
4 |
5 | Requires Synpse to be installed with GUI support. An example can be found here:
6 | [Custom image build](https://docs.synpse.net/examples/preparing-os-images/build-a-custom-raspberry-pi-image)
7 |
8 | 1. Create Firefox deployment:
9 | ```
10 | synpse deploy -f samples/firefox/firefox.yaml
11 | ```
12 |
13 | ## Community
14 |
15 | Synpse is a young project and our community is constantly growing. Join our [Discord channel](https://discord.gg/dkgN4vVNdm) or participate in [GitHub Discussions](https://github.com/synpse-hq/synpse/discussions).
16 |
--------------------------------------------------------------------------------
/samples/firefox/firefox.yaml:
--------------------------------------------------------------------------------
1 | name: firefox-exec
2 | description: Firefox exec
3 | scheduling:
4 | type: AllDevices
5 | selectors: {}
6 | spec:
7 | execs:
8 | - name: exec
9 | command: firefox
10 | args:
11 | - https://synpse.net
12 | - --kiosk
13 | env:
14 | - name: DISPLAY
15 | value: :0
16 | restartPolicy: {}
17 |
--------------------------------------------------------------------------------
/samples/gladys-assistant/README.md:
--------------------------------------------------------------------------------
1 | # Deploy Gladys Home Assistant
2 |
3 | [Gladys](https://gladysassistant.com/) is a privacy-first, open-source home assistant.
4 |
5 | ## Prerequisites
6 |
7 | - Account in [Synpse](https://cloud.synpse.net)
8 | - Ensure you have a project and namespace ready (we create them by default once you log in)
9 |
10 | ## Deployment
11 |
12 | This deployment sample will:
13 | - Create a Gladys container
14 | - Data will be stored in `/data/gladysassistant` directory on the device
15 |
16 |
17 |
18 |
19 |
20 |
21 | ## Next steps
22 |
23 | Log in using the device IP and port 8080 (http://device-ip:8080) and create your initial user. Once that's created, you can use a device tunnel to expose it to the internet so you can access it remotely.
24 |
25 | Useful links:
26 | - Documentation on deploying Gladys via Synpse: https://docs.synpse.net/examples/home-automation/gladys-assistant
27 | - Gladys docs: https://gladysassistant.com/docs/
--------------------------------------------------------------------------------
/samples/gladys-assistant/gladys.yaml:
--------------------------------------------------------------------------------
1 | name: gladys
2 | scheduling:
3 | type: Conditional
4 | selectors:
5 | type: rpi # Update to your device selector
6 | spec:
7 | containers:
8 | - name: gladys
9 | image: gladysassistant/gladys:v4
10 | networkMode: host # Host network as it needs to scan it for devices
11 | privileged: true
12 | volumes:
13 | - /data/gladysassistant:/data/gladysassistant
14 | - /dev:/dev
15 | - /run/udev:/run/udev
16 | - /var/run/docker.sock:/var/run/docker.sock
17 | env:
18 | - name: NODE_ENV
19 | value: production
20 | - name: SERVER_PORT
21 | value: "8080"
22 | - name: TZ
23 | value: Europe/London # Change to your timezone
24 | - name: SQLITE_FILE_PATH
25 | value: /data/gladysassistant/gladys-production.db
26 |
--------------------------------------------------------------------------------
/samples/grafana/README.md:
--------------------------------------------------------------------------------
1 | # Grafana Synpse
2 |
3 | How to run Grafana on Synpse
4 |
5 | 1. Create secret with Grafana configuration:
6 | ```
7 | synpse secret create grafana-config -f samples/grafana/grafana.ini
8 | ```
9 |
10 | Important: Grafana reads this config only on startup, so changes will not take effect until the container is restarted.
11 |
12 | 1. Create Grafana deployment:
13 | ```
14 | synpse deploy -f samples/grafana/grafana-synpse.yaml
15 | ```
16 |
17 | # Expose with domain
18 |
19 | To expose your Grafana you can use
20 | [Webhookrelay](https://webhookrelay.com/)
21 |
22 | 1. Register and log in to WHR
23 |
24 | 1. Create bidirectional tunnel with custom domain. Set destination to `http://grafana:3000`
25 |
26 | 1. Create token to configure your tunnel
27 |
28 | 1. Create secrets `relaySecret` and `relayKey`:
29 |
30 | ```
31 | synpse secret create relaySecret -v RELAYSECRET
32 | synpse secret create relayKey -v RELAYKEY
33 | ```
34 |
35 | 1. Change `grafana-synpse-webhookrelay.yaml` to point to your tunnel:
36 | ```
37 | - name: relayd
38 | image: webhookrelay/webhookrelayd-aarch64:1
39 | args:
40 | - --mode
41 | - tunnel
42 | - -t
43 | -
44 | ```
45 |
46 | 1. Deploy `grafana-synpse-webhookrelay.yaml` to Synpse!
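47 |
48 | To check that Grafana itself is up before wiring the tunnel, you can query its health endpoint from the device (a sketch, assuming the default 3000 port mapping):
49 |
50 | ```
51 | curl http://localhost:3000/api/health
52 | ```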
--------------------------------------------------------------------------------
/samples/grafana/grafana-synpse-webhookrelay.yaml:
--------------------------------------------------------------------------------
1 | name: Grafana
2 | description: Grafana
3 | scheduling:
4 | type: NoDevices
5 | spec:
6 | containers:
7 | - name: grafana
8 | user: root
9 |       # View available tags here: https://hub.docker.com/r/grafana/grafana/tags
10 | image: grafana/grafana
11 | ports:
12 | - "3000:3000"
13 | env:
14 | - name: GF_PATHS_CONFIG
15 | value: /etc/grafana/grafana2.ini
16 | volumes:
17 | - /synpse/grafana:/var/lib/grafana
18 | secrets:
19 | - name: grafana-config
20 | filepath: /etc/grafana/grafana2.ini
21 | - name: relayd
22 |       # Note that this is an AMD64 image. For ARM use webhookrelay/webhookrelayd-aarch64:1
23 | image: webhookrelay/webhookrelayd:1
24 | args:
25 | - --mode
26 | - tunnel
27 | - -t
28 | - synpse-grafana
29 | env:
30 | - name: RELAY_KEY
31 | fromSecret: relayKey
32 | - name: RELAY_SECRET
33 | fromSecret: relaySecret
34 | restartPolicy: {}
35 |
--------------------------------------------------------------------------------
/samples/grafana/grafana-synpse.yaml:
--------------------------------------------------------------------------------
1 | name: Grafana
2 | description: Grafana
3 | scheduling:
4 | type: NoDevices
5 | spec:
6 | containers:
7 | - name: grafana
8 | user: root
9 |       # View available tags here: https://hub.docker.com/r/grafana/grafana/tags
10 | image: grafana/grafana
11 | ports:
12 | - "3000:3000"
13 | volumes:
14 | - /synpse/grafana:/var/lib/grafana
15 | - name: relayd
16 |       # Note that this is an AMD64 image. For ARM use webhookrelay/webhookrelayd-aarch64:1
17 | image: webhookrelay/webhookrelayd:1
18 | args:
19 | - --mode
20 | - tunnel
21 | - -t
22 | - synpse-grafana
23 | env:
24 | - name: RELAY_KEY
25 | fromSecret: relayKey
26 | - name: RELAY_SECRET
27 | fromSecret: relaySecret
28 | restartPolicy: {}
29 |
--------------------------------------------------------------------------------
/samples/grafana/grafana.ini:
--------------------------------------------------------------------------------
1 | [security]
2 | # disable creation of admin user on first start of grafana
3 | disable_initial_admin_creation = false
4 |
5 | # default admin user, created on startup
6 | admin_user = admin@synpse.net
7 |
8 | # default admin password, can be changed before first start of grafana, or in profile settings
9 | admin_password = admin
10 |
11 | # set to true if you host Grafana behind HTTPS. default is false.
12 | cookie_secure = true
13 |
14 | # set cookie SameSite attribute. defaults to `lax`. can be set to "lax", "strict", "none" and "disabled"
15 | cookie_samesite = none
--------------------------------------------------------------------------------
/samples/home-assistant/README.md:
--------------------------------------------------------------------------------
1 | # Home Assistant on Synpse
2 |
3 | [Home Assistant](https://www.home-assistant.io/) is a popular open-source home automation tool.
4 |
5 | It can either be installed as a "Home Assistant Operating System", which strictly limits what else you can do with your device, or you can choose to install it as a [container](https://www.home-assistant.io/installation/linux#install-home-assistant-container) through Docker.
6 |
7 | ## Deployment
8 |
9 | Use `ha-synpse.yaml` to deploy a containerized version of Home Assistant. Once deployed, you can access it using `http://[device IP]:8123`. Next steps can be found in [Home Assistant official docs](https://www.home-assistant.io/getting-started/onboarding/).
10 |
11 |
12 | ## Expose with domain
13 |
14 | To expose your [Home Assistant](https://www.home-assistant.io/) you can use
15 | [Webhookrelay](https://webhookrelay.com/)
16 |
17 | 1. Register and log in to WHR
18 |
19 | 1. Create bidirectional tunnel with custom domain. Set destination to `http://homeassistant:8123`
20 |
21 | 1. Create token to configure your tunnel
22 |
23 | 1. Create secrets `relaySecret` and `relayKey`:
24 |
25 | ```
26 | synpse secret create relaySecret -v RELAYSECRET
27 | synpse secret create relayKey -v RELAYKEY
28 | ```
29 |
30 | 1. Change `ha-synpse-webhookrelay.yaml` to point to your tunnel:
31 | ```
32 | - name: relayd
33 | image: webhookrelay/webhookrelayd-aarch64:1
34 | args:
35 | - --mode
36 | - tunnel
37 | - -t
38 | -
39 | ```
40 |
41 | 1. Deploy `ha-synpse-webhookrelay.yaml` to Synpse!
--------------------------------------------------------------------------------
/samples/home-assistant/ha-synpse-webhookrelay.yaml:
--------------------------------------------------------------------------------
1 | name: homeassistant-with-whr
2 | description: Home Assistant with Webhook Relay tunnel.
3 | scheduling:
4 | type: AllDevices
5 | # Uncomment to use a specific device
6 | # selectors:
7 | # homeassistant: master
8 | spec:
9 | containers:
10 | - name: homeassistant
11 | image: docker.io/homeassistant/raspberrypi4-homeassistant:stable
12 | hostname: homeassistant
13 | volumes:
14 | - /usr/homeassistant:/config
15 | - /etc/localtime:/etc/localtime
16 | restartPolicy: {}
17 | - name: relayd
18 | image: webhookrelay/webhookrelayd-aarch64:1
19 | args:
20 | - --mode
21 | - tunnel
22 | - -t
23 | - hass
24 | env:
25 | - name: RELAY_KEY
26 | fromSecret: relayKey
27 | - name: RELAY_SECRET
28 | fromSecret: relaySecret
29 | restartPolicy: {}
30 |
--------------------------------------------------------------------------------
/samples/home-assistant/ha-synpse.yaml:
--------------------------------------------------------------------------------
1 | name: homeassistant
2 | description: Homeassistant
3 | type: container
4 | scheduling:
5 | type: AllDevices
6 | # Uncomment to use a specific device
7 | # selectors:
8 | # homeassistant: master
9 | spec:
10 | containers:
11 | - name: homeassistant
12 | image: docker.io/homeassistant/raspberrypi4-homeassistant:stable
13 | networkMode: host
14 | volumes:
15 | - /usr/homeassistant:/config
16 | - /etc/localtime:/etc/localtime
17 |
--------------------------------------------------------------------------------
/samples/jupyterlab/README.md:
--------------------------------------------------------------------------------
1 | # Deploy JupyterLab for prototyping in ML/AI
2 |
3 | [JupyterLab](https://jupyter.org/) is a web-based interactive development environment for Jupyter notebooks, code, and data. JupyterLab is flexible: configure and arrange the user interface to support a wide range of workflows in data science, scientific computing, and machine learning. JupyterLab is extensible and modular: write plugins that add new components and integrate with existing ones.
4 |
5 | ## Prerequisites
6 |
7 | - Account in [Synpse](https://cloud.synpse.net)
8 | - Ensure you have a project and namespace ready (we create them by default once you log in)
9 |
10 | ## Deployment
11 |
12 | This deployment sample will:
13 | - Create a JupyterLab container (data will be persisted on the host's `/data/synpse-jupyter` path)
14 |
15 |
16 |
17 |
18 |
19 | ## Next steps
20 |
21 | Log in by opening http://[device IP]:8888 and start prototyping!
22 |
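23 | The sample manifests set `JUPYTER_TOKEN=jupyter123`, so you can either paste that token into the login form or open the lab directly with it (a sketch, assuming the default port mapping):
24 |
25 | ```
26 | http://[device IP]:8888/lab?token=jupyter123
27 | ```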
--------------------------------------------------------------------------------
/samples/jupyterlab/jupyterlab-gpu.yaml:
--------------------------------------------------------------------------------
1 | name: jupyterlab-gpu
2 | scheduling:
3 | type: Conditional
4 | # selectors:
5 | # key: "value" # For selecting devices by labels
6 | # devices: # For selecting devices by name or ID
7 | # - device_name
8 | spec:
9 | containers:
10 | - name: jupyter
11 | image: cschranz/gpu-jupyter:v1.4_cuda-11.0_ubuntu-20.04_python-only
12 | # Uncomment to use authentication to the registry
13 | # auth:
14 | # username:
15 | # fromSecret: dockerPassword
16 |       gpus: all
17 | user: root
18 | ports:
19 | - 8888:8888
20 | env:
21 | - name: JUPYTER_TOKEN
22 | value: jupyter123
23 |         - name: GRANT_SUDO
24 |           value: "yes"
25 |         - name: JUPYTER_ENABLE_LAB
26 |           value: "yes"
27 | volumes:
28 | - /data/jupyter:/home/jovyan/work
29 | restartPolicy: {}
30 |
--------------------------------------------------------------------------------
/samples/jupyterlab/jupyterlab.yaml:
--------------------------------------------------------------------------------
1 | name: jupyterlab
2 | scheduling:
3 | type: Conditional
4 | # selectors:
5 | # key: "value" # For selecting devices by labels
6 | # devices: # For selecting devices by name or ID
7 | # - device_name
8 | spec:
9 | containers:
10 | - name: jupyter
11 | image: quay.io/jupyter/minimal-notebook:latest
12 | # Uncomment to use authentication to the registry
13 | # auth:
14 | # username:
15 | # fromSecret: dockerPassword
16 | # gpus: all
17 | user: root
18 | ports:
19 | - 8888:8888
20 | env:
21 | - name: JUPYTER_TOKEN
22 | value: jupyter123
23 |         - name: GRANT_SUDO
24 |           value: "yes"
25 |         - name: JUPYTER_ENABLE_LAB
26 |           value: "yes"
27 | volumes:
28 | - /data/jupyter:/home/jovyan/work
29 | restartPolicy: {}
30 |
--------------------------------------------------------------------------------
/samples/nfs-server/nfs-client.yaml:
--------------------------------------------------------------------------------
1 | name: NFSClient
2 | description: NFSClient deployment
3 | scheduling:
4 | type: AllDevices
5 | selectors: {}
6 | spec:
7 | containers:
8 | - name: nfs-client
9 | image: busybox
10 | args:
11 | - sleep
12 | - "3600"
13 | mount:
14 | - type: volume
15 | target: /mnt/nfs
16 | source: nfs
17 | bindOptions: {}
18 | volumeOptions:
19 | driverConfig:
20 | name: local
21 | options:
22 | device: :/
23 | o: addr=192.168.1.138,nfsvers=4
24 | type: nfs
25 | tmpfsOptions: {}
26 | privileged: true
27 | volumeDriver: local
28 | restartPolicy: {}
29 |
--------------------------------------------------------------------------------
/samples/nfs-server/nfs-server.yaml:
--------------------------------------------------------------------------------
1 | name: NFSServer
2 | description: NFSServer deployment
3 | scheduling:
4 | type: NoDevices
5 | spec:
6 | containers:
7 | - name: nfs
8 | networkMode: host
9 | privileged: true
10 | image: itsthenetwork/nfs-server-alpine:latest
11 | ports:
12 | - "2049:2049"
13 | env:
14 | - name: SHARED_DIRECTORY
15 | value: /nfsshare
16 | volumes:
17 | - /mnt/nfs:/nfsshare
18 |
--------------------------------------------------------------------------------
/samples/nginx/nginx.yaml:
--------------------------------------------------------------------------------
1 | name: nginx-example
2 | scheduling:
3 | type: Conditional
4 | # selectors:
5 | # key: "value" # For selecting devices by labels
6 | # devices: # For selecting devices by name or ID
7 | # - device_name
8 | spec:
9 | containers:
10 | - name: nginx
11 | image: nginx:latest
12 | ports:
13 | - 8080:80
14 | restartPolicy: {}
15 |
--------------------------------------------------------------------------------
/samples/node-red/README.md:
--------------------------------------------------------------------------------
1 | # Node-RED on Synpse
2 |
3 | [Node-RED](https://nodered.org/) is a low-code programming tool for event-driven applications.
4 |
5 | ## Deployment
6 |
7 | Use `nodered-synpse.yaml` to deploy a containerized version of Node-RED. Once deployed, you can access it using `http://[device IP]:1880`. Next steps can be found in [Node-RED official docs](https://nodered.org/docs/getting-started/).
--------------------------------------------------------------------------------
/samples/node-red/nodered-synpse.yaml:
--------------------------------------------------------------------------------
1 | name: NodeRed
2 | description: Low-code programming for event-driven applications
3 | scheduling:
4 | type: AllDevices
5 | # Uncomment to use a specific device
6 | # selectors:
7 | # nodered: master
8 | spec:
9 | containers:
10 | - name: nodered
11 | image: nodered/node-red:latest-minimal
12 | user: root
13 | ports:
14 | - "1880:1880"
15 | volumes:
16 | - /data/nodered:/data
17 | - /etc/localtime:/etc/localtime
18 | - /root/.ssh:/root/.ssh
19 |
--------------------------------------------------------------------------------
/samples/ollama/ollama.yaml:
--------------------------------------------------------------------------------
1 | name: ollama
2 | scheduling:
3 | type: Conditional
4 | # selectors:
5 | # key: "value" # For selecting devices by labels
6 | # devices: # For selecting devices by name or ID
7 | # - device_name
8 | spec:
9 | containers:
10 | - name: ollama
11 | image: ollama/ollama:latest
12 | # Uncomment to enable GPU support
13 | # gpus: all
14 | ports:
15 | - 11434:11434
16 | volumes:
17 | - /data/ollama:/root/.ollama
18 | restartPolicy: {}
19 |
--------------------------------------------------------------------------------
/samples/owncloud/owncloud-synpse.yaml:
--------------------------------------------------------------------------------
1 | name: OwnCloud
2 | description: OwnCloud deployment
3 | scheduling:
4 | type: NoDevices
5 | spec:
6 | containers:
7 | - name: owncloud
8 | # View available tags here: https://hub.docker.com/r/owncloud/server
9 | image: owncloud/server
10 | ports:
11 | - "8080:8080"
12 | volumes:
13 | - /data/owncloud:/mnt/data
14 |
15 |
--------------------------------------------------------------------------------
/samples/pihole/pihole-synpse.yaml:
--------------------------------------------------------------------------------
1 | name: PiHole
2 | description: Network-wide Ad Blocking
3 | scheduling:
4 | type: AllDevices
5 | spec:
6 | containers:
7 | - name: pihole
8 | # View available tags here: https://hub.docker.com/r/pihole/pihole/tags
9 | image: pihole/pihole:v5.8.1-arm64-stretch
10 | user: root
11 | ports:
12 | - "53:53"
13 | - "67:67"
14 | - "8085:80" # HTTP port
15 | env:
16 | - name: TZ
17 | value: Europe/London
18 | - name: WEBPASSWORD
19 | fromSecret: piholePassword # Create a secret with your password
20 | volumes:
21 | - /data/pihole/etc/:/etc/pihole/
22 | - /data/pihole/etc/dnsmasq.d:/etc/dnsmasq.d/
23 | capAdd:
24 | - NET_ADMIN
25 |
26 |
--------------------------------------------------------------------------------
/samples/prometheus/README.md:
--------------------------------------------------------------------------------
1 | # Prometheus Synpse
2 |
3 | How to run Prometheus on Synpse
4 |
5 | 1. Create secret with Prometheus configuration:
6 | ```
7 | synpse secret create prometheus-config -f samples/prometheus/prometheus-config.yml
8 | ```
9 |
10 | 1. Create Prometheus deployment:
11 | ```
12 | synpse deploy -f samples/prometheus/prometheus-synpse.yaml
13 | ```
14 |
15 | # Expose with domain
16 |
17 | To expose your Prometheus you can use [Webhookrelay](https://webhookrelay.com/)
18 |
19 | 1. Register and log in to WHR
20 |
21 | 1. Create bidirectional tunnel with custom domain. Set destination to `http://prometheus:9090`
22 |
23 | 1. Create token to configure your tunnel
24 |
25 | 1. Create secrets `relaySecret` and `relayKey`:
26 |
27 | ```
28 | synpse secret create relaySecret -v RELAYSECRET
29 | synpse secret create relayKey -v RELAYKEY
30 | ```
31 |
32 | 1. Change `prometheus-synpse-webhookrelay.yaml` to point to your tunnel:
33 | ```
34 | - name: relayd
35 | image: webhookrelay/webhookrelayd-aarch64:1
36 | args:
37 | - --mode
38 | - tunnel
39 | - -t
40 | -
41 | ```
42 |
43 | 1. Deploy `prometheus-synpse-webhookrelay.yaml` to Synpse!
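44 |
45 | To check that Prometheus itself is up, you can query its health endpoint from the device (a sketch, assuming the default 9090 port mapping):
46 |
47 | ```
48 | curl http://localhost:9090/-/healthy
49 | ```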
--------------------------------------------------------------------------------
/samples/prometheus/prometheus-config.yml:
--------------------------------------------------------------------------------
1 | # my global config
2 | global:
3 | scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
4 | evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
5 | # scrape_timeout is set to the global default (10s).
6 |
7 | # Alertmanager configuration
8 | alerting:
9 | alertmanagers:
10 | - static_configs:
11 | - targets:
12 | # - alertmanager:9093
13 |
14 | # Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
15 | rule_files:
16 | # - "first_rules.yml"
17 | # - "second_rules.yml"
18 |
19 | # A scrape configuration containing exactly one endpoint to scrape:
20 | # Here it's Prometheus itself.
21 | scrape_configs:
22 | # The job name is added as a label `job=` to any timeseries scraped from this config.
23 | - job_name: 'prometheus'
24 |
25 | # metrics_path defaults to '/metrics'
26 | # scheme defaults to 'http'.
27 |
28 | static_configs:
29 | - targets: ['localhost:9090']
--------------------------------------------------------------------------------
/samples/prometheus/prometheus-synpse-webhookrelay.yaml:
--------------------------------------------------------------------------------
1 | name: Prometheus
2 | description: Prometheus metrics
3 | scheduling:
4 | type: NoDevices
5 | spec:
6 | containers:
7 | - name: prometheus
8 | # View available tags here: https://hub.docker.com/r/prom/prometheus/tags
9 | image: prom/prometheus
10 | command:
11 |         - --web.listen-address=:9090
12 | user: root
13 | ports:
14 | - "9090:9090"
15 | volumes:
16 | - /synpse/prometheus/:/prometheus
17 | secrets:
18 | - name: prometheus-config
19 | filepath: /etc/prometheus/prometheus.yml
20 | - name: relayd
21 | # Note that this AMD64 image. for ARM use webhookrelay/webhookrelayd-aarch64:1
22 | image: webhookrelay/webhookrelayd:1
23 | args:
24 | - --mode
25 | - tunnel
26 | - -t
27 | - synpse-prom
28 | env:
29 | - name: RELAY_KEY
30 | fromSecret: relayKey
31 | - name: RELAY_SECRET
32 | fromSecret: relaySecret
33 | restartPolicy: {}
34 |
--------------------------------------------------------------------------------
/samples/prometheus/prometheus-synpse.yaml:
--------------------------------------------------------------------------------
1 | name: Prometheus
2 | description: Prometheus metrics
3 | scheduling:
4 | type: NoDevices
5 | spec:
6 | containers:
7 | - name: prometheus
8 | # View available tags here: https://hub.docker.com/r/prom/prometheus/tags
9 | image: prom/prometheus
10 | user: root
11 | ports:
12 | - "9090:9090"
13 | volumes:
14 | - /data/prometheus/:/prometheus
15 | secrets:
16 | - name: prometheus-config
17 | filepath: /etc/prometheus/prometheus.yml
18 |
19 |
20 |
--------------------------------------------------------------------------------
/samples/uptime-kuma/README.md:
--------------------------------------------------------------------------------
1 | # Deploy personal uptime monitoring
2 |
3 | 
4 |
5 | [Uptime-Kuma](https://github.com/louislam/uptime-kuma) is an Open-Source self-hosted uptime monitoring tool. It can periodically check your websites and alert you when services go down.
6 |
7 | ## Prerequisites
8 |
9 | - Account in [Synpse](https://cloud.synpse.net)
10 | - Ensure you have a project and namespace ready (we create them by default once you log in)
11 |
12 | ## Deployment
13 |
14 | This deployment sample will:
15 | - Create an uptime-kuma container (data will be persisted on the host's `/data/uptime-kuma` path)
16 |
17 |
18 |
19 |
20 |
21 |
22 | ## Next steps
23 |
24 | Log in by opening http://[device IP]:3001. Then create an account and start adding your monitors.
25 |
--------------------------------------------------------------------------------
/samples/uptime-kuma/uptime-kuma-caddy.yaml:
--------------------------------------------------------------------------------
1 | name: uptime-monitoring
2 | scheduling:
3 | type: Conditional
4 | selectors:
5 | type: rpi
6 | spec:
7 | containers:
8 | - name: uptime-kuma
9 | image: louislam/uptime-kuma:1
10 | volumes:
11 | - /data/uptime-kuma:/app/data
12 | restartPolicy: {}
13 | - name: caddy
14 | image: caddy:latest
15 | args:
16 | - caddy
17 | - reverse-proxy
18 | - --from
19 | - uptime.example.com
20 | - --to
21 | - uptime-kuma:3001
22 | ports:
23 | - 3001:3001
24 | volumes:
25 | - /data/uptime-caddy:/data
26 | - /data/uptime-caddy-cfg:/config
27 | restartPolicy: {}
28 |
--------------------------------------------------------------------------------
/samples/uptime-kuma/uptime-kuma-webhookrelay.yaml:
--------------------------------------------------------------------------------
1 | name: uptime-monitoring
2 | scheduling:
3 | type: Conditional
4 | selectors:
5 | type: rpi
6 | spec:
7 | containers:
8 | - name: uptime-kuma
9 | image: louislam/uptime-kuma:1
10 | volumes:
11 | - /data/uptime-kuma:/app/data
12 | - name: relayd
13 | image: webhookrelay/webhookrelayd:1
14 | args:
15 | - --mode
16 | - tunnel
17 | - -t
18 | - uptime
19 | env:
20 | - name: RELAY_KEY
21 | fromSecret: relayKey
22 | - name: RELAY_SECRET
23 | fromSecret: relaySecret
24 |
25 |
--------------------------------------------------------------------------------
/samples/uptime-kuma/uptime-kuma.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/synpse-hq/synpse/85f9092fdb43007ce2d9ed7d9c3e9322b3992446/samples/uptime-kuma/uptime-kuma.png
--------------------------------------------------------------------------------
/samples/uptime-kuma/uptime-kuma.yaml:
--------------------------------------------------------------------------------
1 | name: uptime-monitoring
2 | scheduling:
3 | type: AllDevices
4 | # Or target specific device:
5 | # type: Conditional
6 | # selectors:
7 | # type: rpi
8 | spec:
9 | containers:
10 | - name: uptime-kuma
11 | image: louislam/uptime-kuma:1
12 | volumes:
13 | - /data/uptime-kuma:/app/data
14 | ports:
15 | - 3001:3001
16 |
--------------------------------------------------------------------------------
/samples/vlllm/vllm-mistral.yaml:
--------------------------------------------------------------------------------
1 | name: vllm-mistral
2 | scheduling:
3 | type: Conditional
4 | selectors:
5 | type: A100 # Or any other machines
6 | spec:
7 | containers:
8 | - name: mistral
9 | image: vllm/vllm-openai:latest
10 | args:
11 | - --model
12 | - mistralai/Mistral-7B-v0.1
13 | gpus: all
14 | ports:
15 | - 8000:8000
16 | volumes:
17 | - /data/hf:/root/.cache/huggingface
18 | env:
19 | - name: HUGGING_FACE_HUB_TOKEN
20 | value:
21 | restartPolicy: {}
22 |
--------------------------------------------------------------------------------
/samples/webhookrelay/webhookrelay-synpse.yaml:
--------------------------------------------------------------------------------
1 | name: webhookrelay-tunnel
2 | description: Webhook Relay tunnel to server running on the host
3 | scheduling:
4 | type: AllDevices
5 | # selectors:
6 | # type: device-1
7 | spec:
8 | containers:
9 | - name: relayd
10 | image: webhookrelay/webhookrelayd-aarch64:1 # or other images
11 | networkMode: host
12 | args:
13 | - --mode
14 | - tunnel
15 | - -t
16 | - TUNNEL_NAME # Create a tunnel
17 | env: # Create a token from here: https://my.webhookrelay.com/tokens
18 | - name: RELAY_KEY
19 | fromSecret: relayKey
20 | - name: RELAY_SECRET
21 | fromSecret: relaySecret
--------------------------------------------------------------------------------