The response has been limited to 50k tokens of the smallest files in the repo. You can remove this limitation by removing the max tokens filter.
├── .env.example
├── .gitignore
├── LICENSE
├── README.md
├── assets
    └── n8n-demo.gif
├── docker-compose.yml
└── n8n
    └── demo-data
        ├── credentials
            ├── sFfERYppMeBnFNeA.json
            └── xHuYe0MDGOs9IpBW.json
        └── workflows
            └── srOnR8PAY3u4RSwb.json


/.env.example:
--------------------------------------------------------------------------------
 1 | POSTGRES_USER=root
 2 | POSTGRES_PASSWORD=password
 3 | POSTGRES_DB=n8n
 4 | 
 5 | N8N_ENCRYPTION_KEY=super-secret-key
 6 | N8N_USER_MANAGEMENT_JWT_SECRET=even-more-secret
 7 | N8N_DEFAULT_BINARY_DATA_MODE=filesystem
 8 | 
 9 | # For Mac users running OLLAMA locally
10 | # See https://github.com/n8n-io/self-hosted-ai-starter-kit?tab=readme-ov-file#for-mac--apple-silicon-users
11 | # OLLAMA_HOST=host.docker.internal:11434
12 | 
13 | 
14 | 


--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .env
2 | 


--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
  1 |                                  Apache License
  2 |                            Version 2.0, January 2004
  3 |                         http://www.apache.org/licenses/
  4 | 
  5 |    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
  6 | 
  7 |    1. Definitions.
  8 | 
  9 |       "License" shall mean the terms and conditions for use, reproduction,
 10 |       and distribution as defined by Sections 1 through 9 of this document.
 11 | 
 12 |       "Licensor" shall mean the copyright owner or entity authorized by
 13 |       the copyright owner that is granting the License.
 14 | 
 15 |       "Legal Entity" shall mean the union of the acting entity and all
 16 |       other entities that control, are controlled by, or are under common
 17 |       control with that entity. For the purposes of this definition,
 18 |       "control" means (i) the power, direct or indirect, to cause the
 19 |       direction or management of such entity, whether by contract or
 20 |       otherwise, or (ii) ownership of fifty percent (50%) or more of the
 21 |       outstanding shares, or (iii) beneficial ownership of such entity.
 22 | 
 23 |       "You" (or "Your") shall mean an individual or Legal Entity
 24 |       exercising permissions granted by this License.
 25 | 
 26 |       "Source" form shall mean the preferred form for making modifications,
 27 |       including but not limited to software source code, documentation
 28 |       source, and configuration files.
 29 | 
 30 |       "Object" form shall mean any form resulting from mechanical
 31 |       transformation or translation of a Source form, including but
 32 |       not limited to compiled object code, generated documentation,
 33 |       and conversions to other media types.
 34 | 
 35 |       "Work" shall mean the work of authorship, whether in Source or
 36 |       Object form, made available under the License, as indicated by a
 37 |       copyright notice that is included in or attached to the work
 38 |       (an example is provided in the Appendix below).
 39 | 
 40 |       "Derivative Works" shall mean any work, whether in Source or Object
 41 |       form, that is based on (or derived from) the Work and for which the
 42 |       editorial revisions, annotations, elaborations, or other modifications
 43 |       represent, as a whole, an original work of authorship. For the purposes
 44 |       of this License, Derivative Works shall not include works that remain
 45 |       separable from, or merely link (or bind by name) to the interfaces of,
 46 |       the Work and Derivative Works thereof.
 47 | 
 48 |       "Contribution" shall mean any work of authorship, including
 49 |       the original version of the Work and any modifications or additions
 50 |       to that Work or Derivative Works thereof, that is intentionally
 51 |       submitted to Licensor for inclusion in the Work by the copyright owner
 52 |       or by an individual or Legal Entity authorized to submit on behalf of
 53 |       the copyright owner. For the purposes of this definition, "submitted"
 54 |       means any form of electronic, verbal, or written communication sent
 55 |       to the Licensor or its representatives, including but not limited to
 56 |       communication on electronic mailing lists, source code control systems,
 57 |       and issue tracking systems that are managed by, or on behalf of, the
 58 |       Licensor for the purpose of discussing and improving the Work, but
 59 |       excluding communication that is conspicuously marked or otherwise
 60 |       designated in writing by the copyright owner as "Not a Contribution."
 61 | 
 62 |       "Contributor" shall mean Licensor and any individual or Legal Entity
 63 |       on behalf of whom a Contribution has been received by Licensor and
 64 |       subsequently incorporated within the Work.
 65 | 
 66 |    2. Grant of Copyright License. Subject to the terms and conditions of
 67 |       this License, each Contributor hereby grants to You a perpetual,
 68 |       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 69 |       copyright license to reproduce, prepare Derivative Works of,
 70 |       publicly display, publicly perform, sublicense, and distribute the
 71 |       Work and such Derivative Works in Source or Object form.
 72 | 
 73 |    3. Grant of Patent License. Subject to the terms and conditions of
 74 |       this License, each Contributor hereby grants to You a perpetual,
 75 |       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 76 |       (except as stated in this section) patent license to make, have made,
 77 |       use, offer to sell, sell, import, and otherwise transfer the Work,
 78 |       where such license applies only to those patent claims licensable
 79 |       by such Contributor that are necessarily infringed by their
 80 |       Contribution(s) alone or by combination of their Contribution(s)
 81 |       with the Work to which such Contribution(s) was submitted. If You
 82 |       institute patent litigation against any entity (including a
 83 |       cross-claim or counterclaim in a lawsuit) alleging that the Work
 84 |       or a Contribution incorporated within the Work constitutes direct
 85 |       or contributory patent infringement, then any patent licenses
 86 |       granted to You under this License for that Work shall terminate
 87 |       as of the date such litigation is filed.
 88 | 
 89 |    4. Redistribution. You may reproduce and distribute copies of the
 90 |       Work or Derivative Works thereof in any medium, with or without
 91 |       modifications, and in Source or Object form, provided that You
 92 |       meet the following conditions:
 93 | 
 94 |       (a) You must give any other recipients of the Work or
 95 |           Derivative Works a copy of this License; and
 96 | 
 97 |       (b) You must cause any modified files to carry prominent notices
 98 |           stating that You changed the files; and
 99 | 
100 |       (c) You must retain, in the Source form of any Derivative Works
101 |           that You distribute, all copyright, patent, trademark, and
102 |           attribution notices from the Source form of the Work,
103 |           excluding those notices that do not pertain to any part of
104 |           the Derivative Works; and
105 | 
106 |       (d) If the Work includes a "NOTICE" text file as part of its
107 |           distribution, then any Derivative Works that You distribute must
108 |           include a readable copy of the attribution notices contained
109 |           within such NOTICE file, excluding those notices that do not
110 |           pertain to any part of the Derivative Works, in at least one
111 |           of the following places: within a NOTICE text file distributed
112 |           as part of the Derivative Works; within the Source form or
113 |           documentation, if provided along with the Derivative Works; or,
114 |           within a display generated by the Derivative Works, if and
115 |           wherever such third-party notices normally appear. The contents
116 |           of the NOTICE file are for informational purposes only and
117 |           do not modify the License. You may add Your own attribution
118 |           notices within Derivative Works that You distribute, alongside
119 |           or as an addendum to the NOTICE text from the Work, provided
120 |           that such additional attribution notices cannot be construed
121 |           as modifying the License.
122 | 
123 |       You may add Your own copyright statement to Your modifications and
124 |       may provide additional or different license terms and conditions
125 |       for use, reproduction, or distribution of Your modifications, or
126 |       for any such Derivative Works as a whole, provided Your use,
127 |       reproduction, and distribution of the Work otherwise complies with
128 |       the conditions stated in this License.
129 | 
130 |    5. Submission of Contributions. Unless You explicitly state otherwise,
131 |       any Contribution intentionally submitted for inclusion in the Work
132 |       by You to the Licensor shall be under the terms and conditions of
133 |       this License, without any additional terms or conditions.
134 |       Notwithstanding the above, nothing herein shall supersede or modify
135 |       the terms of any separate license agreement you may have executed
136 |       with Licensor regarding such Contributions.
137 | 
138 |    6. Trademarks. This License does not grant permission to use the trade
139 |       names, trademarks, service marks, or product names of the Licensor,
140 |       except as required for reasonable and customary use in describing the
141 |       origin of the Work and reproducing the content of the NOTICE file.
142 | 
143 |    7. Disclaimer of Warranty. Unless required by applicable law or
144 |       agreed to in writing, Licensor provides the Work (and each
145 |       Contributor provides its Contributions) on an "AS IS" BASIS,
146 |       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 |       implied, including, without limitation, any warranties or conditions
148 |       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 |       PARTICULAR PURPOSE. You are solely responsible for determining the
150 |       appropriateness of using or redistributing the Work and assume any
151 |       risks associated with Your exercise of permissions under this License.
152 | 
153 |    8. Limitation of Liability. In no event and under no legal theory,
154 |       whether in tort (including negligence), contract, or otherwise,
155 |       unless required by applicable law (such as deliberate and grossly
156 |       negligent acts) or agreed to in writing, shall any Contributor be
157 |       liable to You for damages, including any direct, indirect, special,
158 |       incidental, or consequential damages of any character arising as a
159 |       result of this License or out of the use or inability to use the
160 |       Work (including but not limited to damages for loss of goodwill,
161 |       work stoppage, computer failure or malfunction, or any and all
162 |       other commercial damages or losses), even if such Contributor
163 |       has been advised of the possibility of such damages.
164 | 
165 |    9. Accepting Warranty or Additional Liability. While redistributing
166 |       the Work or Derivative Works thereof, You may choose to offer,
167 |       and charge a fee for, acceptance of support, warranty, indemnity,
168 |       or other liability obligations and/or rights consistent with this
169 |       License. However, in accepting such obligations, You may act only
170 |       on Your own behalf and on Your sole responsibility, not on behalf
171 |       of any other Contributor, and only if You agree to indemnify,
172 |       defend, and hold each Contributor harmless for any liability
173 |       incurred by, or claims asserted against, such Contributor by reason
174 |       of your accepting any such warranty or additional liability.
175 | 
176 |    END OF TERMS AND CONDITIONS
177 | 
178 |    APPENDIX: How to apply the Apache License to your work.
179 | 
180 |       To apply the Apache License to your work, attach the following
181 |       boilerplate notice, with the fields enclosed by brackets "[]"
182 |       replaced with your own identifying information. (Don't include
183 |       the brackets!)  The text should be enclosed in the appropriate
184 |       comment syntax for the file format. We also recommend that a
185 |       file or class name and description of purpose be included on the
186 |       same "printed page" as the copyright notice for easier
187 |       identification within third-party archives.
188 | 
189 |    Copyright 2024-present n8n GmbH
190 | 
191 |    Licensed under the Apache License, Version 2.0 (the "License");
192 |    you may not use this file except in compliance with the License.
193 |    You may obtain a copy of the License at
194 | 
195 |        http://www.apache.org/licenses/LICENSE-2.0
196 | 
197 |    Unless required by applicable law or agreed to in writing, software
198 |    distributed under the License is distributed on an "AS IS" BASIS,
199 |    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 |    See the License for the specific language governing permissions and
201 |    limitations under the License.
202 | 


--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
  1 | # Self-hosted AI starter kit
  2 | 
  3 | **Self-hosted AI Starter Kit** is an open-source Docker Compose template designed to swiftly initialize a comprehensive local AI and low-code development environment.
  4 | 
  5 | ![n8n.io - Screenshot](https://raw.githubusercontent.com/n8n-io/self-hosted-ai-starter-kit/main/assets/n8n-demo.gif)
  6 | 
  7 | Curated by <https://github.com/n8n-io>, it combines the self-hosted n8n
  8 | platform with a curated list of compatible AI products and components to
  9 | quickly get started with building self-hosted AI workflows.
 10 | 
 11 | > [!TIP]
 12 | > [Read the announcement](https://blog.n8n.io/self-hosted-ai/)
 13 | 
 14 | ### What’s included
 15 | 
 16 | ✅ [**Self-hosted n8n**](https://n8n.io/) - Low-code platform with over 400
 17 | integrations and advanced AI components
 18 | 
 19 | ✅ [**Ollama**](https://ollama.com/) - Cross-platform LLM platform to install
 20 | and run the latest local LLMs
 21 | 
 22 | ✅ [**Qdrant**](https://qdrant.tech/) - Open-source, high performance vector
 23 | store with a comprehensive API
 24 | 
 25 | ✅ [**PostgreSQL**](https://www.postgresql.org/) - Workhorse of the Data
 26 | Engineering world, handles large amounts of data safely.
 27 | 
 28 | ### What you can build
 29 | 
 30 | ⭐️ **AI Agents** for scheduling appointments
 31 | 
 32 | ⭐️ **Summarize Company PDFs** securely without data leaks
 33 | 
 34 | ⭐️ **Smarter Slack Bots** for enhanced company communications and IT operations
 35 | 
 36 | ⭐️ **Private Financial Document Analysis** at minimal cost
 37 | 
 38 | ## Installation
 39 | 
 40 | ### Cloning the Repository
 41 | 
 42 | ```bash
 43 | git clone https://github.com/n8n-io/self-hosted-ai-starter-kit.git
 44 | cd self-hosted-ai-starter-kit
 45 | cp .env.example .env # you should update secrets and passwords inside
 46 | ```
 47 | 
 48 | ### Running n8n using Docker Compose
 49 | 
 50 | #### For Nvidia GPU users
 51 | 
 52 | ```bash
 53 | git clone https://github.com/n8n-io/self-hosted-ai-starter-kit.git
 54 | cd self-hosted-ai-starter-kit
 55 | cp .env.example .env # you should update secrets and passwords inside
 56 | docker compose --profile gpu-nvidia up
 57 | ```
 58 | 
 59 | > [!NOTE]
 60 | > If you have not used your Nvidia GPU with Docker before, please follow the
 61 | > [Ollama Docker instructions](https://github.com/ollama/ollama/blob/main/docs/docker.md).
 62 | 
 63 | #### For AMD GPU users on Linux
 64 | 
 65 | ```bash
 66 | git clone https://github.com/n8n-io/self-hosted-ai-starter-kit.git
 67 | cd self-hosted-ai-starter-kit
 68 | cp .env.example .env # you should update secrets and passwords inside
 69 | docker compose --profile gpu-amd up
 70 | ```
 71 | 
 72 | #### For Mac / Apple Silicon users
 73 | 
 74 | If you’re using a Mac with an M1 or newer processor, you can't expose your GPU
 75 | to the Docker instance, unfortunately. There are two options in this case:
 76 | 
 77 | 1. Run the starter kit fully on CPU, like in the section "For everyone else"
 78 |    below
 79 | 2. Run Ollama on your Mac for faster inference, and connect to that from the
 80 |    n8n instance
 81 | 
 82 | If you want to run Ollama on your mac, check the
 83 | [Ollama homepage](https://ollama.com/)
 84 | for installation instructions, and run the starter kit as follows:
 85 | 
 86 | ```bash
 87 | git clone https://github.com/n8n-io/self-hosted-ai-starter-kit.git
 88 | cd self-hosted-ai-starter-kit
 89 | cp .env.example .env # you should update secrets and passwords inside
 90 | docker compose up
 91 | ```
 92 | 
 93 | ##### For Mac users running OLLAMA locally
 94 | 
 95 | If you're running OLLAMA locally on your Mac (not in Docker), you need to modify the OLLAMA_HOST environment variable.
 96 | 
 97 | 1. Set OLLAMA_HOST to `host.docker.internal:11434` in your .env file. 
 98 | 2. Additionally, after you see "Editor is now accessible via: <http://localhost:5678/>":
 99 | 
100 |     1. Head to <http://localhost:5678/home/credentials>
101 |     2. Click on "Local Ollama service"
102 |     3. Change the base URL to "http://host.docker.internal:11434/"
103 | 
104 | #### For everyone else
105 | 
106 | ```bash
107 | git clone https://github.com/n8n-io/self-hosted-ai-starter-kit.git
108 | cd self-hosted-ai-starter-kit
109 | cp .env.example .env # you should update secrets and passwords inside
110 | docker compose --profile cpu up
111 | ```
112 | 
113 | ## ⚡️ Quick start and usage
114 | 
115 | The core of the Self-hosted AI Starter Kit is a Docker Compose file, pre-configured with network and storage settings, minimizing the need for additional installations.
116 | After completing the installation steps above, simply follow the steps below to get started.
117 | 
118 | 1. Open <http://localhost:5678/> in your browser to set up n8n. You’ll only
119 |    have to do this once.
120 | 2. Open the included workflow:
121 |    <http://localhost:5678/workflow/srOnR8PAY3u4RSwb>
122 | 3. Click the **Chat** button at the bottom of the canvas, to start running the workflow.
123 | 4. If this is the first time you’re running the workflow, you may need to wait
124 |    until Ollama finishes downloading Llama3.2. You can inspect the docker
125 |    console logs to check on the progress.
126 | 
127 | To open n8n at any time, visit <http://localhost:5678/> in your browser.
128 | 
129 | With your n8n instance, you’ll have access to over 400 integrations and a
130 | suite of basic and advanced AI nodes such as
131 | [AI Agent](https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.agent/),
132 | [Text classifier](https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.text-classifier/),
133 | and [Information Extractor](https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.information-extractor/)
134 | nodes. To keep everything local, just remember to use the Ollama node for your
135 | language model and Qdrant as your vector store.
136 | 
137 | > [!NOTE]
138 | > This starter kit is designed to help you get started with self-hosted AI
139 | > workflows. While it’s not fully optimized for production environments, it
140 | > combines robust components that work well together for proof-of-concept
141 | > projects. You can customize it to meet your specific needs.
142 | 
143 | ## Upgrading
144 | 
145 | * ### For Nvidia GPU setups:
146 | 
147 | ```bash
148 | docker compose --profile gpu-nvidia pull
149 | docker compose create && docker compose --profile gpu-nvidia up
150 | ```
151 | 
152 | * ### For Mac / Apple Silicon users
153 | 
154 | ```bash
155 | docker compose pull
156 | docker compose create && docker compose up
157 | ```
158 | 
159 | * ### For Non-GPU setups:
160 | 
161 | ```bash
162 | docker compose --profile cpu pull
163 | docker compose create && docker compose --profile cpu up
164 | ```
165 | 
166 | ## 👓 Recommended reading
167 | 
168 | n8n is full of useful content for getting started quickly with its AI concepts
169 | and nodes. If you run into an issue, go to [support](#support).
170 | 
171 | - [AI agents for developers: from theory to practice with n8n](https://blog.n8n.io/ai-agents/)
172 | - [Tutorial: Build an AI workflow in n8n](https://docs.n8n.io/advanced-ai/intro-tutorial/)
173 | - [Langchain Concepts in n8n](https://docs.n8n.io/advanced-ai/langchain/langchain-n8n/)
174 | - [Demonstration of key differences between agents and chains](https://docs.n8n.io/advanced-ai/examples/agent-chain-comparison/)
175 | - [What are vector databases?](https://docs.n8n.io/advanced-ai/examples/understand-vector-databases/)
176 | 
177 | ## 🎥 Video walkthrough
178 | 
179 | - [Installing and using Local AI for n8n](https://www.youtube.com/watch?v=xz_X2N-hPg0)
180 | 
181 | ## 🛍️ More AI templates
182 | 
183 | For more AI workflow ideas, visit the [**official n8n AI template
184 | gallery**](https://n8n.io/workflows/categories/ai/). From each workflow,
185 | select the **Use workflow** button to automatically import the workflow into
186 | your local n8n instance.
187 | 
188 | ### Learn AI key concepts
189 | 
190 | - [AI Agent Chat](https://n8n.io/workflows/1954-ai-agent-chat/)
191 | - [AI chat with any data source (using the n8n workflow too)](https://n8n.io/workflows/2026-ai-chat-with-any-data-source-using-the-n8n-workflow-tool/)
192 | - [Chat with OpenAI Assistant (by adding a memory)](https://n8n.io/workflows/2098-chat-with-openai-assistant-by-adding-a-memory/)
193 | - [Use an open-source LLM (via Hugging Face)](https://n8n.io/workflows/1980-use-an-open-source-llm-via-huggingface/)
194 | - [Chat with PDF docs using AI (quoting sources)](https://n8n.io/workflows/2165-chat-with-pdf-docs-using-ai-quoting-sources/)
195 | - [AI agent that can scrape webpages](https://n8n.io/workflows/2006-ai-agent-that-can-scrape-webpages/)
196 | 
197 | ### Local AI templates
198 | 
199 | - [Tax Code Assistant](https://n8n.io/workflows/2341-build-a-tax-code-assistant-with-qdrant-mistralai-and-openai/)
200 | - [Breakdown Documents into Study Notes with MistralAI and Qdrant](https://n8n.io/workflows/2339-breakdown-documents-into-study-notes-using-templating-mistralai-and-qdrant/)
201 | - [Financial Documents Assistant using Qdrant and Mistral.ai](https://n8n.io/workflows/2335-build-a-financial-documents-assistant-using-qdrant-and-mistralai/)
202 | - [Recipe Recommendations with Qdrant and Mistral](https://n8n.io/workflows/2333-recipe-recommendations-with-qdrant-and-mistral/)
203 | 
204 | ## Tips & tricks
205 | 
206 | ### Accessing local files
207 | 
208 | The self-hosted AI starter kit will create a shared folder (by default,
209 | located in the same directory) which is mounted to the n8n container and
210 | allows n8n to access files on disk. This folder within the n8n container is
211 | located at `/data/shared` -- this is the path you’ll need to use in nodes that
212 | interact with the local filesystem.
213 | 
214 | **Nodes that interact with the local filesystem**
215 | 
216 | - [Read/Write Files from Disk](https://docs.n8n.io/integrations/builtin/core-nodes/n8n-nodes-base.filesreadwrite/)
217 | - [Local File Trigger](https://docs.n8n.io/integrations/builtin/core-nodes/n8n-nodes-base.localfiletrigger/)
218 | - [Execute Command](https://docs.n8n.io/integrations/builtin/core-nodes/n8n-nodes-base.executecommand/)
219 | 
220 | ## 📜 License
221 | 
222 | This project is licensed under the Apache License 2.0 - see the
223 | [LICENSE](LICENSE) file for details.
224 | 
225 | ## 💬 Support
226 | 
227 | Join the conversation in the [n8n Forum](https://community.n8n.io/), where you
228 | can:
229 | 
230 | - **Share Your Work**: Show off what you’ve built with n8n and inspire others
231 |   in the community.
232 | - **Ask Questions**: Whether you’re just getting started or you’re a seasoned
233 |   pro, the community and our team are ready to support with any challenges.
234 | - **Propose Ideas**: Have an idea for a feature or improvement? Let us know!
235 |   We’re always eager to hear what you’d like to see next.
236 | 


--------------------------------------------------------------------------------
/assets/n8n-demo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/n8n-io/self-hosted-ai-starter-kit/06319a57af662810230c1a63175baaf312b427a9/assets/n8n-demo.gif


--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
  1 | volumes:
  2 |   n8n_storage:
  3 |   postgres_storage:
  4 |   ollama_storage:
  5 |   qdrant_storage:
  6 | 
  7 | networks:
  8 |   demo:
  9 | 
 10 | x-n8n: &service-n8n
 11 |   image: n8nio/n8n:latest
 12 |   networks: ['demo']
 13 |   environment:
 14 |     - DB_TYPE=postgresdb
 15 |     - DB_POSTGRESDB_HOST=postgres
 16 |     - DB_POSTGRESDB_USER=${POSTGRES_USER}
 17 |     - DB_POSTGRESDB_PASSWORD=${POSTGRES_PASSWORD}
 18 |     - N8N_DIAGNOSTICS_ENABLED=false
 19 |     - N8N_PERSONALIZATION_ENABLED=false
 20 |     - N8N_ENCRYPTION_KEY
 21 |     - N8N_USER_MANAGEMENT_JWT_SECRET
 22 |     - OLLAMA_HOST=${OLLAMA_HOST:-ollama:11434}
 23 |   env_file:
 24 |     - path: .env
 25 |       required: true
 26 | 
 27 | x-ollama: &service-ollama
 28 |   image: ollama/ollama:latest
 29 |   container_name: ollama
 30 |   networks: ['demo']
 31 |   restart: unless-stopped
 32 |   ports:
 33 |     - 11434:11434
 34 |   volumes:
 35 |     - ollama_storage:/root/.ollama
 36 | 
 37 | x-init-ollama: &init-ollama
 38 |   image: ollama/ollama:latest
 39 |   networks: ['demo']
 40 |   container_name: ollama-pull-llama
 41 |   volumes:
 42 |     - ollama_storage:/root/.ollama
 43 |   entrypoint: /bin/sh
 44 |   environment:
 45 |     - OLLAMA_HOST=ollama:11434
 46 |   command:
 47 |     - "-c"
 48 |     - "sleep 3; ollama pull llama3.2"
 49 | 
 50 | services:
 51 |   postgres:
 52 |     image: postgres:16-alpine
 53 |     hostname: postgres
 54 |     networks: ['demo']
 55 |     restart: unless-stopped
 56 |     environment:
 57 |       - POSTGRES_USER
 58 |       - POSTGRES_PASSWORD
 59 |       - POSTGRES_DB
 60 |     volumes:
 61 |       - postgres_storage:/var/lib/postgresql/data
 62 |     healthcheck:
 63 |       test: ['CMD-SHELL', 'pg_isready -h localhost -U ${POSTGRES_USER} -d ${POSTGRES_DB}']
 64 |       interval: 5s
 65 |       timeout: 5s
 66 |       retries: 10
 67 | 
 68 |   n8n-import:
 69 |     <<: *service-n8n
 70 |     hostname: n8n-import
 71 |     container_name: n8n-import
 72 |     entrypoint: /bin/sh
 73 |     command:
 74 |       - "-c"
 75 |       - "n8n import:credentials --separate --input=/demo-data/credentials && n8n import:workflow --separate --input=/demo-data/workflows"
 76 |     volumes:
 77 |       - ./n8n/demo-data:/demo-data
 78 |     depends_on:
 79 |       postgres:
 80 |         condition: service_healthy
 81 | 
 82 |   n8n:
 83 |     <<: *service-n8n
 84 |     hostname: n8n
 85 |     container_name: n8n
 86 |     restart: unless-stopped
 87 |     ports:
 88 |       - 5678:5678
 89 |     volumes:
 90 |       - n8n_storage:/home/node/.n8n
 91 |       - ./n8n/demo-data:/demo-data
 92 |       - ./shared:/data/shared
 93 |     depends_on:
 94 |       postgres:
 95 |         condition: service_healthy
 96 |       n8n-import:
 97 |         condition: service_completed_successfully
 98 | 
 99 |   qdrant:
100 |     image: qdrant/qdrant
101 |     hostname: qdrant
102 |     container_name: qdrant
103 |     networks: ['demo']
104 |     restart: unless-stopped
105 |     ports:
106 |       - 6333:6333
107 |     volumes:
108 |       - qdrant_storage:/qdrant/storage
109 | 
110 |   ollama-cpu:
111 |     profiles: ["cpu"]
112 |     <<: *service-ollama
113 | 
114 |   ollama-gpu:
115 |     profiles: ["gpu-nvidia"]
116 |     <<: *service-ollama
117 |     deploy:
118 |       resources:
119 |         reservations:
120 |           devices:
121 |             - driver: nvidia
122 |               count: 1
123 |               capabilities: [gpu]
124 | 
125 |   ollama-gpu-amd:
126 |     profiles: ["gpu-amd"]
127 |     <<: *service-ollama
128 |     image: ollama/ollama:rocm
129 |     devices:
130 |       - "/dev/kfd"
131 |       - "/dev/dri"
132 | 
133 |   ollama-pull-llama-cpu:
134 |     profiles: ["cpu"]
135 |     <<: *init-ollama
136 |     depends_on:
137 |       - ollama-cpu
138 | 
139 |   ollama-pull-llama-gpu:
140 |     profiles: ["gpu-nvidia"]
141 |     <<: *init-ollama
142 |     depends_on:
143 |       - ollama-gpu
144 | 
145 |   ollama-pull-llama-gpu-amd:
146 |     profiles: ["gpu-amd"]
147 |     <<: *init-ollama
148 |     image: ollama/ollama:rocm
149 |     depends_on:
150 |       - ollama-gpu-amd
151 | 


--------------------------------------------------------------------------------
/n8n/demo-data/credentials/sFfERYppMeBnFNeA.json:
--------------------------------------------------------------------------------
 1 | {
 2 |   "createdAt": "2024-02-23T16:27:55.919Z",
 3 |   "updatedAt": "2024-02-23T16:27:55.918Z",
 4 |   "id": "sFfERYppMeBnFNeA",
 5 |   "name": "Local QdrantApi database",
 6 |   "data": "U2FsdGVkX18bm81Pk18TjmfyKEIbzd91Dt1O8pUPgTxVGk5v1mXp7MlE/3Fl+NHGTMBqa3u7RBS36wTQ74rijQ==",
 7 |   "type": "qdrantApi",
 8 |   "nodesAccess": [
 9 |     {
10 |       "nodeType": "@n8n/n8n-nodes-langchain.vectorStoreQdrant",
11 |       "date": "2024-02-23T16:27:55.918Z"
12 |     }
13 |   ]
14 | }
15 | 


--------------------------------------------------------------------------------
/n8n/demo-data/credentials/xHuYe0MDGOs9IpBW.json:
--------------------------------------------------------------------------------
 1 | {
 2 |   "createdAt": "2024-02-23T16:26:54.475Z",
 3 |   "updatedAt": "2024-02-23T16:26:58.928Z",
 4 |   "id": "xHuYe0MDGOs9IpBW",
 5 |   "name": "Local Ollama service",
 6 |   "data": "U2FsdGVkX18BVmjQBCdNKSrjr0GhmcTwMgG/rSWhncWtqOLPT62WnCIktky8RgM1PhH7vMkMc5EuUFIQA/eEZA==",
 7 |   "type": "ollamaApi",
 8 |   "nodesAccess": [
 9 |     {
10 |       "nodeType": "@n8n/n8n-nodes-langchain.lmChatOllama",
11 |       "date": "2024-02-23T16:26:58.927Z"
12 |     },
13 |     {
14 |       "nodeType": "@n8n/n8n-nodes-langchain.lmOllama",
15 |       "date": "2024-02-23T16:26:58.927Z"
16 |     }
17 |   ]
18 | }
19 | 


--------------------------------------------------------------------------------
/n8n/demo-data/workflows/srOnR8PAY3u4RSwb.json:
--------------------------------------------------------------------------------
 1 | {
 2 |   "createdAt": "2024-02-23T16:58:31.616Z",
 3 |   "updatedAt": "2024-02-23T16:58:31.616Z",
 4 |   "id": "srOnR8PAY3u4RSwb",
 5 |   "name": "Demo workflow",
 6 |   "active": false,
 7 |   "nodes": [
 8 |     {
 9 |       "parameters": {},
10 |       "id": "74003dcd-2ac7-4caa-a1cd-adecc5143c07",
11 |       "name": "Chat Trigger",
12 |       "type": "@n8n/n8n-nodes-langchain.chatTrigger",
13 |       "typeVersion": 1,
14 |       "position": [
15 |         660,
16 |         340
17 |       ],
18 |       "webhookId": "cdb5c076-d458-4b9d-8398-f43bd25059b1"
19 |     },
20 |     {
21 |       "parameters": {},
22 |       "id": "ce8c3da4-899c-4cc4-af73-8096c64eec64",
23 |       "name": "Basic LLM Chain",
24 |       "type": "@n8n/n8n-nodes-langchain.chainLlm",
25 |       "typeVersion": 1.3,
26 |       "position": [
27 |         880,
28 |         340
29 |       ]
30 |     },
31 |     {
32 |       "parameters": {
33 |         "model": "llama3.2:latest",
34 |         "options": {}
35 |       },
36 |       "id": "3dee878b-d748-4829-ac0a-cfd6705d31e5",
37 |       "name": "Ollama Chat Model",
38 |       "type": "@n8n/n8n-nodes-langchain.lmChatOllama",
39 |       "typeVersion": 1,
40 |       "position": [
41 |         900,
42 |         560
43 |       ],
44 |       "credentials": {
45 |         "ollamaApi": {
46 |           "id": "xHuYe0MDGOs9IpBW",
47 |           "name": "Local Ollama service"
48 |         }
49 |       }
50 |     }
51 |   ],
52 |   "connections": {
53 |     "Chat Trigger": {
54 |       "main": [
55 |         [
56 |           {
57 |             "node": "Basic LLM Chain",
58 |             "type": "main",
59 |             "index": 0
60 |           }
61 |         ]
62 |       ]
63 |     },
64 |     "Ollama Chat Model": {
65 |       "ai_languageModel": [
66 |         [
67 |           {
68 |             "node": "Basic LLM Chain",
69 |             "type": "ai_languageModel",
70 |             "index": 0
71 |           }
72 |         ]
73 |       ]
74 |     }
75 |   },
76 |   "settings": {
77 |     "executionOrder": "v1"
78 |   },
79 |   "staticData": null,
80 |   "meta": {
81 |     "templateCredsSetupCompleted": true
82 |   },
83 |   "pinData": {},
84 |   "versionId": "4e2affe6-bb1c-4ddc-92f9-dde0b7656796",
85 |   "triggerCount": 0,
86 |   "tags": []
87 | }
88 | 


--------------------------------------------------------------------------------