├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE │ └── bug_report.md └── workflows │ ├── lint-test-high-availability.yaml │ ├── lint-test-memgraph-lab.yaml │ ├── lint-test-memgraph.yml │ ├── pre-commit.yaml │ └── release.yml ├── .gitignore ├── .helmignore ├── .pre-commit-config.yaml ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── artifacthub-repo.yml ├── charts ├── memgraph-high-availability │ ├── .helmignore │ ├── Chart.yaml │ ├── README.md │ ├── aks │ │ └── README.md │ ├── aws │ │ ├── README.md │ │ └── cluster.yaml │ ├── templates │ │ ├── NOTES.txt │ │ ├── _helpers.tpl │ │ ├── coordinators.yaml │ │ ├── data.yaml │ │ ├── ingress-nginx.yaml │ │ ├── mg-exporter.yaml │ │ ├── services-coordinators-external.yaml │ │ ├── services-coordinators.yaml │ │ ├── services-data-external.yaml │ │ └── services-data.yaml │ └── values.yaml ├── memgraph-lab │ ├── .helmignore │ ├── Chart.yaml │ ├── README.md │ ├── templates │ │ ├── NOTES.txt │ │ ├── _helpers.tpl │ │ ├── deployment.yaml │ │ ├── hpa.yaml │ │ ├── ingress.yaml │ │ ├── service.yaml │ │ ├── serviceaccount.yaml │ │ └── tests │ │ │ └── test-connection.yaml │ └── values.yaml └── memgraph │ ├── .helmignore │ ├── Chart.yaml │ ├── README.md │ ├── memgraph.conf │ ├── templates │ ├── NOTES.txt │ ├── _helpers.tpl │ ├── service.yaml │ ├── serviceaccount.yaml │ ├── statefulset.yaml │ ├── storageclass.yaml │ └── tests │ │ └── test-connection.yaml │ └── values.yaml ├── docker-compose ├── HA_register.cypher ├── README.md ├── docker-compose.yml └── license.cypher ├── index.yaml ├── scripts ├── aks.bash └── gke.bash └── tutorials ├── gcp └── README.md ├── ha-under-aws └── ha-under-azure /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @antejavor @katarinasupe @as51340 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: "" 5 | labels: bug 6 | --- 7 | 8 | **Chart type** 9 | Specify the chart type you experienced issues with (Memgraph Standalone, Memgraph HA, Memgraph LAB) 10 | 11 | **Chart version:** 12 | Specify the version of the chart you are running. 13 | 14 | **What happened?** 15 | Describe the issue and what you expected to happen. 16 | 17 | **Environment:** 18 | Specify the environment where the issue occurred and how it could be related to the bug. 19 | 20 | **Relevant log output:** 21 | Please copy and paste any relevant log output from pods and containers. 
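For example, pod logs can usually be collected with standard `kubectl` commands such as the following (the pod name is a placeholder):

```
kubectl logs <memgraph-pod-name> --all-containers --timestamps
kubectl logs <memgraph-pod-name> --previous   # logs from the previously crashed container, if any
```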
22 | -------------------------------------------------------------------------------- /.github/workflows/lint-test-high-availability.yaml: -------------------------------------------------------------------------------- 1 | name: Lint and Test Charts High Availability 2 | 3 | on: 4 | pull_request: 5 | paths: 6 | - 'charts/memgraph-high-availability/**' 7 | 8 | 9 | jobs: 10 | lint-test: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Checkout 14 | uses: actions/checkout@v3 15 | with: 16 | fetch-depth: 0 17 | 18 | - name: Set up Helm 19 | uses: azure/setup-helm@v3 20 | with: 21 | version: v3.14.0 22 | 23 | - uses: actions/setup-python@v4 24 | with: 25 | python-version: '3.12' 26 | check-latest: true 27 | 28 | - name: Download dependencies 29 | run: | 30 | helm repo add prometheus-community https://prometheus-community.github.io/helm-charts 31 | helm repo update 32 | 33 | - name: Update chart dependencies 34 | run: | 35 | helm dependency update charts/memgraph-high-availability 36 | 37 | - name: Set up chart-testing 38 | uses: helm/chart-testing-action@v2.6.1 39 | 40 | - name: Run chart-testing (list-changed) 41 | id: list-changed 42 | run: | 43 | changed=$(ct list-changed --target-branch ${{ github.event.repository.default_branch }}) 44 | if [[ -n "$changed" ]]; then 45 | echo "changed=true" >> "$GITHUB_OUTPUT" 46 | fi 47 | 48 | - name: Run chart-testing (lint) 49 | if: steps.list-changed.outputs.changed == 'true' 50 | run: ct lint --target-branch ${{ github.event.repository.default_branch }} --check-version-increment false --charts charts/memgraph-high-availability 51 | 52 | 53 | - name: Create kind cluster 54 | if: steps.list-changed.outputs.changed == 'true' 55 | uses: helm/kind-action@v1.8.0 56 | 57 | - name: Set up Memgraph environment variables 58 | if: steps.list-changed.outputs.changed == 'true' 59 | run: | 60 | echo "MEMGRAPH_ENTERPRISE_LICENSE=${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}" >> $GITHUB_ENV 61 | echo "MEMGRAPH_ORGANIZATION_NAME=${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}" >> $GITHUB_ENV 62 | 63 | - name: Custom Helm install for memgraph-high-availability 64 | if: steps.list-changed.outputs.changed == 'true' 65 | env: 66 | MEMGRAPH_ENTERPRISE_LICENSE: ${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }} 67 | MEMGRAPH_ORGANIZATION_NAME: ${{ secrets.MEMGRAPH_ORGANIZATION_NAME }} 68 | run: | 69 | helm install mem-ha-test ./charts/memgraph-high-availability \ 70 | --set memgraph.env.MEMGRAPH_ENTERPRISE_LICENSE=$MEMGRAPH_ENTERPRISE_LICENSE \ 71 | --set memgraph.env.MEMGRAPH_ORGANIZATION_NAME=$MEMGRAPH_ORGANIZATION_NAME \ 72 | --set memgraph.affinity.enabled=false 73 | 74 | - name: Wait for Memgraph setup to complete 75 | if: steps.list-changed.outputs.changed == 'true' 76 | run: sleep 30 77 | 78 | - name: Check the status of Memgraph setup 79 | if: steps.list-changed.outputs.changed == 'true' 80 | run: | 81 | pods=$(kubectl get pods --selector=job-name=memgraph-setup --output=jsonpath='{.items[*].metadata.name}') 82 | for pod in $pods; do 83 | echo "Logs from $pod:" 84 | kubectl logs $pod 85 | done 86 | 87 | - name: Run Helm tests 88 | if: steps.list-changed.outputs.changed == 'true' 89 | run: | 90 | timeout 30 helm test mem-ha-test # If connection to some port is broken, this will timeout with code 124 91 | -------------------------------------------------------------------------------- /.github/workflows/lint-test-memgraph-lab.yaml: -------------------------------------------------------------------------------- 1 | name: Lint and Test Charts Memgraph Lab 2 | 3 | on: 4 | 
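  # Runs only for pull requests that modify files under charts/memgraph-lab/ (see the paths filter below).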
pull_request: 5 | paths: 6 | - 'charts/memgraph-lab/**' 7 | 8 | 9 | jobs: 10 | lint-test: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Checkout 14 | uses: actions/checkout@v3 15 | with: 16 | fetch-depth: 0 17 | 18 | - name: Set up Helm 19 | uses: azure/setup-helm@v3 20 | with: 21 | version: v3.14.0 22 | 23 | - uses: actions/setup-python@v4 24 | with: 25 | python-version: '3.12' 26 | check-latest: true 27 | 28 | - name: Set up chart-testing 29 | uses: helm/chart-testing-action@v2.6.1 30 | 31 | - name: Run chart-testing (list-changed) 32 | id: list-changed 33 | run: | 34 | changed=$(ct list-changed --target-branch ${{ github.event.repository.default_branch }}) 35 | if [[ -n "$changed" ]]; then 36 | echo "changed=true" >> "$GITHUB_OUTPUT" 37 | fi 38 | 39 | - name: Run chart-testing (lint) 40 | if: steps.list-changed.outputs.changed == 'true' 41 | run: ct lint --target-branch ${{ github.event.repository.default_branch }} --check-version-increment=false --charts charts/memgraph 42 | 43 | 44 | - name: Create kind cluster 45 | if: steps.list-changed.outputs.changed == 'true' 46 | uses: helm/kind-action@v1.8.0 47 | 48 | - name: Set up Memgraph environment variables 49 | if: steps.list-changed.outputs.changed == 'true' 50 | run: | 51 | echo "MEMGRAPH_ENTERPRISE_LICENSE=${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}" >> $GITHUB_ENV 52 | echo "MEMGRAPH_ORGANIZATION_NAME=${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}" >> $GITHUB_ENV 53 | 54 | - name: Run chart-testing (install) 55 | if: steps.list-changed.outputs.changed == 'true' 56 | run: ct install --target-branch ${{ github.event.repository.default_branch }} --excluded-charts memgraph-high-availability, memgraph 57 | -------------------------------------------------------------------------------- /.github/workflows/lint-test-memgraph.yml: -------------------------------------------------------------------------------- 1 | name: Lint and Test Charts Memgraph 2 | 3 | on: 4 | pull_request: 5 | paths: 6 | - 'charts/memgraph/**' 7 | 8 | 9 | jobs: 10 | lint-test: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Checkout 14 | uses: actions/checkout@v3 15 | with: 16 | fetch-depth: 0 17 | 18 | - name: Set up Helm 19 | uses: azure/setup-helm@v3 20 | with: 21 | version: v3.14.0 22 | 23 | - uses: actions/setup-python@v4 24 | with: 25 | python-version: '3.12' 26 | check-latest: true 27 | 28 | - name: Set up chart-testing 29 | uses: helm/chart-testing-action@v2.6.1 30 | 31 | - name: Run chart-testing (list-changed) 32 | id: list-changed 33 | run: | 34 | changed=$(ct list-changed --target-branch ${{ github.event.repository.default_branch }}) 35 | if [[ -n "$changed" ]]; then 36 | echo "changed=true" >> "$GITHUB_OUTPUT" 37 | fi 38 | 39 | - name: Run chart-testing (lint) 40 | if: steps.list-changed.outputs.changed == 'true' 41 | run: ct lint --target-branch ${{ github.event.repository.default_branch }} --check-version-increment false --charts charts/memgraph-lab 42 | 43 | - name: Create kind cluster 44 | if: steps.list-changed.outputs.changed == 'true' 45 | uses: helm/kind-action@v1.8.0 46 | 47 | - name: Set up Memgraph environment variables 48 | if: steps.list-changed.outputs.changed == 'true' 49 | run: | 50 | echo "MEMGRAPH_ENTERPRISE_LICENSE=${{ secrets.MEMGRAPH_ENTERPRISE_LICENSE }}" >> $GITHUB_ENV 51 | echo "MEMGRAPH_ORGANIZATION_NAME=${{ secrets.MEMGRAPH_ORGANIZATION_NAME }}" >> $GITHUB_ENV 52 | 53 | - name: Create Kubernetes secrets 54 | if: steps.list-changed.outputs.changed == 'true' 55 | run: | 56 | kubectl create secret generic memgraph-secrets \ 57 
| --from-literal=USER=memgraph \ 58 | --from-literal=PASSWORD=memgraph \ 59 | --namespace default 60 | 61 | - name: Verify Kubernetes secrets 62 | if: steps.list-changed.outputs.changed == 'true' 63 | run: kubectl get secrets --namespace default 64 | 65 | - name: Display kubectl context 66 | if: steps.list-changed.outputs.changed == 'true' 67 | run: kubectl config current-context 68 | 69 | - name: Run chart-testing (install) 70 | if: steps.list-changed.outputs.changed == 'true' 71 | run: ct install --target-branch ${{ github.event.repository.default_branch }} --helm-extra-set-args "--set secrets.enabled=true" --excluded-charts memgraph-high-availability, memgraph-lab --namespace default 72 | -------------------------------------------------------------------------------- /.github/workflows/pre-commit.yaml: -------------------------------------------------------------------------------- 1 | name: Pre-commit 2 | 3 | on: pull_request 4 | 5 | jobs: 6 | pre-commit: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - uses: actions/checkout@v3 10 | with: 11 | fetch-depth: 2 # fetches all history so pre-commit can run properly 12 | 13 | - name: Set up Python 14 | uses: actions/setup-python@v3 15 | with: 16 | python-version: '3.9' # Use Python 3.9 17 | 18 | - name: Install pre-commit 19 | run: pip install pre-commit 20 | 21 | - name: Run pre-commit 22 | run: pre-commit run --all-files --show-diff-on-failure 23 | 24 | - name: Cache the pre-commit environment 25 | uses: actions/cache@v3 26 | with: 27 | path: ~/.cache/pre-commit 28 | key: ${{ runner.os }}-pre-commit-${{ hashFiles('.pre-commit-config.yaml') }} 29 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release Charts 2 | 3 | on: 4 | workflow_dispatch: 5 | 6 | jobs: 7 | release: 8 | # depending on default permission settings for your org (contents being read-only or read-write for workloads), you will have to add permissions 9 | # see: https://docs.github.com/en/actions/security-guides/automatic-token-authentication#modifying-the-permissions-for-the-github_token 10 | permissions: 11 | contents: write 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Checkout 15 | uses: actions/checkout@v3 16 | with: 17 | fetch-depth: 0 18 | 19 | - name: Configure Git 20 | run: | 21 | git config user.name "$GITHUB_ACTOR" 22 | git config user.email "$GITHUB_ACTOR@users.noreply.github.com" 23 | 24 | - name: Install Helm 25 | uses: azure/setup-helm@v3 26 | env: 27 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 28 | 29 | - name: Download dependencies 30 | run: | 31 | helm repo add prometheus-community https://prometheus-community.github.io/helm-charts 32 | helm repo update 33 | 34 | - name: Update chart dependencies 35 | run: | 36 | helm dependency update charts/memgraph-high-availability 37 | 38 | - name: Run chart-releaser 39 | uses: helm/chart-releaser-action@v1.5.0 40 | env: 41 | CR_TOKEN: ${{ secrets.GITHUB_TOKEN }} 42 | CR_SKIP_EXISTING: true 43 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_store 2 | docker/ 3 | *.tgz 4 | .helm/ 5 | *.swp 6 | *.bak 7 | .DS_Store 8 | Thumbs.db 9 | charts/memgraph-high-availability/charts/ 10 | -------------------------------------------------------------------------------- /.helmignore: 
-------------------------------------------------------------------------------- 1 | .git/ 2 | .gitignore 3 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v4.5.0 4 | hooks: 5 | - id: trailing-whitespace 6 | - id: end-of-file-fixer 7 | - id: check-json 8 | - id: check-yaml 9 | exclude: ^charts/(memgraph|memgraph-lab|memgraph-high-availability)/templates/ 10 | - id: mixed-line-ending 11 | - id: check-merge-conflict 12 | - id: detect-private-key 13 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # How to contribute to Memgraph Helm Charts Repository? 2 | 3 | ## Code of Conduct 4 | 5 | Everyone participating in this project is governed by the [Code of 6 | Conduct](https://github.com/memgraph/memgraph/blob/master/CODE_OF_CONDUCT.md). 7 | By participating, you are expected to uphold this code. Please report 8 | unacceptable behavior to . 9 | 10 | ## Reporting Bugs 11 | 12 | This section guides you through submitting a bug report for **Memgraph Helm Charts Repository**. 13 | Following these guidelines helps maintainers and the community understand your 14 | report, reproduce the behavior, and find related reports. 15 | 16 | Before creating a bug report, please check out [GitHub 17 | Issues](https://github.com/memgraph/helm-charts/issues), as you might find out 18 | that you don't need to create one. When you are creating a bug report, please 19 | **include as many details** as possible. Fill out [the required 20 | template](https://github.com/memgraph/helm-charts/blob/main/.github/ISSUE_TEMPLATE/bug_report.yml), 21 | so we can get all the needed information to resolve the issue. 22 | 23 | > **Note:** If you find a **Closed** issue that seems like it is the same thing 24 | > that you're experiencing, open a new issue and include a link to the original 25 | > issue in the body of your new one. 26 | 27 | ## Contributing new features or bug fixes 28 | 29 | Please send a GitHub [Pull 30 | Request](https://github.com/memgraph/helm-charts/pulls) with a clear list of what 31 | you've done. Make sure all of your commits are atomic (one feature per commit). 32 | 33 | There are two main types of contributions: 34 | 1. **Bug fixes** 35 | 2. **New features** 36 | 37 | Both types of fixes should be applied to the `main` branch. 38 | 39 | In order for a pull request to be merged, a review by two code owners is required and the tests need to pass remotely. 40 | 41 | ## Contact 42 | 43 | If you need help with contributing to the Memgraph Helm Charts Repository, join our [Discord server](https://discord.gg/memgraph) or contact us at . 44 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 
14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Memgraph Helm Charts 2 | [![License: Apache-2.0](https://img.shields.io/github/license/memgraph/helm-charts)](https://github.com/memgraph/helm-charts/blob/main/LICENSE) 3 | [![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/memgraph)](https://artifacthub.io/packages/search?repo=memgraph) 4 | [![Docs](https://img.shields.io/badge/documentation-Memgraph-orange)](https://memgraph.com/docs/) 5 | 6 | 7 | Welcome to the Memgraph Helm Charts repository. This repository provides Helm charts for deploying Memgraph, an open-source in-memory graph database. 8 | 9 | ## Available charts 10 | - [**Memgraph standalone**](#memgraph-standalone) 11 | - [**Memgraph Lab**](#memgraph-lab) 12 | - [**Memgraph high availability**](#memgraph-high-availability) 13 | 14 | ## Prerequisites 15 | Helm version 3 or above installed. 16 | 17 | ## Add the Helm repository 18 | Add the Memgraph Helm chart repository to your local Helm setup by running the following command: 19 | 20 | ``` 21 | helm repo add memgraph https://memgraph.github.io/helm-charts 22 | ``` 23 | 24 | ## Update the repository 25 | Make sure to update the repository to fetch the latest Helm charts available: 26 | 27 | ``` 28 | helm repo update 29 | ``` 30 | 31 | ## Memgraph standalone 32 | Deploys standalone Memgraph. 33 | For detailed information and usage instructions, please refer to the [chart's individual README file](./charts/memgraph/README.md). 34 | 35 | To install the Memgraph standalone chart, run the following command: 36 | 37 | ``` 38 | helm install my-release memgraph/memgraph 39 | ``` 40 | Replace `my-release` with a name of your choice for the release. 41 | 42 | 43 | Once Memgraph is installed, you can access it using the provided services and endpoints. Refer to the [Memgraph documentation](https://memgraph.com/docs/memgraph/connect-to-memgraph) for details on how to connect to and interact with Memgraph. 44 | 45 | To upgrade or uninstall a deployed Memgraph release, you can use the `helm upgrade` or `helm uninstall` commands, respectively. Refer to the [Helm documentation](https://helm.sh/docs/) for more details on these commands. 46 | 47 | ## Memgraph lab 48 | Deploys Memgraph Lab. 49 | For detailed information and usage instructions, please refer to the [chart's individual README file](./charts/memgraph-lab/README.md). 50 | 51 | To install Memgraph Lab, run the following command: 52 | 53 | ``` 54 | helm install my-release memgraph/memgraph-lab 55 | ``` 56 | Replace `my-release` with a name of your choice for the release. 57 | 58 | 59 | Refer to the [Data visualization in Memgraph Lab](https://memgraph.com/docs/data-visualization) for details on how to connect to and interact with Memgraph. 60 | 61 | To upgrade or uninstall a deployed Memgraph release, you can use the `helm upgrade` or `helm uninstall` commands, respectively. 
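For instance, keeping the release name `my-release` used above, a typical upgrade to the latest published chart and a removal would look roughly like this:

```
helm repo update                               # refresh the chart index first
helm upgrade my-release memgraph/memgraph-lab  # upgrade the release in place
helm uninstall my-release                      # remove the release and its resources
```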
Refer to the [Helm documentation](https://helm.sh/docs/) for more details on these commands.
62 |
63 |
64 | ## Memgraph high availability
65 | Deploys a highly available Memgraph cluster, which includes two data instances and three coordinators.
66 |
67 | For detailed information and usage instructions, please refer to the [chart's individual README file](./charts/memgraph-high-availability/README.md).
68 |
69 | To install the chart, run the following command:
70 |
71 | ```
72 | helm install my-release memgraph/memgraph-high-availability --set env.MEMGRAPH_ENTERPRISE_LICENSE=<your-license>,env.MEMGRAPH_ORGANIZATION_NAME=<your-organization-name>
73 | ```
74 | Replace `my-release` with a name of your choice for the release.
75 |
76 | There are a few additional steps to make the cluster fully operational. Please take a look under the [Setting up the cluster](https://memgraph.com/docs/getting-started/install-memgraph/kubernetes#setting-up-the-cluster) docs section.
77 |
78 | Once the Memgraph cluster is up and running, you can access it using the provided services and endpoints. Refer to the [Memgraph documentation](https://memgraph.com/docs/memgraph/connect-to-memgraph) for details on how to connect to and interact with Memgraph.
79 |
80 | To upgrade or uninstall a deployed Memgraph release, you can use the `helm upgrade` or `helm uninstall` commands, respectively. Refer to the [Helm documentation](https://helm.sh/docs/) for more details on these commands.
81 |
82 | ## Docker Compose
83 |
84 | Creates an HA Memgraph cluster with one command. The only thing you need to do is add your license details. It uses a bridged Docker network for
85 | communication.
86 |
87 |
88 | ## Contributing
89 | Contributions are welcome! If you have any improvements, bug fixes, or new charts to add, please follow the contribution guidelines outlined in the [`CONTRIBUTING.md`](https://github.com/memgraph/helm-charts/blob/main/CONTRIBUTING.md) file. If you have questions and are unsure of how to contribute, please join our Discord server to get in touch with us.
90 |
91 |

92 |
93 | [Discord](https://discord.gg/memgraph)
94 |
95 |

96 | 97 | ## Debugging Memgraph Pods 98 | 99 | Find more details under [Debugging Running 100 | Pods](https://memgraph.com/docs/database-management/debugging#debugging-running-pods) 101 | documentation section. 102 | 103 | ## License 104 | This repository is licensed under the [Apache 2.0 License](https://github.com/memgraph/helm-charts/blob/main/LICENSE). 105 | -------------------------------------------------------------------------------- /artifacthub-repo.yml: -------------------------------------------------------------------------------- 1 | repositoryID: 1585cfbc-12cc-4b3d-a853-d312c12e0a5f 2 | owners: # (optional, used to claim repository ownership) 3 | - name: katarinasupe 4 | email: supe.katarina@gmail.com 5 | - name: antejavor 6 | email: javor.ante@gmail.com 7 | - name: andiskrgat 8 | email: andi.skrgat@memgraph.io 9 | # ignore: # (optional, packages that should not be indexed by Artifact Hub) 10 | # - name: package1 11 | # - name: package2 # Exact match 12 | # version: beta # Regular expression (when omitted, all versions are ignored) 13 | -------------------------------------------------------------------------------- /charts/memgraph-high-availability/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /charts/memgraph-high-availability/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: memgraph-high-availability 3 | description: A Helm chart for Kubernetes with Memgraph High availabiliy capabilites 4 | 5 | version: 0.2.3 6 | appVersion: "3.3.0" 7 | 8 | type: application 9 | 10 | keywords: 11 | - memgraph 12 | - graph 13 | - database 14 | - cypher 15 | - analytics 16 | - high-availability 17 | 18 | icon: https://public-assets.memgraph.com/memgraph-logo/logo-large.png 19 | 20 | home: https://memgraph.com/ 21 | 22 | sources: 23 | - "https://github.com/memgraph/memgraph" 24 | - "https://github.com/memgraph/helm-charts" 25 | 26 | maintainers: 27 | - name: Memgraph 28 | email: tech@memgraph.com 29 | -------------------------------------------------------------------------------- /charts/memgraph-high-availability/README.md: -------------------------------------------------------------------------------- 1 | # Helm chart for Memgraph high availability (HA) cluster (Enterprise) 2 | A Helm Chart for deploying Memgraph in [high availability setup](https://memgraph.com/docs/clustering/high-availability). 3 | This Helm Chart requires an [Enterprise version of Memgraph](https://memgraph.com/docs/database-management/enabling-memgraph-enterprise). 4 | 5 | Docs for how to use High availability Helm chart is available [here](https://memgraph.com/docs/getting-started/install-memgraph/kubernetes). 6 | In the aks/ and aws/ directories you find specific guides used for deploying this chart on Azure's and AWS's K8s platforms. 
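As a rough sketch of how this chart is usually customized, the options that the guides pass with `--set` (enterprise license, organization name, node-selection affinity, external service types) can also be collected into a values file; the exact keys and defaults should be checked against this chart's `values.yaml`:

```
# Illustrative only: key names mirror the --set flags used in the aks/ and aws/ guides.
cat > my-values.yaml <<'EOF'
env:
  MEMGRAPH_ENTERPRISE_LICENSE: "<your-license>"
  MEMGRAPH_ORGANIZATION_NAME: "<your-organization-name>"
affinity:
  nodeSelection: true            # expects nodes labeled role=coordinator-node / role=data-node
externalAccessConfig:
  coordinator:
    serviceType: LoadBalancer    # or NodePort / IngressNginx
  dataInstance:
    serviceType: LoadBalancer
EOF
helm install mem-ha-test memgraph/memgraph-high-availability -f my-values.yaml
```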
7 | 8 | For the quick start please refer to the [top-level README section](https://github.com/memgraph/helm-charts?tab=readme-ov-file#memgraph-high-availability). 9 | -------------------------------------------------------------------------------- /charts/memgraph-high-availability/aks/README.md: -------------------------------------------------------------------------------- 1 | ## Description 2 | 3 | This guide instructs users on how to deploy Memgraph HA to Azure AKS. It serves only as a starting point and there are many ways possible to extend 4 | what is currently here. In this setup each Memgraph database is deployed to separate, `Standard_A2_v2`. 5 | 6 | ## Installing tools 7 | 8 | You will need: 9 | - [azure-cli](https://learn.microsoft.com/en-us/cli/azure/) 10 | - [kubectl](https://kubernetes.io/docs/tasks/tools/) 11 | - [helm](https://helm.sh/docs/intro/install/) 12 | 13 | We used `azure-cli 2.67.0, kubectl v1.30.0 and helm 3.14.4`. 14 | 15 | ## Login with Azure-CLI 16 | 17 | Use `az login` and enter your authentication details. 18 | 19 | ## Create resource group 20 | 21 | The next step involves creating resource group which will later be attached to Kubernetes cluster. Example: 22 | ``` 23 | az group create --name ResourceGroup2 --location northeurope 24 | ``` 25 | 26 | ## Provision K8 nodes 27 | 28 | After creating resource group, K8 nodes can be created and attached to the previously created resource group. There are many other options 29 | you can use but we will cover here the simplest deployment scenario in which will we use 5 'Standard_A2_v2' instances where each instance will 30 | host its own Memgraph database. 31 | 32 | ``` 33 | az aks create --resource-group ResourceGroup2 --name memgraph-ha --node-count 5 --node-vm-size Standard_A2_v2 --generate-ssh-keys 34 | ``` 35 | 36 | ## Configure kubectl 37 | 38 | To get remote context from Azure AKS into your local kubectl, use: 39 | ``` 40 | az aks get-credentials --resource-group ResourceGroup2 --name memgraph-ha 41 | ``` 42 | 43 | ## Label nodes 44 | 45 | By running `kubectl get nodes -o=wide`, you should be able to see your nodes. Example: 46 | 47 | | NAME | STATUS | ROLES | AGE | VERSION | INTERNAL-IP | EXTERNAL-IP | OS-IMAGE | KERNEL-VERSION | CONTAINER-RUNTIME | 48 | |-----------------------------------|--------|-------|-----|---------|-------------|-------------|--------------------|----------------------|------------------------| 49 | | aks-nodepool1-65392319-vmss000000 | Ready | | 11m | v1.29.9 | 10.224.0.4 | | Ubuntu 22.04.5 LTS | 5.15.0-1074-azure | containerd://1.7.23-1 | 50 | | aks-nodepool1-65392319-vmss000001 | Ready | | 12m | v1.29.9 | 10.224.0.8 | | Ubuntu 22.04.5 LTS | 5.15.0-1074-azure | containerd://1.7.23-1 | 51 | | aks-nodepool1-65392319-vmss000002 | Ready | | 12m | v1.29.9 | 10.224.0.6 | | Ubuntu 22.04.5 LTS | 5.15.0-1074-azure | containerd://1.7.23-1 | 52 | | aks-nodepool1-65392319-vmss000003 | Ready | | 11m | v1.29.9 | 10.224.0.5 | | Ubuntu 22.04.5 LTS | 5.15.0-1074-azure | containerd://1.7.23-1 | 53 | | aks-nodepool1-65392319-vmss000004 | Ready | | 11m | v1.29.9 | 10.224.0.7 | | Ubuntu 22.04.5 LTS | 5.15.0-1074-azure | containerd://1.7.23-1 | 54 | 55 | Most often users will use smaller nodes for 3 coordinators and bigger nodes for data instances. To be able to do that, we will label first 56 | 3 nodes with `role=coordinator-node` and the last 2 with `role=data-node`. 
57 | 58 | ``` 59 | kubectl label nodes aks-nodepool1-65392319-vmss000000 role=coordinator-node 60 | kubectl label nodes aks-nodepool1-65392319-vmss000001 role=coordinator-node 61 | kubectl label nodes aks-nodepool1-65392319-vmss000002 role=coordinator-node 62 | kubectl label nodes aks-nodepool1-65392319-vmss000003 role=data-node 63 | kubectl label nodes aks-nodepool1-65392319-vmss000004 role=data-node 64 | ``` 65 | 66 | In the following chapters, we will go over several most common deployment types: 67 | 68 | ## Service type = IngressNginx 69 | 70 | The most cost-friendly way to manage a Memgraph HA cluster in K8s is using a IngressNginx contoller. This controller is capable of routing TCP messages on Bolt level 71 | protocol to the K8s Memgraph services. To achieve this, it uses only a single LoadBalancer which means there is only a single external IP for connecting to the cluster. 72 | Users can connect to any coordinator or data instance by distinguishing bolt ports. First install Memgraph HA: 73 | 74 | ``` 75 | helm install mem-ha-test ./charts/memgraph-high-availability --set \ 76 | env.MEMGRAPH_ENTERPRISE_LICENSE=,\ 77 | env.MEMGRAPH_ORGANIZATION_NAME=,affinity.nodeSelection=true,\ 78 | externalAccessConfig.dataInstance.serviceType=IngressNginx,externalAccessConfig.coordinator.serviceType=IngressNginx 79 | ``` 80 | 81 | After that, connect Memgraph instances using LoadBalancer's external IP. You can find that out 82 | by running `kubectl get svc -o=wide -A`. 83 | 84 | ``` 85 | ADD COORDINATOR 1 WITH CONFIG {"bolt_server": ":9011", "management_server": "memgraph-coordinator-1.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-1.default.svc.cluster.local:12000"}; 86 | ADD COORDINATOR 2 WITH CONFIG {"bolt_server": ":9012", "management_server": "memgraph-coordinator-2.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-2.default.svc.cluster.local:12000"}; 87 | ADD COORDINATOR 3 WITH CONFIG {"bolt_server": ":9013", "management_server": "memgraph-coordinator-3.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-3.default.svc.cluster.local:12000"}; 88 | REGISTER INSTANCE instance_0 WITH CONFIG {"bolt_server": ":9000", "management_server": "memgraph-data-0.default.svc.cluster.local:10000", "replication_server": "memgraph-data-0.default.svc.cluster.local:20000"}; 89 | REGISTER INSTANCE instance_1 WITH CONFIG {"bolt_server": ":9001", "management_server": "memgraph-data-1.default.svc.cluster.local:10000", "replication_server": "memgraph-data-1.default.svc.cluster.local:20000"}; 90 | SET INSTANCE instance_1 TO MAIN; 91 | ``` 92 | 93 | ## ServiceType = LoadBalancer 94 | 95 | After preparing nodes, we can deploy Memgraph HA cluster by using `helm install` command. We will specify affinity options so that node labels 96 | are used and so that each data and coordinator instance is exposed through LoadBalancer. 97 | 98 | ``` 99 | helm install mem-ha-test ./charts/memgraph-high-availability --set \ 100 | env.MEMGRAPH_ENTERPRISE_LICENSE=,\ 101 | env.MEMGRAPH_ORGANIZATION_NAME=,affinity.nodeSelection=true,\ 102 | externalAccessConfig.dataInstance.serviceType=LoadBalancer,externalAccessConfig.coordinator.serviceType=LoadBalancer 103 | ``` 104 | 105 | By running `kubectl get svc -o=wide` and `kubectl get pods -o=wide` we can verify that deployment finished successfully. 
Example: 106 | 107 | | NAME | TYPE | CLUSTER-IP | EXTERNAL-IP | PORT(S) | AGE | SELECTOR | 108 | |---------------------------------|--------------|--------------|-----------------|----------------------------------|-----|----------------------------| 109 | | kubernetes | ClusterIP | 10.0.0.1 | `` | 443/TCP | 21m | `` | 110 | | memgraph-coordinator-1 | ClusterIP | 10.0.65.178 | `` | 7687/TCP,12000/TCP,10000/TCP | 63s | app=memgraph-coordinator-1 | 111 | | memgraph-coordinator-1-external | LoadBalancer | 10.0.28.222 | 172.205.93.228 | 7687:30402/TCP | 63s | app=memgraph-coordinator-1 | 112 | | memgraph-coordinator-2 | ClusterIP | 10.0.129.252 | `` | 7687/TCP,12000/TCP,10000/TCP | 63s | app=memgraph-coordinator-2 | 113 | | memgraph-coordinator-2-external | LoadBalancer | 10.0.102.4 | 4.209.216.240 | 7687:32569/TCP | 63s | app=memgraph-coordinator-2 | 114 | | memgraph-coordinator-3 | ClusterIP | 10.0.42.32 | `` | 7687/TCP,12000/TCP,10000/TCP | 63s | app=memgraph-coordinator-3 | 115 | | memgraph-coordinator-3-external | LoadBalancer | 10.0.208.244 | 68.219.15.104 | 7687:30874/TCP | 63s | app=memgraph-coordinator-3 | 116 | | memgraph-data-0 | ClusterIP | 10.0.227.204 | `` | 7687/TCP,10000/TCP,20000/TCP | 63s | app=memgraph-data-0 | 117 | | memgraph-data-0-external | LoadBalancer | 10.0.78.197 | 68.219.11.242 | 7687:31823/TCP | 63s | app=memgraph-data-0 | 118 | | memgraph-data-1 | ClusterIP | 10.0.251.227 | `` | 7687/TCP,10000/TCP,20000/TCP | 63s | app=memgraph-data-1 | 119 | | memgraph-data-1-external | LoadBalancer | 10.0.147.131 | 68.219.13.145 | 7687:30733/TCP | 63s | app=memgraph-data-1 | 120 | 121 | 122 | | NAME | READY | STATUS | RESTARTS | AGE | IP | NODE | NOMINATED NODE | READINESS GATES | 123 | |-----------------------------|-------|---------|----------|-----|------------|------------------------------------|----------------|-----------------| 124 | | memgraph-coordinator-1-0 | 1/1 | Running | 0 | 80s | 10.244.0.3 | aks-nodepool1-65392319-vmss000001 | `` | `` | 125 | | memgraph-coordinator-2-0 | 1/1 | Running | 0 | 80s | 10.244.3.3 | aks-nodepool1-65392319-vmss000000 | `` | `` | 126 | | memgraph-coordinator-3-0 | 1/1 | Running | 0 | 80s | 10.244.1.8 | aks-nodepool1-65392319-vmss000002 | `` | `` | 127 | | memgraph-data-0-0 | 1/1 | Running | 0 | 80s | 10.244.4.3 | aks-nodepool1-65392319-vmss000004 | `` | `` | 128 | | memgraph-data-1-0 | 1/1 | Running | 0 | 80s | 10.244.2.2 | aks-nodepool1-65392319-vmss000003 | `` | `` | 129 | 130 | ## Connect cluster 131 | 132 | The only remaining step left is to connect instances. For this we will use Memgraph Lab. Open Lab and use Memgraph instance type of connection. 133 | For the host enter external ip of `memgraph-coordinator-1-external` and port is 7687. Both for adding coordinators and registering instances, 134 | we only need to change 'bolt\_server' part to use LoadBalancers' external IP. 
135 | 136 | ``` 137 | ADD COORDINATOR 1 WITH CONFIG {"bolt_server": "172.205.93.228:7687", "management_server": "memgraph-coordinator-1.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-1.default.svc.cluster.local:12000"}; 138 | ADD COORDINATOR 2 WITH CONFIG {"bolt_server": "4.209.216.240:7687", "management_server": "memgraph-coordinator-2.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-2.default.svc.cluster.local:12000"}; 139 | ADD COORDINATOR 3 WITH CONFIG {"bolt_server": "68.219.15.104:7687", "management_server": "memgraph-coordinator-3.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-3.default.svc.cluster.local:12000"}; 140 | REGISTER INSTANCE instance_1 WITH CONFIG {"bolt_server": "68.219.11.242:7687", "management_server": "memgraph-data-0.default.svc.cluster.local:10000", "replication_server": "memgraph-data-0.default.svc.cluster.local:20000"}; 141 | REGISTER INSTANCE instance_2 WITH CONFIG {"bolt_server": "68.219.13.145:7687", "management_server": "memgraph-data-1.default.svc.cluster.local:10000", "replication_server": "memgraph-data-1.default.svc.cluster.local:20000"}; 142 | SET INSTANCE instance_1 TO MAIN; 143 | ``` 144 | 145 | The output of `SHOW INSTANCES` should then look similar to: 146 | 147 | ``` 148 | | name | bolt_server | coordinator_server | management_server | health | role | last_succ_resp_ms | 149 | |-----------------|---------------------------------------------------------|----------------------------------------------------------|----------------------------------------------------------|---------|-----------|-------------------| 150 | | "coordinator_1" | "172.205.93.228:7687" | "memgraph-coordinator-1.default.svc.cluster.local:12000" | "memgraph-coordinator-1.default.svc.cluster.local:10000" | "up" | "leader" | 0 | 151 | | "coordinator_2" | "4.209.216.240:7687" | "memgraph-coordinator-2.default.svc.cluster.local:12000" | "memgraph-coordinator-2.default.svc.cluster.local:10000" | "up" | "follower"| 550 | 152 | | "coordinator_3" | "68.219.15.104:7687" | "memgraph-coordinator-3.default.svc.cluster.local:12000" | "memgraph-coordinator-3.default.svc.cluster.local:10000" | "up" | "follower"| 26 | 153 | | "instance_1" | "68.219.11.242:7687" | "" | "memgraph-data-0.default.svc.cluster.local:10000" | "up" | "main" | 917 | 154 | | "instance_2" | "68.219.13.145:7687" | "" | "memgraph-data-1.default.svc.cluster.local:10000" | "up" | "replica" | 266 | 155 | ``` 156 | 157 | ## Using CommonLoadBalancer 158 | 159 | When using 'CommonLoadBalancer', all three coordinators will be behind a single LoadBalancer while each data instance has their own load balancer. To connect the cluster, open Lab and use Memgraph 160 | instance type of connection. For the host enter external IP of `memgraph-coordinator-1-external` and port is 7687. Again, we only need to change 161 | 'bolt\_server' part to use LoadBalancers' external IP. When connecting to CommonLoadBalancer, K8 will automatically route you to one of coordinators. 162 | To see on which coordinator did you end routed, run `SHOW INSTANCE`. If for example, the output of the query says you are connected to 163 | coordinator 2, we need to add coordinators 1 and 3. Registering data instances stays exactly the same. 
164 | 165 | ``` 166 | ADD COORDINATOR 2 WITH CONFIG {"bolt_server": ":7687", "management_server": "memgraph-coordinator-2.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-2.default.svc.cluster.local:12000"}; 167 | ADD COORDINATOR 1 WITH CONFIG {"bolt_server": ":7687", "management_server": "memgraph-coordinator-1.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-1.default.svc.cluster.local:12000"}; 168 | ADD COORDINATOR 3 WITH CONFIG {"bolt_server": ":7687", "management_server": "memgraph-coordinator-3.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-3.default.svc.cluster.local:12000"}; 169 | REGISTER INSTANCE instance_1 WITH CONFIG {"bolt_server": "68.219.11.242:7687", "management_server": "memgraph-data-0.default.svc.cluster.local:10000", "replication_server": "memgraph-data-0.default.svc.cluster.local:20000"}; 170 | REGISTER INSTANCE instance_2 WITH CONFIG {"bolt_server": "68.219.13.145:7687", "management_server": "memgraph-data-1.default.svc.cluster.local:10000", "replication_server": "memgraph-data-1.default.svc.cluster.local:20000"}; 171 | SET INSTANCE instance_1 TO MAIN; 172 | ``` 173 | 174 | ## Memgraph HA storage model 175 | 176 | Each Memgraph instance stores its data and logs in two separate storages. You usually don't want to manually inspect data from the 177 | data directory, you only want to be able to recover data when starting a new instance. For working with persistent data, Kubernetes uses 178 | persistent volumes (PV) and persistent volume claims (PVC). You can think of persistent volumes as the actual storage where the data is stored 179 | while persistent volume claims are requests to attach PVs to your pods. You can find more details about the concept of storage in 180 | Kubernetes [here](https://kubernetes.io/docs/concepts/storage/persistent-volumes/). At the moment for HA chart, we use dynamically created 181 | PVCs which won't get deleted upon uninstallation of the chart. However, if you do `kubectl delete pvc -A`, it will also delete underlying 182 | persistent volumes since the default policy is Delete. This also means that when you upgrade a chart, all data will be preserved. 183 | 184 | 185 | Inspecting logs can be very valuable e.g when sending a bug report in the case of a pod crash. In that case, `kubectl logs` doesn't help because 186 | it doesn't show logs before the crash. 187 | 188 | There are two possible ways in which you can get to your PVs. Note that you can retrieve your data directory in the same way as logs so the 189 | following two chapters apply in both cases. 190 | 191 | ### Attaching disk to VM for Azure Disk storage 192 | 193 | Azure Disk is the default storage class for Azure AKS. It is a block storage which doesn't allow simultaneous access from multiple pods. Therefore, in order to retrieve logs 194 | we will create a snapshot of the disk, create a temporary virtual machine and attach copy of the disk to the newly created VM. 195 | 196 | Let's say that coordinator-1 pod crashed and you want to send us logs so we can figure out what happened. Run 197 | 198 | ``` 199 | kubectl get pv -A 200 | ``` 201 | 202 | to find the ID of the PV that coordinator 1 uses. The PV's ID and also serves as the name of the disk used as the underlying 203 | storage. 
We will use this information to create a snapshot of the disk using: 204 | 205 | ``` 206 | az snapshot create \ 207 | --resource-group \ 208 | --source /subscriptions//resourceGroups//providers/Microsoft.Compute/disks/ \ 209 | --name coord1-log-snapshot 210 | ``` 211 | 212 | If you are not sure about the resource group of the disk, you can run: 213 | 214 | ``` 215 | az disk list --output table 216 | ``` 217 | 218 | to find it out. Using the created snapshot, we will create a new disk using the following command: 219 | 220 | ``` 221 | az disk create \ 222 | --resource-group \ 223 | --source coord1-log-snapshot \ 224 | --name coord1-log-disk \ 225 | --zone 1 226 | ``` 227 | 228 | The next step consists of creating a virtual machine for which any reasonable default settings will work. It is only important that it is in the same region as newly created disk copy. 229 | Note that one VM can be used to attaching as many disks as you want so you don't need to create a separate VM every time. For creating a VM we used Azure Portal. After you have created 230 | the VM, you can attach disk to the VM using: 231 | 232 | ``` 233 | az vm disk attach \ 234 | --resource-group \ 235 | --vm-name \ 236 | --disk /subscriptions//resourceGroups//providers/Microsoft.Compute/disks/coord1-log-disk 237 | ``` 238 | 239 | SSH into the VM and by running you should be able to see your disk (sdc, sdd usually are names) by running `lsblk`. Create a new directory and mount the disk. 240 | ``` 241 | sudo mkdir /mnt/coord1 242 | sudo mount /dev/ /mnt/coord1 243 | ``` 244 | You can now copy it to the local machine using scp. 245 | 246 | ### Creating a debug pod for Azure File storage 247 | 248 | When using Azure File storage, the easiest way to retrieve data is to create a debug pod which attaches to a PV and mounts it locally. In order to support it, you need to use the 249 | Azure File type of storage with PVCs access mode set to `ReadWriteMany`. The default storage uses Azure Disk which is a block storage operating as a physical disk which doesn't allow multiple pods to mount the disk simultaneously. 250 | The example of a debug pod for retrieving data from coordinator 1 looks something like: 251 | ``` 252 | apiVersion: v1 253 | kind: Pod 254 | metadata: 255 | name: debug-pod 256 | namespace: 257 | spec: 258 | tolerations: 259 | - operator: "Exists" 260 | containers: 261 | - name: debug-container 262 | image: busybox 263 | command: [ "/bin/sh", "-c", "--" ] 264 | args: [ "while true; do sleep 30; done;" ] 265 | volumeMounts: 266 | - name: my-debug-volume 267 | mountPath: /coord1-logs 268 | volumes: 269 | - name: my-debug-volume 270 | persistentVolumeClaim: 271 | claimName: memgraph-coordinator-1-log-storage-memgraph-coordinator-1-0 272 | ``` 273 | Note that you need to set `metadata.namespace` to the namespace where your instances are installed. Start your pod with: 274 | ``` 275 | kubectl apply -f debug-pod.yaml -n 276 | ``` 277 | 278 | and login into it with: 279 | ``` 280 | kubectl exec -it debug-pod -- /bin/sh 281 | ``` 282 | 283 | Your data should now be seen at `/coord1-logs` directory. 284 | -------------------------------------------------------------------------------- /charts/memgraph-high-availability/aws/README.md: -------------------------------------------------------------------------------- 1 | ## Description 2 | 3 | This guide instructs users on how to deploy Memgraph HA to AWS EKS using `NodePort` services. 
It serves only as a starting point; there are many possible ways to extend what is currently here. In this setup
4 | each Memgraph database is deployed to a separate `t3.small` node in the `eu-west-1` AWS region.
5 |
6 | ## Installation
7 |
8 | You will need:
9 | - [aws cli](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html)
10 | - [kubectl](https://kubernetes.io/docs/tasks/tools/)
11 | - [eksctl](https://docs.aws.amazon.com/eks/latest/userguide/setting-up.html)
12 | - [helm](https://helm.sh/docs/intro/install/)
13 |
14 | We used `kubectl 1.30.0, aws 2.17.29, eksctl 0.188.0 and helm 3.14.4`.
15 |
16 | ## Configure AWS CLI
17 |
18 | Use `aws configure` and enter your `AWS Access Key ID, Secret Access Key, Region and output format`.
19 |
20 | ## Create EKS Cluster
21 |
22 | We provide a sample configuration file for AWS in this folder. Running
23 |
24 | ```
25 | eksctl create cluster -f cluster.yaml
26 | ```
27 |
28 | should be sufficient. Make sure to change the path to the public SSH key if you want to have SSH access to the EC2 instances. After creating the cluster, `kubectl` should pick up
29 | the AWS context and you can verify this by running `kubectl config current-context`. Mine points to `andi.skrgat@test-cluster-ha.eu-west-1.eksctl.io`.
30 |
31 | ## Add Helm Charts repository
32 |
33 | If you haven't added the Memgraph Helm repository yet, add it by running:
34 |
35 | ```
36 | helm repo add memgraph https://memgraph.github.io/helm-charts
37 | helm repo list
38 | helm repo update
39 | ```
40 |
41 | ## Install the AWS CSI driver
42 |
43 | Once the EKS nodes are started, you need to install the AWS Elastic Block Store CSI driver so that the cluster can auto-manage EBS resources from AWS. Run the following:
44 |
45 | ```
46 | kubectl apply -k "github.com/kubernetes-sigs/aws-ebs-csi-driver/deploy/kubernetes/overlays/stable/ecr/?ref=release-1.25"
47 | ```
48 |
49 | ## Authentication and authorization
50 |
51 | Before deploying the cluster, you need to provide access to the NodeInstanceRole. First find the name of the role with
52 |
53 | ```
54 | aws eks describe-nodegroup --cluster-name test-cluster-ha --nodegroup-name standard-workers
55 | ```
56 |
57 | and then provide full access to it:
58 |
59 | ```
60 | aws iam attach-role-policy --role-name eksctl-test-cluster-ha-nodegroup-s-NodeInstanceRole- --policy-arn arn:aws:iam::aws:policy/AmazonEC2FullAccess
61 | aws iam list-attached-role-policies --role-name eksctl-test-cluster-ha-nodegroup-s-NodeInstanceRole-
62 | ```
63 |
64 | When using `NodePort` services, it is important to create an Inbound Rule in the Security Group attached to the eksctl cluster that allows TCP traffic
65 | on ports 30000-32767. We find it easiest to do this from the EC2 Dashboard.
66 |
67 | ## Label nodes
68 |
69 | This guide uses the `nodeSelection` affinity option. Make sure to label the nodes where you want coordinators deployed with the role `coordinator-node`
70 | and the nodes where you want data instances deployed with the role `data-node`.
71 |
72 | Example:
73 | ```
74 | kubectl label nodes node-000000 role=coordinator-node
75 | kubectl label nodes node-000001 role=coordinator-node
76 | kubectl label nodes node-000002 role=coordinator-node
77 | kubectl label nodes node-000003 role=data-node
78 | kubectl label nodes node-000004 role=data-node
79 | ```
80 |
81 | ## Deploy Memgraph cluster
82 |
83 | We can now install the Memgraph HA chart using the following command:
84 |
85 | ```
86 | helm install mem-ha-test ./charts/memgraph-high-availability --set \
87 | env.MEMGRAPH_ENTERPRISE_LICENSE=, \
88 | env.MEMGRAPH_ORGANIZATION_NAME=, \
89 | storage.coordinators.libStorageClassName=gp2, \
90 | storage.data.libStorageClassName=gp2, \
91 | storage.coordinators.logStorageClassName=gp2, \
92 | storage.data.logStorageClassName=gp2, \
93 | affinity.nodeSelection=true, \
94 | externalAccessConfig.dataInstance.serviceType=NodePort, \
95 | externalAccessConfig.coordinator.serviceType=NodePort
96 | ```
97 |
98 | The only remaining step is to connect the instances to form a cluster:
99 | ```
100 | ADD COORDINATOR 1 WITH CONFIG {"bolt_server": ":7687", "management_server": "memgraph-coordinator-1.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-1.default.svc.cluster.local:12000"};
101 | ADD COORDINATOR 2 WITH CONFIG {"bolt_server": ":7687", "management_server": "memgraph-coordinator-2.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-2.default.svc.cluster.local:12000"};
102 | ADD COORDINATOR 3 WITH CONFIG {"bolt_server": ":7687", "management_server": "memgraph-coordinator-3.default.svc.cluster.local:10000", "coordinator_server": "memgraph-coordinator-3.default.svc.cluster.local:12000"};
103 | REGISTER INSTANCE instance_1 WITH CONFIG {"bolt_server": ":7687", "management_server": "memgraph-data-0.default.svc.cluster.local:10000", "replication_server": "memgraph-data-0.default.svc.cluster.local:20000"};
104 | REGISTER INSTANCE instance_2 WITH CONFIG {"bolt_server": ":7687", "management_server": "memgraph-data-1.default.svc.cluster.local:10000", "replication_server": "memgraph-data-1.default.svc.cluster.local:20000"};
105 | SET INSTANCE instance_1 TO MAIN;
106 |
107 | ```
108 |
109 |
110 | You can check the state of the cluster with `kubectl get pods -o wide`.
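If you are unsure how to reach a coordinator to run the queries above, one option (a sketch, not an official procedure) is to go through the `NodePort` service this chart creates for each coordinator. The service name below comes from this chart's templates; `<node-external-ip>` and `<node-port>` are placeholders you substitute with the `kubectl` output, and `mgconsole` is only one possible Bolt client:

```
# External IPs of the worker nodes
kubectl get nodes -o wide

# NodePort assigned to the bolt port of coordinator 1
kubectl get svc memgraph-coordinator-1-external -o jsonpath='{.spec.ports[0].nodePort}'

# Connect and verify the cluster, e.g. by running SHOW INSTANCES; inside the session
mgconsole --host <node-external-ip> --port <node-port>
```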
111 | -------------------------------------------------------------------------------- /charts/memgraph-high-availability/aws/cluster.yaml: -------------------------------------------------------------------------------- 1 | accessConfig: 2 | authenticationMode: API_AND_CONFIG_MAP 3 | addonsConfig: {} 4 | apiVersion: eksctl.io/v1alpha5 5 | availabilityZones: 6 | - eu-west-1a 7 | - eu-west-1c 8 | - eu-west-1b 9 | cloudWatch: 10 | clusterLogging: {} 11 | iam: 12 | vpcResourceControllerPolicy: true 13 | withOIDC: false 14 | kind: ClusterConfig 15 | kubernetesNetworkConfig: 16 | ipFamily: IPv4 17 | managedNodeGroups: 18 | - amiFamily: AmazonLinux2 19 | desiredCapacity: 5 20 | disableIMDSv1: true 21 | disablePodIMDS: false 22 | iam: 23 | withAddonPolicies: 24 | albIngress: false 25 | appMesh: false 26 | appMeshPreview: false 27 | autoScaler: false 28 | awsLoadBalancerController: false 29 | certManager: false 30 | cloudWatch: false 31 | ebs: false 32 | efs: false 33 | externalDNS: false 34 | fsx: false 35 | imageBuilder: false 36 | xRay: false 37 | instanceSelector: {} 38 | instanceType: t3.small 39 | labels: 40 | alpha.eksctl.io/cluster-name: mg-ha 41 | alpha.eksctl.io/nodegroup-name: standard-workers 42 | maxSize: 5 43 | minSize: 5 44 | name: standard-workers 45 | privateNetworking: false 46 | releaseVersion: "" 47 | securityGroups: 48 | withLocal: null 49 | withShared: null 50 | ssh: 51 | allow: true 52 | publicKeyPath: ~/.ssh/id_rsa.pub 53 | tags: 54 | alpha.eksctl.io/nodegroup-name: standard-workers 55 | alpha.eksctl.io/nodegroup-type: managed 56 | volumeIOPS: 3000 57 | volumeSize: 80 58 | volumeThroughput: 125 59 | volumeType: gp3 60 | metadata: 61 | name: mg-ha 62 | region: eu-west-1 63 | version: "1.30" 64 | privateCluster: 65 | enabled: false 66 | skipEndpointCreation: false 67 | vpc: 68 | autoAllocateIPv6: false 69 | cidr: 192.168.0.0/16 70 | clusterEndpoints: 71 | privateAccess: false 72 | publicAccess: true 73 | manageSharedNodeSecurityGroupRules: true 74 | nat: 75 | gateway: Single 76 | -------------------------------------------------------------------------------- /charts/memgraph-high-availability/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | Thank you for installing the Memgraph High-availability cluster (Enterprise)! 🎉 2 | 3 | You can find information about installing this chart on Memgraph docs https://memgraph.com/docs/getting-started/install-memgraph/kubernetes. 
4 | -------------------------------------------------------------------------------- /charts/memgraph-high-availability/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | 2 | {{/* Full name of the application */}} 3 | {{- define "memgraph.fullname" -}} 4 | {{- if .Values.fullnameOverride -}} 5 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 6 | {{- else -}} 7 | {{- $name := default .Chart.Name .Values.nameOverride -}} 8 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 9 | {{- end -}} 10 | {{- end -}} 11 | 12 | 13 | 14 | {{/* Define the chart version and app version */}} 15 | {{- define "memgraph.chart" -}} 16 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 17 | {{- end -}} 18 | 19 | 20 | {{/* Define the name of the application */}} 21 | {{- define "memgraph.name" -}} 22 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 23 | {{- end -}} 24 | 25 | 26 | {{/* Common labels */}} 27 | {{- define "memgraph.labels" -}} 28 | app.kubernetes.io/name: {{ include "memgraph.name" . }} 29 | helm.sh/chart: {{ include "memgraph.chart" . }} 30 | app.kubernetes.io/instance: {{ .Release.Name }} 31 | app.kubernetes.io/managed-by: {{ .Release.Service }} 32 | {{- end -}} 33 | 34 | 35 | {{/* 36 | Create the name of the service account to use 37 | */}} 38 | {{- define "memgraph.serviceAccountName" -}} 39 | {{- if .Values.serviceAccount.create }} 40 | {{- default (include "memgraph.fullname" .) .Values.serviceAccount.name }} 41 | {{- else }} 42 | {{- default "default" .Values.serviceAccount.name }} 43 | {{- end }} 44 | {{- end }} 45 | 46 | {{- define "container.data.readinessProbe" -}} 47 | readinessProbe: 48 | tcpSocket: 49 | port: {{ .tcpSocket.port }} 50 | failureThreshold: {{ .failureThreshold }} 51 | timeoutSeconds: {{ .timeoutSeconds }} 52 | periodSeconds: {{ .periodSeconds }} 53 | {{- end }} 54 | 55 | 56 | {{- define "container.data.livenessProbe" -}} 57 | livenessProbe: 58 | tcpSocket: 59 | port: {{ .tcpSocket.port }} 60 | failureThreshold: {{ .failureThreshold }} 61 | timeoutSeconds: {{ .timeoutSeconds }} 62 | periodSeconds: {{ .periodSeconds }} 63 | {{- end }} 64 | 65 | 66 | {{- define "container.data.startupProbe" -}} 67 | startupProbe: 68 | tcpSocket: 69 | port: {{ .tcpSocket.port }} 70 | failureThreshold: {{ .failureThreshold }} 71 | timeoutSeconds: {{ .timeoutSeconds }} 72 | periodSeconds: {{ .periodSeconds }} 73 | {{- end }} 74 | 75 | 76 | 77 | {{- define "container.coordinators.readinessProbe" -}} 78 | readinessProbe: 79 | tcpSocket: 80 | port: {{ .tcpSocket.port }} 81 | failureThreshold: {{ .failureThreshold }} 82 | timeoutSeconds: {{ .timeoutSeconds }} 83 | periodSeconds: {{ .periodSeconds }} 84 | {{- end }} 85 | 86 | 87 | {{- define "container.coordinators.livenessProbe" -}} 88 | livenessProbe: 89 | tcpSocket: 90 | port: {{ .tcpSocket.port }} 91 | failureThreshold: {{ .failureThreshold }} 92 | timeoutSeconds: {{ .timeoutSeconds }} 93 | periodSeconds: {{ .periodSeconds }} 94 | {{- end }} 95 | 96 | 97 | {{- define "container.coordinators.startupProbe" -}} 98 | startupProbe: 99 | tcpSocket: 100 | port: {{ .tcpSocket.port }} 101 | failureThreshold: {{ .failureThreshold }} 102 | timeoutSeconds: {{ .timeoutSeconds }} 103 | periodSeconds: {{ .periodSeconds }} 104 | {{- end }} 105 | -------------------------------------------------------------------------------- /charts/memgraph-high-availability/templates/coordinators.yaml: 
-------------------------------------------------------------------------------- 1 | {{- range $index, $coordinator := .Values.coordinators }} 2 | apiVersion: apps/v1 3 | kind: StatefulSet 4 | metadata: 5 | name: memgraph-coordinator-{{ $coordinator.id }} 6 | spec: 7 | serviceName: "memgraph-coordinator-{{ $coordinator.id }}" 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: memgraph-coordinator-{{ $coordinator.id }} 12 | role: coordinator 13 | template: 14 | metadata: 15 | labels: 16 | app: memgraph-coordinator-{{ $coordinator.id }} 17 | role: coordinator 18 | spec: 19 | affinity: 20 | {{- if $.Values.affinity.nodeSelection }} 21 | # Node Selection Affinity: Scheduled on nodes with specific label key and value 22 | nodeAffinity: 23 | requiredDuringSchedulingIgnoredDuringExecution: 24 | nodeSelectorTerms: 25 | - matchExpressions: 26 | - key: {{ $.Values.affinity.roleLabelKey }} 27 | operator: In 28 | values: 29 | - {{ $.Values.affinity.coordinatorNodeLabelValue }} 30 | podAntiAffinity : 31 | requiredDuringSchedulingIgnoredDuringExecution: 32 | - labelSelector: 33 | matchExpressions: 34 | - key: role 35 | operator: In 36 | values: 37 | - coordinator 38 | topologyKey: "kubernetes.io/hostname" 39 | {{- else if $.Values.affinity.unique }} 40 | # Unique Affinity: Schedule pods on different nodes 41 | podAntiAffinity: 42 | requiredDuringSchedulingIgnoredDuringExecution: 43 | - labelSelector: 44 | matchExpressions: 45 | - key: role 46 | operator: In 47 | values: 48 | - coordinator 49 | - data 50 | topologyKey: "kubernetes.io/hostname" 51 | {{- else if $.Values.affinity.parity }} 52 | # Parity Affinity: One coordinator and one data node per node, coordinator schedules first, needs to be in pairs 53 | podAntiAffinity: 54 | requiredDuringSchedulingIgnoredDuringExecution: 55 | - labelSelector: 56 | matchExpressions: 57 | - key: role 58 | operator: In 59 | values: 60 | - coordinator 61 | topologyKey: "kubernetes.io/hostname" 62 | {{- else }} 63 | # Default Affinity: Avoid scheduling on the same node 64 | podAntiAffinity: 65 | preferredDuringSchedulingIgnoredDuringExecution: 66 | - weight: 50 67 | podAffinityTerm: 68 | labelSelector: 69 | matchExpressions: 70 | - key: role 71 | operator: In 72 | values: 73 | - coordinator 74 | topologyKey: "kubernetes.io/hostname" 75 | {{- end }} 76 | initContainers: 77 | - name: init 78 | image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag }}" 79 | volumeMounts: 80 | - name: memgraph-coordinator-{{ $coordinator.id }}-lib-storage 81 | mountPath: /var/lib/memgraph 82 | - name: memgraph-coordinator-{{ $coordinator.id }}-log-storage 83 | mountPath: /var/log/memgraph 84 | command: [ "/bin/sh","-c" ] 85 | # The permissions have to be explicitly adjusted because under some k8s 86 | # environments permissions set under 87 | # https://github.com/memgraph/memgraph/blob/master/release/debian/postinst 88 | # get overwritten. Sometimes, PVC are created using new partitions -> 89 | # lost+found directory should not change its permissions so it has to 90 | # be excluded. 91 | args: 92 | - > 93 | cd /var/log/memgraph; 94 | find . -path ./lost+found -prune -o -exec chown {{ $.Values.memgraphUserGroupId }} {} +; 95 | cd /var/lib/memgraph; 96 | find . -path ./lost+found -prune -o -exec chown {{ $.Values.memgraphUserGroupId }} {} +; 97 | {{- if $.Values.storage.coordinators.createCoreDumpsClaim }} 98 | cd {{ $.Values.storage.coordinators.coreDumpsMountPath }}; 99 | find . 
-path ./lost+found -prune -o -exec chown {{ $.Values.memgraphUserGroupId }} {} +; 100 | {{- end }} 101 | securityContext: 102 | readOnlyRootFilesystem: true 103 | runAsUser: 0 # Run as root 104 | capabilities: 105 | drop: [ "ALL" ] 106 | add: [ "CHOWN" ] 107 | {{- if $.Values.sysctlInitContainer.enabled }} 108 | - name: init-sysctl 109 | image: "{{ $.Values.sysctlInitContainer.image.repository }}:{{ $.Values.sysctlInitContainer.image.tag }}" 110 | imagePullPolicy: {{ $.Values.sysctlInitContainer.image.pullPolicy }} 111 | command: ['sh', '-c', 'sysctl -w vm.max_map_count={{ $.Values.sysctlInitContainer.maxMapCount }}'] 112 | securityContext: 113 | privileged: true 114 | runAsUser: 0 115 | {{- end }} 116 | 117 | {{- if $.Values.storage.coordinators.createCoreDumpsClaim }} 118 | - name: init-core-dumps 119 | image: busybox 120 | command: ['/bin/sh', '-c'] 121 | args: 122 | - > 123 | echo '{{ $.Values.storage.coordinators.coreDumpsMountPath }}/core.%e.%p.%t.%s' | tee /proc/sys/kernel/core_pattern; 124 | if [ -d /proc/sys/kernel/yama ]; then echo '0' | tee /proc/sys/kernel/yama/ptrace_scope; fi 125 | securityContext: 126 | privileged: true 127 | runAsUser: 0 128 | {{- end }} 129 | 130 | containers: 131 | - name: memgraph-coordinator 132 | image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag }}" 133 | imagePullPolicy: {{ $.Values.image.pullPolicy }} 134 | ports: 135 | - containerPort: {{ $.Values.ports.boltPort }} 136 | - containerPort: {{ $.Values.ports.managementPort }} 137 | - containerPort: {{ $.Values.ports.coordinatorPort }} 138 | args: 139 | {{- range $arg := $coordinator.args }} 140 | - "{{ $arg }}" 141 | {{- end }} 142 | env: 143 | {{- if $.Values.secrets.enabled }} 144 | - name: MEMGRAPH_USER 145 | valueFrom: 146 | secretKeyRef: 147 | name: {{ $.Values.secrets.name }} 148 | key: {{ $.Values.secrets.userKey }} 149 | - name: MEMGRAPH_PASSWORD 150 | valueFrom: 151 | secretKeyRef: 152 | name: {{ $.Values.secrets.name }} 153 | key: {{ $.Values.secrets.passwordKey }} 154 | {{- end }} 155 | - name: MEMGRAPH_ENTERPRISE_LICENSE 156 | value: {{ $.Values.env.MEMGRAPH_ENTERPRISE_LICENSE }} 157 | - name: MEMGRAPH_ORGANIZATION_NAME 158 | value: {{ $.Values.env.MEMGRAPH_ORGANIZATION_NAME }} 159 | volumeMounts: 160 | - name: memgraph-coordinator-{{ $coordinator.id }}-lib-storage 161 | mountPath: /var/lib/memgraph 162 | - name: memgraph-coordinator-{{ $coordinator.id }}-log-storage 163 | mountPath: /var/log/memgraph 164 | securityContext: 165 | allowPrivilegeEscalation: false 166 | capabilities: 167 | drop: [ "ALL" ] 168 | # Run by 'memgraph' user as specified in the Dockerfile 169 | {{- include "container.coordinators.readinessProbe" $.Values.container.coordinators.readinessProbe | nindent 8 }} 170 | {{- include "container.coordinators.livenessProbe" $.Values.container.coordinators.livenessProbe | nindent 8 }} 171 | {{- include "container.coordinators.startupProbe" $.Values.container.coordinators.startupProbe | nindent 8 }} 172 | {{- with $.Values.resources.coordinators }} 173 | resources: 174 | {{- toYaml . 
| nindent 10 }} 175 | {{- end }} 176 | 177 | volumeClaimTemplates: 178 | - metadata: 179 | name: memgraph-coordinator-{{ $coordinator.id }}-lib-storage 180 | spec: 181 | accessModes: 182 | - {{ $.Values.storage.coordinators.libStorageAccessMode }} 183 | storageClassName: {{ $.Values.storage.coordinators.libStorageClassName }} 184 | resources: 185 | requests: 186 | storage: {{ $.Values.storage.coordinators.libPVCSize }} 187 | 188 | - metadata: 189 | name: memgraph-coordinator-{{ $coordinator.id }}-log-storage 190 | spec: 191 | accessModes: 192 | - {{ $.Values.storage.coordinators.logStorageAccessMode }} 193 | storageClassName: {{ $.Values.storage.coordinators.logStorageClassName }} 194 | resources: 195 | requests: 196 | storage: {{ $.Values.storage.coordinators.logPVCSize }} 197 | 198 | {{- if $.Values.storage.coordinators.createCoreDumpsClaim }} 199 | - metadata: 200 | name: memgraph-coordinator-{{ $coordinator.id }}-core-dumps-storage 201 | spec: 202 | accessModes: 203 | - "ReadWriteOnce" 204 | {{- if $.Values.storage.coordinators.coreDumpsStorageClassName }} 205 | storageClassName: {{ $.Values.storage.coordinators.coreDumpsStorageClassName }} 206 | {{- end }} 207 | resources: 208 | requests: 209 | storage: {{ $.Values.storage.coordinators.coreDumpsStorageSize }} 210 | {{- end }} 211 | --- 212 | {{- end }} 213 | -------------------------------------------------------------------------------- /charts/memgraph-high-availability/templates/data.yaml: -------------------------------------------------------------------------------- 1 | {{- range $index, $data := .Values.data }} 2 | apiVersion: apps/v1 3 | kind: StatefulSet 4 | metadata: 5 | name: memgraph-data-{{ $data.id }} 6 | spec: 7 | serviceName: "memgraph-data-{{ $data.id }}" 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: memgraph-data-{{ $data.id }} 12 | role: data 13 | template: 14 | metadata: 15 | labels: 16 | app: memgraph-data-{{ $data.id }} 17 | role: data 18 | spec: 19 | affinity: 20 | {{- if $.Values.affinity.nodeSelection }} 21 | # Node Selection Affinity: Scheduled on nodes with specific label key and value 22 | nodeAffinity: 23 | requiredDuringSchedulingIgnoredDuringExecution: 24 | nodeSelectorTerms: 25 | - matchExpressions: 26 | - key: {{ $.Values.affinity.roleLabelKey }} 27 | operator: In 28 | values: 29 | - {{ $.Values.affinity.dataNodeLabelValue }} 30 | podAntiAffinity: 31 | requiredDuringSchedulingIgnoredDuringExecution: 32 | - labelSelector: 33 | matchExpressions: 34 | - key: role 35 | operator: In 36 | values: 37 | - data 38 | topologyKey: "kubernetes.io/hostname" 39 | {{- else if $.Values.affinity.unique }} 40 | # Unique Affinity: Schedule pods on different nodes where there is no coordinator or data pod 41 | podAntiAffinity: 42 | requiredDuringSchedulingIgnoredDuringExecution: 43 | - labelSelector: 44 | matchExpressions: 45 | - key: role 46 | operator: In 47 | values: 48 | - coordinator 49 | - data 50 | topologyKey: "kubernetes.io/hostname" 51 | {{- else if $.Values.affinity.parity }} 52 | # Parity Affinity: One coordinator and one data node per node, coordinator schedules first, needs to be in pairs 53 | podAffinity: 54 | requiredDuringSchedulingIgnoredDuringExecution: 55 | - labelSelector: 56 | matchExpressions: 57 | - key: role 58 | operator: In 59 | values: 60 | - coordinator 61 | topologyKey: "kubernetes.io/hostname" 62 | podAntiAffinity: 63 | requiredDuringSchedulingIgnoredDuringExecution: 64 | - labelSelector: 65 | matchExpressions: 66 | - key: role 67 | operator: In 68 | values: 69 | - data 70 | 
topologyKey: "kubernetes.io/hostname" 71 | {{- else }} 72 | # Default Affinity: Avoid scheduling on the same node 73 | podAntiAffinity: 74 | preferredDuringSchedulingIgnoredDuringExecution: 75 | - weight: 50 76 | podAffinityTerm: 77 | labelSelector: 78 | matchExpressions: 79 | - key: role 80 | operator: In 81 | values: 82 | - data 83 | topologyKey: "kubernetes.io/hostname" 84 | {{- end }} 85 | 86 | initContainers: 87 | - name: init 88 | image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag }}" 89 | volumeMounts: 90 | - name: memgraph-data-{{ $data.id }}-lib-storage 91 | mountPath: /var/lib/memgraph 92 | - name: memgraph-data-{{ $data.id }}-log-storage 93 | mountPath: /var/log/memgraph 94 | command: [ "/bin/sh","-c" ] 95 | # The permissions have to be explicitly adjusted because under some k8s 96 | # environments permissions set under 97 | # https://github.com/memgraph/memgraph/blob/master/release/debian/postinst 98 | # get overwritten. Sometimes, PVC are created using new partitions -> 99 | # lost+found directory should not change its permissions so it has to 100 | # be excluded. 101 | args: 102 | - > 103 | cd /var/log/memgraph; 104 | find . -path ./lost+found -prune -o -exec chown {{ $.Values.memgraphUserGroupId }} {} +; 105 | cd /var/lib/memgraph; 106 | find . -path ./lost+found -prune -o -exec chown {{ $.Values.memgraphUserGroupId }} {} +; 107 | {{- if $.Values.storage.data.createCoreDumpsClaim }} 108 | cd {{ $.Values.storage.data.coreDumpsMountPath }}; 109 | find . -path ./lost+found -prune -o -exec chown {{ $.Values.memgraphUserGroupId }} {} +; 110 | {{- end }} 111 | securityContext: 112 | readOnlyRootFilesystem: true 113 | runAsUser: 0 # Run as root 114 | capabilities: 115 | drop: [ "ALL" ] 116 | add: [ "CHOWN" ] 117 | {{- if $.Values.sysctlInitContainer.enabled }} 118 | - name: init-sysctl 119 | image: "{{ $.Values.sysctlInitContainer.image.repository }}:{{ $.Values.sysctlInitContainer.image.tag }}" 120 | imagePullPolicy: {{ $.Values.sysctlInitContainer.image.pullPolicy }} 121 | command: ['sh', '-c', 'sysctl -w vm.max_map_count={{ $.Values.sysctlInitContainer.maxMapCount }}'] 122 | securityContext: 123 | privileged: true 124 | runAsUser: 0 125 | {{- end }} 126 | 127 | {{- if $.Values.storage.data.createCoreDumpsClaim }} 128 | - name: init-core-dumps 129 | image: busybox 130 | command: ['/bin/sh', '-c'] 131 | args: 132 | - > 133 | echo '{{ $.Values.storage.data.coreDumpsMountPath }}/core.%e.%p.%t.%s' | tee /proc/sys/kernel/core_pattern; 134 | if [ -d /proc/sys/kernel/yama ]; then echo '0' | tee /proc/sys/kernel/yama/ptrace_scope; fi 135 | securityContext: 136 | privileged: true 137 | runAsUser: 0 138 | {{- end }} 139 | 140 | containers: 141 | - name: memgraph-data 142 | image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag }}" 143 | imagePullPolicy: {{ $.Values.image.pullPolicy }} 144 | ports: 145 | - containerPort: {{ $.Values.ports.boltPort }} 146 | - containerPort: {{ $.Values.ports.managementPort }} 147 | - containerPort: {{ $.Values.ports.replicationPort }} 148 | args: 149 | {{- range $arg := $data.args }} 150 | - "{{ $arg }}" 151 | {{- end }} 152 | env: 153 | {{- if $.Values.secrets.enabled }} 154 | - name: MEMGRAPH_USER 155 | valueFrom: 156 | secretKeyRef: 157 | name: {{ $.Values.secrets.name }} 158 | key: {{ $.Values.secrets.userKey }} 159 | - name: MEMGRAPH_PASSWORD 160 | valueFrom: 161 | secretKeyRef: 162 | name: {{ $.Values.secrets.name }} 163 | key: {{ $.Values.secrets.passwordKey }} 164 | {{- end }} 165 | - name: MEMGRAPH_ENTERPRISE_LICENSE 166 | value: 
{{ $.Values.env.MEMGRAPH_ENTERPRISE_LICENSE }} 167 | - name: MEMGRAPH_ORGANIZATION_NAME 168 | value: {{ $.Values.env.MEMGRAPH_ORGANIZATION_NAME }} 169 | volumeMounts: 170 | - name: memgraph-data-{{ $data.id }}-lib-storage 171 | mountPath: /var/lib/memgraph 172 | - name: memgraph-data-{{ $data.id }}-log-storage 173 | mountPath: /var/log/memgraph 174 | {{- if $.Values.storage.data.createCoreDumpsClaim }} 175 | - name: memgraph-data-{{ $data.id }}-core-dumps-storage 176 | mountPath: {{ $.Values.storage.data.coreDumpsMountPath }} 177 | {{- end }} 178 | securityContext: 179 | allowPrivilegeEscalation: false 180 | capabilities: 181 | drop: [ "ALL" ] 182 | # Run by 'memgraph' user as specified in the Dockerfile 183 | {{- include "container.data.readinessProbe" $.Values.container.data.readinessProbe | nindent 8 }} 184 | {{- include "container.data.livenessProbe" $.Values.container.data.livenessProbe | nindent 8 }} 185 | {{- include "container.data.startupProbe" $.Values.container.data.startupProbe | nindent 8 }} 186 | {{- with $.Values.resources.data }} 187 | resources: 188 | {{- toYaml . | nindent 10 }} 189 | {{- end }} 190 | 191 | volumeClaimTemplates: 192 | - metadata: 193 | name: memgraph-data-{{ $data.id }}-lib-storage 194 | spec: 195 | accessModes: 196 | - {{ $.Values.storage.data.libStorageAccessMode }} 197 | storageClassName: {{ $.Values.storage.data.libStorageClassName }} 198 | resources: 199 | requests: 200 | storage: {{ $.Values.storage.data.libPVCSize }} 201 | - metadata: 202 | name: memgraph-data-{{ $data.id }}-log-storage 203 | spec: 204 | accessModes: 205 | - {{ $.Values.storage.data.logStorageAccessMode }} 206 | storageClassName: {{ $.Values.storage.data.logStorageClassName }} 207 | resources: 208 | requests: 209 | storage: {{ $.Values.storage.data.logPVCSize }} 210 | 211 | {{- if $.Values.storage.data.createCoreDumpsClaim }} 212 | - metadata: 213 | name: memgraph-data-{{ $data.id }}-core-dumps-storage 214 | spec: 215 | accessModes: 216 | - "ReadWriteOnce" 217 | {{- if $.Values.storage.data.coreDumpsStorageClassName }} 218 | storageClassName: {{ $.Values.storage.data.coreDumpsStorageClassName }} 219 | {{- end }} 220 | resources: 221 | requests: 222 | storage: {{ $.Values.storage.data.coreDumpsStorageSize }} 223 | {{- end }} 224 | --- 225 | {{- end }} 226 | -------------------------------------------------------------------------------- /charts/memgraph-high-availability/templates/ingress-nginx.yaml: -------------------------------------------------------------------------------- 1 | {{- if or (eq $.Values.externalAccessConfig.dataInstance.serviceType "IngressNginx") (eq $.Values.externalAccessConfig.coordinator.serviceType "IngressNginx") }} 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | labels: 6 | app.kubernetes.io/instance: ingress-nginx 7 | app.kubernetes.io/name: ingress-nginx 8 | name: ingress-nginx 9 | annotations: 10 | # Install the namespace before everything else 11 | "helm.sh/hook": pre-install 12 | "helm.sh/hook-weight": "-5" 13 | "helm.sh/hook-delete-policy": before-hook-creation 14 | --- 15 | apiVersion: v1 16 | kind: ConfigMap 17 | metadata: 18 | name: tcp-services # If you change it, make sure that you change in the ingress-nginx.yaml file too 19 | namespace: ingress-nginx 20 | data: 21 | {{- if eq $.Values.externalAccessConfig.dataInstance.serviceType "IngressNginx"}} 22 | 9000: "default/memgraph-data-0:7687" 23 | 9001: "default/memgraph-data-1:7687" 24 | {{- end }} 25 | {{- if eq $.Values.externalAccessConfig.coordinator.serviceType "IngressNginx"}} 26 | 
9011: "default/memgraph-coordinator-1:7687" 27 | 9012: "default/memgraph-coordinator-2:7687" 28 | 9013: "default/memgraph-coordinator-3:7687" 29 | {{- end }} 30 | --- 31 | apiVersion: v1 32 | automountServiceAccountToken: true 33 | kind: ServiceAccount 34 | metadata: 35 | labels: 36 | app.kubernetes.io/component: controller 37 | app.kubernetes.io/instance: ingress-nginx 38 | app.kubernetes.io/name: ingress-nginx 39 | app.kubernetes.io/part-of: ingress-nginx 40 | app.kubernetes.io/version: 1.12.0 41 | name: ingress-nginx 42 | namespace: ingress-nginx 43 | --- 44 | apiVersion: v1 45 | automountServiceAccountToken: true 46 | kind: ServiceAccount 47 | metadata: 48 | labels: 49 | app.kubernetes.io/component: admission-webhook 50 | app.kubernetes.io/instance: ingress-nginx 51 | app.kubernetes.io/name: ingress-nginx 52 | app.kubernetes.io/part-of: ingress-nginx 53 | app.kubernetes.io/version: 1.12.0 54 | name: ingress-nginx-admission 55 | namespace: ingress-nginx 56 | --- 57 | apiVersion: rbac.authorization.k8s.io/v1 58 | kind: Role 59 | metadata: 60 | labels: 61 | app.kubernetes.io/component: controller 62 | app.kubernetes.io/instance: ingress-nginx 63 | app.kubernetes.io/name: ingress-nginx 64 | app.kubernetes.io/part-of: ingress-nginx 65 | app.kubernetes.io/version: 1.12.0 66 | name: ingress-nginx 67 | namespace: ingress-nginx 68 | rules: 69 | - apiGroups: 70 | - "" 71 | resources: 72 | - namespaces 73 | verbs: 74 | - get 75 | - apiGroups: 76 | - "" 77 | resources: 78 | - configmaps 79 | - pods 80 | - secrets 81 | - endpoints 82 | verbs: 83 | - get 84 | - list 85 | - watch 86 | - apiGroups: 87 | - "" 88 | resources: 89 | - services 90 | verbs: 91 | - get 92 | - list 93 | - watch 94 | - apiGroups: 95 | - networking.k8s.io 96 | resources: 97 | - ingresses 98 | verbs: 99 | - get 100 | - list 101 | - watch 102 | - apiGroups: 103 | - networking.k8s.io 104 | resources: 105 | - ingresses/status 106 | verbs: 107 | - update 108 | - apiGroups: 109 | - networking.k8s.io 110 | resources: 111 | - ingressclasses 112 | verbs: 113 | - get 114 | - list 115 | - watch 116 | - apiGroups: 117 | - coordination.k8s.io 118 | resourceNames: 119 | - ingress-nginx-leader 120 | resources: 121 | - leases 122 | verbs: 123 | - get 124 | - update 125 | - apiGroups: 126 | - coordination.k8s.io 127 | resources: 128 | - leases 129 | verbs: 130 | - create 131 | - apiGroups: 132 | - "" 133 | resources: 134 | - events 135 | verbs: 136 | - create 137 | - patch 138 | - apiGroups: 139 | - discovery.k8s.io 140 | resources: 141 | - endpointslices 142 | verbs: 143 | - list 144 | - watch 145 | - get 146 | --- 147 | apiVersion: rbac.authorization.k8s.io/v1 148 | kind: Role 149 | metadata: 150 | labels: 151 | app.kubernetes.io/component: admission-webhook 152 | app.kubernetes.io/instance: ingress-nginx 153 | app.kubernetes.io/name: ingress-nginx 154 | app.kubernetes.io/part-of: ingress-nginx 155 | app.kubernetes.io/version: 1.12.0 156 | name: ingress-nginx-admission 157 | namespace: ingress-nginx 158 | rules: 159 | - apiGroups: 160 | - "" 161 | resources: 162 | - secrets 163 | verbs: 164 | - get 165 | - create 166 | --- 167 | apiVersion: rbac.authorization.k8s.io/v1 168 | kind: ClusterRole 169 | metadata: 170 | labels: 171 | app.kubernetes.io/instance: ingress-nginx 172 | app.kubernetes.io/name: ingress-nginx 173 | app.kubernetes.io/part-of: ingress-nginx 174 | app.kubernetes.io/version: 1.12.0 175 | name: ingress-nginx 176 | rules: 177 | - apiGroups: 178 | - "" 179 | resources: 180 | - configmaps 181 | - endpoints 182 | - nodes 183 | - 
pods 184 | - secrets 185 | - namespaces 186 | verbs: 187 | - list 188 | - watch 189 | - apiGroups: 190 | - coordination.k8s.io 191 | resources: 192 | - leases 193 | verbs: 194 | - list 195 | - watch 196 | - apiGroups: 197 | - "" 198 | resources: 199 | - nodes 200 | verbs: 201 | - get 202 | - apiGroups: 203 | - "" 204 | resources: 205 | - services 206 | verbs: 207 | - get 208 | - list 209 | - watch 210 | - apiGroups: 211 | - networking.k8s.io 212 | resources: 213 | - ingresses 214 | verbs: 215 | - get 216 | - list 217 | - watch 218 | - apiGroups: 219 | - "" 220 | resources: 221 | - events 222 | verbs: 223 | - create 224 | - patch 225 | - apiGroups: 226 | - networking.k8s.io 227 | resources: 228 | - ingresses/status 229 | verbs: 230 | - update 231 | - apiGroups: 232 | - networking.k8s.io 233 | resources: 234 | - ingressclasses 235 | verbs: 236 | - get 237 | - list 238 | - watch 239 | - apiGroups: 240 | - discovery.k8s.io 241 | resources: 242 | - endpointslices 243 | verbs: 244 | - list 245 | - watch 246 | - get 247 | --- 248 | apiVersion: rbac.authorization.k8s.io/v1 249 | kind: ClusterRole 250 | metadata: 251 | labels: 252 | app.kubernetes.io/component: admission-webhook 253 | app.kubernetes.io/instance: ingress-nginx 254 | app.kubernetes.io/name: ingress-nginx 255 | app.kubernetes.io/part-of: ingress-nginx 256 | app.kubernetes.io/version: 1.12.0 257 | name: ingress-nginx-admission 258 | rules: 259 | - apiGroups: 260 | - admissionregistration.k8s.io 261 | resources: 262 | - validatingwebhookconfigurations 263 | verbs: 264 | - get 265 | - update 266 | --- 267 | apiVersion: rbac.authorization.k8s.io/v1 268 | kind: RoleBinding 269 | metadata: 270 | labels: 271 | app.kubernetes.io/component: controller 272 | app.kubernetes.io/instance: ingress-nginx 273 | app.kubernetes.io/name: ingress-nginx 274 | app.kubernetes.io/part-of: ingress-nginx 275 | app.kubernetes.io/version: 1.12.0 276 | name: ingress-nginx 277 | namespace: ingress-nginx 278 | roleRef: 279 | apiGroup: rbac.authorization.k8s.io 280 | kind: Role 281 | name: ingress-nginx 282 | subjects: 283 | - kind: ServiceAccount 284 | name: ingress-nginx 285 | namespace: ingress-nginx 286 | --- 287 | apiVersion: rbac.authorization.k8s.io/v1 288 | kind: RoleBinding 289 | metadata: 290 | labels: 291 | app.kubernetes.io/component: admission-webhook 292 | app.kubernetes.io/instance: ingress-nginx 293 | app.kubernetes.io/name: ingress-nginx 294 | app.kubernetes.io/part-of: ingress-nginx 295 | app.kubernetes.io/version: 1.12.0 296 | name: ingress-nginx-admission 297 | namespace: ingress-nginx 298 | roleRef: 299 | apiGroup: rbac.authorization.k8s.io 300 | kind: Role 301 | name: ingress-nginx-admission 302 | subjects: 303 | - kind: ServiceAccount 304 | name: ingress-nginx-admission 305 | namespace: ingress-nginx 306 | --- 307 | apiVersion: rbac.authorization.k8s.io/v1 308 | kind: ClusterRoleBinding 309 | metadata: 310 | labels: 311 | app.kubernetes.io/instance: ingress-nginx 312 | app.kubernetes.io/name: ingress-nginx 313 | app.kubernetes.io/part-of: ingress-nginx 314 | app.kubernetes.io/version: 1.12.0 315 | name: ingress-nginx 316 | roleRef: 317 | apiGroup: rbac.authorization.k8s.io 318 | kind: ClusterRole 319 | name: ingress-nginx 320 | subjects: 321 | - kind: ServiceAccount 322 | name: ingress-nginx 323 | namespace: ingress-nginx 324 | --- 325 | apiVersion: rbac.authorization.k8s.io/v1 326 | kind: ClusterRoleBinding 327 | metadata: 328 | labels: 329 | app.kubernetes.io/component: admission-webhook 330 | app.kubernetes.io/instance: ingress-nginx 331 | 
app.kubernetes.io/name: ingress-nginx 332 | app.kubernetes.io/part-of: ingress-nginx 333 | app.kubernetes.io/version: 1.12.0 334 | name: ingress-nginx-admission 335 | roleRef: 336 | apiGroup: rbac.authorization.k8s.io 337 | kind: ClusterRole 338 | name: ingress-nginx-admission 339 | subjects: 340 | - kind: ServiceAccount 341 | name: ingress-nginx-admission 342 | namespace: ingress-nginx 343 | --- 344 | apiVersion: v1 345 | data: null 346 | kind: ConfigMap 347 | metadata: 348 | labels: 349 | app.kubernetes.io/component: controller 350 | app.kubernetes.io/instance: ingress-nginx 351 | app.kubernetes.io/name: ingress-nginx 352 | app.kubernetes.io/part-of: ingress-nginx 353 | app.kubernetes.io/version: 1.12.0 354 | name: ingress-nginx-controller 355 | namespace: ingress-nginx 356 | --- 357 | apiVersion: v1 358 | kind: Service 359 | metadata: 360 | labels: 361 | app.kubernetes.io/component: controller 362 | app.kubernetes.io/instance: ingress-nginx 363 | app.kubernetes.io/name: ingress-nginx 364 | app.kubernetes.io/part-of: ingress-nginx 365 | app.kubernetes.io/version: 1.12.0 366 | name: ingress-nginx-controller 367 | namespace: ingress-nginx 368 | spec: 369 | externalTrafficPolicy: Local 370 | ipFamilies: 371 | - IPv4 372 | ipFamilyPolicy: SingleStack 373 | ports: 374 | {{- if eq $.Values.externalAccessConfig.dataInstance.serviceType "IngressNginx"}} 375 | - name: tcp-data-0 376 | port: 9000 377 | targetPort: 9000 378 | protocol: TCP 379 | - name: tcp-data-1 380 | port: 9001 381 | targetPort: 9001 382 | protocol: TCP 383 | {{- end }} 384 | {{- if eq $.Values.externalAccessConfig.coordinator.serviceType "IngressNginx"}} 385 | - name: tcp-coord-1 386 | port: 9011 387 | targetPort: 9011 388 | protocol: TCP 389 | - name: tcp-coord-2 390 | port: 9012 391 | targetPort: 9012 392 | protocol: TCP 393 | - name: tcp-coord-3 394 | port: 9013 395 | targetPort: 9013 396 | protocol: TCP 397 | {{- end }} 398 | - appProtocol: http 399 | name: http 400 | port: 80 401 | protocol: TCP 402 | targetPort: http 403 | - appProtocol: https 404 | name: https 405 | port: 443 406 | protocol: TCP 407 | targetPort: https 408 | selector: 409 | app.kubernetes.io/component: controller 410 | app.kubernetes.io/instance: ingress-nginx 411 | app.kubernetes.io/name: ingress-nginx 412 | type: LoadBalancer 413 | --- 414 | apiVersion: v1 415 | kind: Service 416 | metadata: 417 | labels: 418 | app.kubernetes.io/component: controller 419 | app.kubernetes.io/instance: ingress-nginx 420 | app.kubernetes.io/name: ingress-nginx 421 | app.kubernetes.io/part-of: ingress-nginx 422 | app.kubernetes.io/version: 1.12.0 423 | name: ingress-nginx-controller-admission 424 | namespace: ingress-nginx 425 | spec: 426 | ports: 427 | - appProtocol: https 428 | name: https-webhook 429 | port: 443 430 | targetPort: webhook 431 | selector: 432 | app.kubernetes.io/component: controller 433 | app.kubernetes.io/instance: ingress-nginx 434 | app.kubernetes.io/name: ingress-nginx 435 | type: ClusterIP 436 | --- 437 | apiVersion: apps/v1 438 | kind: Deployment 439 | metadata: 440 | labels: 441 | app.kubernetes.io/component: controller 442 | app.kubernetes.io/instance: ingress-nginx 443 | app.kubernetes.io/name: ingress-nginx 444 | app.kubernetes.io/part-of: ingress-nginx 445 | app.kubernetes.io/version: 1.12.0 446 | name: ingress-nginx-controller 447 | namespace: ingress-nginx 448 | spec: 449 | minReadySeconds: 0 450 | revisionHistoryLimit: 10 451 | selector: 452 | matchLabels: 453 | app.kubernetes.io/component: controller 454 | app.kubernetes.io/instance: 
ingress-nginx 455 | app.kubernetes.io/name: ingress-nginx 456 | strategy: 457 | rollingUpdate: 458 | maxUnavailable: 1 459 | type: RollingUpdate 460 | template: 461 | metadata: 462 | labels: 463 | app.kubernetes.io/component: controller 464 | app.kubernetes.io/instance: ingress-nginx 465 | app.kubernetes.io/name: ingress-nginx 466 | app.kubernetes.io/part-of: ingress-nginx 467 | app.kubernetes.io/version: 1.12.0 468 | spec: 469 | containers: 470 | - args: 471 | - /nginx-ingress-controller 472 | - --tcp-services-configmap=ingress-nginx/tcp-services 473 | - --publish-service=$(POD_NAMESPACE)/ingress-nginx-controller 474 | - --election-id=ingress-nginx-leader 475 | - --controller-class=k8s.io/ingress-nginx 476 | - --ingress-class=nginx 477 | - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller 478 | - --validating-webhook=:8443 479 | - --validating-webhook-certificate=/usr/local/certificates/cert 480 | - --validating-webhook-key=/usr/local/certificates/key 481 | env: 482 | - name: POD_NAME 483 | valueFrom: 484 | fieldRef: 485 | fieldPath: metadata.name 486 | - name: POD_NAMESPACE 487 | valueFrom: 488 | fieldRef: 489 | fieldPath: metadata.namespace 490 | - name: LD_PRELOAD 491 | value: /usr/local/lib/libmimalloc.so 492 | image: registry.k8s.io/ingress-nginx/controller:v1.12.0@sha256:e6b8de175acda6ca913891f0f727bca4527e797d52688cbe9fec9040d6f6b6fa 493 | imagePullPolicy: IfNotPresent 494 | lifecycle: 495 | preStop: 496 | exec: 497 | command: 498 | - /wait-shutdown 499 | livenessProbe: 500 | failureThreshold: 5 501 | httpGet: 502 | path: /healthz 503 | port: 10254 504 | scheme: HTTP 505 | initialDelaySeconds: 10 506 | periodSeconds: 10 507 | successThreshold: 1 508 | timeoutSeconds: 1 509 | name: controller 510 | ports: 511 | - containerPort: 80 512 | name: http 513 | protocol: TCP 514 | - containerPort: 443 515 | name: https 516 | protocol: TCP 517 | - containerPort: 8443 518 | name: webhook 519 | protocol: TCP 520 | readinessProbe: 521 | failureThreshold: 3 522 | httpGet: 523 | path: /healthz 524 | port: 10254 525 | scheme: HTTP 526 | initialDelaySeconds: 10 527 | periodSeconds: 10 528 | successThreshold: 1 529 | timeoutSeconds: 1 530 | resources: 531 | requests: 532 | cpu: 100m 533 | memory: 90Mi 534 | securityContext: 535 | allowPrivilegeEscalation: false 536 | capabilities: 537 | add: 538 | - NET_BIND_SERVICE 539 | drop: 540 | - ALL 541 | readOnlyRootFilesystem: false 542 | runAsGroup: 82 543 | runAsNonRoot: true 544 | runAsUser: 101 545 | seccompProfile: 546 | type: RuntimeDefault 547 | volumeMounts: 548 | - mountPath: /usr/local/certificates/ 549 | name: webhook-cert 550 | readOnly: true 551 | dnsPolicy: ClusterFirst 552 | nodeSelector: 553 | kubernetes.io/os: linux 554 | serviceAccountName: ingress-nginx 555 | terminationGracePeriodSeconds: 300 556 | volumes: 557 | - name: webhook-cert 558 | secret: 559 | secretName: ingress-nginx-admission 560 | --- 561 | apiVersion: batch/v1 562 | kind: Job 563 | metadata: 564 | labels: 565 | app.kubernetes.io/component: admission-webhook 566 | app.kubernetes.io/instance: ingress-nginx 567 | app.kubernetes.io/name: ingress-nginx 568 | app.kubernetes.io/part-of: ingress-nginx 569 | app.kubernetes.io/version: 1.12.0 570 | name: ingress-nginx-admission-create 571 | namespace: ingress-nginx 572 | spec: 573 | template: 574 | metadata: 575 | labels: 576 | app.kubernetes.io/component: admission-webhook 577 | app.kubernetes.io/instance: ingress-nginx 578 | app.kubernetes.io/name: ingress-nginx 579 | app.kubernetes.io/part-of: ingress-nginx 580 | 
app.kubernetes.io/version: 1.12.0 581 | name: ingress-nginx-admission-create 582 | spec: 583 | containers: 584 | - args: 585 | - create 586 | - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc 587 | - --namespace=$(POD_NAMESPACE) 588 | - --secret-name=ingress-nginx-admission 589 | env: 590 | - name: POD_NAMESPACE 591 | valueFrom: 592 | fieldRef: 593 | fieldPath: metadata.namespace 594 | image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.5.0@sha256:aaafd456bda110628b2d4ca6296f38731a3aaf0bf7581efae824a41c770a8fc4 595 | imagePullPolicy: IfNotPresent 596 | name: create 597 | securityContext: 598 | allowPrivilegeEscalation: false 599 | capabilities: 600 | drop: 601 | - ALL 602 | readOnlyRootFilesystem: true 603 | runAsGroup: 65532 604 | runAsNonRoot: true 605 | runAsUser: 65532 606 | seccompProfile: 607 | type: RuntimeDefault 608 | nodeSelector: 609 | kubernetes.io/os: linux 610 | restartPolicy: OnFailure 611 | serviceAccountName: ingress-nginx-admission 612 | --- 613 | apiVersion: batch/v1 614 | kind: Job 615 | metadata: 616 | labels: 617 | app.kubernetes.io/component: admission-webhook 618 | app.kubernetes.io/instance: ingress-nginx 619 | app.kubernetes.io/name: ingress-nginx 620 | app.kubernetes.io/part-of: ingress-nginx 621 | app.kubernetes.io/version: 1.12.0 622 | name: ingress-nginx-admission-patch 623 | namespace: ingress-nginx 624 | spec: 625 | template: 626 | metadata: 627 | labels: 628 | app.kubernetes.io/component: admission-webhook 629 | app.kubernetes.io/instance: ingress-nginx 630 | app.kubernetes.io/name: ingress-nginx 631 | app.kubernetes.io/part-of: ingress-nginx 632 | app.kubernetes.io/version: 1.12.0 633 | name: ingress-nginx-admission-patch 634 | spec: 635 | containers: 636 | - args: 637 | - patch 638 | - --webhook-name=ingress-nginx-admission 639 | - --namespace=$(POD_NAMESPACE) 640 | - --patch-mutating=false 641 | - --secret-name=ingress-nginx-admission 642 | - --patch-failure-policy=Fail 643 | env: 644 | - name: POD_NAMESPACE 645 | valueFrom: 646 | fieldRef: 647 | fieldPath: metadata.namespace 648 | image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.5.0@sha256:aaafd456bda110628b2d4ca6296f38731a3aaf0bf7581efae824a41c770a8fc4 649 | imagePullPolicy: IfNotPresent 650 | name: patch 651 | securityContext: 652 | allowPrivilegeEscalation: false 653 | capabilities: 654 | drop: 655 | - ALL 656 | readOnlyRootFilesystem: true 657 | runAsGroup: 65532 658 | runAsNonRoot: true 659 | runAsUser: 65532 660 | seccompProfile: 661 | type: RuntimeDefault 662 | nodeSelector: 663 | kubernetes.io/os: linux 664 | restartPolicy: OnFailure 665 | serviceAccountName: ingress-nginx-admission 666 | --- 667 | apiVersion: networking.k8s.io/v1 668 | kind: IngressClass 669 | metadata: 670 | labels: 671 | app.kubernetes.io/component: controller 672 | app.kubernetes.io/instance: ingress-nginx 673 | app.kubernetes.io/name: ingress-nginx 674 | app.kubernetes.io/part-of: ingress-nginx 675 | app.kubernetes.io/version: 1.12.0 676 | name: nginx 677 | spec: 678 | controller: k8s.io/ingress-nginx 679 | --- 680 | apiVersion: admissionregistration.k8s.io/v1 681 | kind: ValidatingWebhookConfiguration 682 | metadata: 683 | labels: 684 | app.kubernetes.io/component: admission-webhook 685 | app.kubernetes.io/instance: ingress-nginx 686 | app.kubernetes.io/name: ingress-nginx 687 | app.kubernetes.io/part-of: ingress-nginx 688 | app.kubernetes.io/version: 1.12.0 689 | name: ingress-nginx-admission 690 | webhooks: 691 | - admissionReviewVersions: 692 | - v1 
693 | clientConfig: 694 | service: 695 | name: ingress-nginx-controller-admission 696 | namespace: ingress-nginx 697 | path: /networking/v1/ingresses 698 | port: 443 699 | failurePolicy: Fail 700 | matchPolicy: Equivalent 701 | name: validate.nginx.ingress.kubernetes.io 702 | rules: 703 | - apiGroups: 704 | - networking.k8s.io 705 | apiVersions: 706 | - v1 707 | operations: 708 | - CREATE 709 | - UPDATE 710 | resources: 711 | - ingresses 712 | sideEffects: None 713 | {{- end }} 714 | -------------------------------------------------------------------------------- /charts/memgraph-high-availability/templates/mg-exporter.yaml: -------------------------------------------------------------------------------- 1 | {{- if $.Values.prometheus.enabled }} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: mg-exporter-config 6 | namespace: {{ $.Values.prometheus.namespace }} 7 | data: 8 | ha_config.yaml: | 9 | exporter: 10 | port: {{ $.Values.prometheus.memgraphExporter.port }} 11 | pull_frequency_seconds: {{ $.Values.prometheus.memgraphExporter.pullFrequencySeconds }} 12 | instances: 13 | # TODO: (andi) Needs to be added to the section about upgrading, try to templatize 14 | - name: coord1 15 | url: http://memgraph-coordinator-1.default.svc.cluster.local 16 | port: 9091 17 | type: coordinator 18 | - name: coord2 19 | url: http://memgraph-coordinator-2.default.svc.cluster.local 20 | port: 9091 21 | type: coordinator 22 | - name: coord3 23 | url: http://memgraph-coordinator-3.default.svc.cluster.local 24 | port: 9091 25 | type: coordinator 26 | - name: data1 27 | url: http://memgraph-data-0.default.svc.cluster.local 28 | port: 9091 29 | type: data_instance 30 | - name: data2 31 | url: http://memgraph-data-1.default.svc.cluster.local 32 | port: 9091 33 | type: data_instance 34 | --- 35 | apiVersion: apps/v1 36 | kind: Deployment 37 | metadata: 38 | name: mg-exporter 39 | namespace: {{ $.Values.prometheus.namespace }} 40 | labels: 41 | app: mg-exporter 42 | spec: 43 | replicas: 1 44 | selector: 45 | matchLabels: 46 | app: mg-exporter 47 | template: 48 | metadata: 49 | labels: 50 | app: mg-exporter 51 | spec: 52 | containers: 53 | - name: exporter 54 | image: {{ $.Values.prometheus.memgraphExporter.repository }}:{{ $.Values.prometheus.memgraphExporter.tag }} 55 | volumeMounts: 56 | - name: config-volume 57 | mountPath: /etc/mg-exporter/ha_config.yaml 58 | subPath: ha_config.yaml 59 | ports: 60 | - containerPort: {{ $.Values.prometheus.memgraphExporter.port }} 61 | env: 62 | - name: DEPLOYMENT_TYPE 63 | value: HA # HA stands for high availability 64 | - name: CONFIG_FILE 65 | value: /etc/mg-exporter/ha_config.yaml 66 | volumes: 67 | - name: config-volume 68 | configMap: 69 | name: mg-exporter-config 70 | --- 71 | apiVersion: v1 72 | kind: Service 73 | metadata: 74 | name: mg-exporter 75 | namespace: {{ $.Values.prometheus.namespace }} 76 | labels: 77 | app: mg-exporter 78 | spec: 79 | selector: 80 | app: mg-exporter 81 | ports: 82 | - protocol: TCP 83 | name: tcp-metrics-port 84 | port: {{ $.Values.prometheus.memgraphExporter.port }} 85 | targetPort: {{ $.Values.prometheus.memgraphExporter.port }} 86 | --- 87 | apiVersion: monitoring.coreos.com/v1 88 | kind: ServiceMonitor 89 | metadata: 90 | name: mg-exporter 91 | namespace: {{ $.Values.prometheus.namespace }} 92 | labels: 93 | release: {{ $.Values.prometheus.serviceMonitor.kubePrometheusStackReleaseName }} 94 | spec: 95 | selector: 96 | matchLabels: 97 | app: mg-exporter 98 | endpoints: 99 | - port: tcp-metrics-port # must be the same as the 
service port name 100 | interval: {{ $.Values.prometheus.serviceMonitor.interval }} 101 | namespaceSelector: 102 | matchNames: 103 | - {{ $.Values.prometheus.namespace }} # This refers to where our service exposing the exporter is located 104 | {{- end }} 105 | -------------------------------------------------------------------------------- /charts/memgraph-high-availability/templates/services-coordinators-external.yaml: -------------------------------------------------------------------------------- 1 | {{- $validCoordServices := list "" "CommonLoadBalancer" "LoadBalancer" "NodePort" "IngressNginx" }} 2 | {{- if not (has .Values.externalAccessConfig.coordinator.serviceType $validCoordServices) }} 3 | {{- fail "Invalid value for externalAccessConfig.coordinator.serviceType. Use '', 'CommonLoadBalancer', 'LoadBalancer', 'NodePort' or 'IngressNginx'." }} 4 | {{- end }} 5 | 6 | {{- if or (eq $.Values.externalAccessConfig.coordinator.serviceType "IngressNginx") (eq $.Values.externalAccessConfig.coordinator.serviceType "") }} 7 | {{- /* No external service rendered for IngressNginx or empty string */}} 8 | {{- else if eq $.Values.externalAccessConfig.coordinator.serviceType "CommonLoadBalancer" }} 9 | apiVersion: v1 10 | kind: Service 11 | metadata: 12 | name: coordinators 13 | {{- with $.Values.externalAccessConfig.coordinator.annotations }} 14 | annotations: 15 | {{- toYaml . | nindent 4 }} 16 | {{- end }} 17 | spec: 18 | type: LoadBalancer 19 | selector: 20 | role: coordinator 21 | ports: 22 | - protocol: TCP 23 | name: tcp-bolt-port 24 | port: {{ $.Values.ports.boltPort }} 25 | targetPort: {{ $.Values.ports.boltPort }} 26 | {{- else }} 27 | {{- range .Values.coordinators }} 28 | --- 29 | apiVersion: v1 30 | kind: Service 31 | metadata: 32 | name: memgraph-coordinator-{{ .id }}-external 33 | {{- with $.Values.externalAccessConfig.coordinator.annotations }} 34 | annotations: 35 | {{- toYaml . 
| nindent 4 }} 36 | {{- end }} 37 | spec: 38 | {{- if eq $.Values.externalAccessConfig.coordinator.serviceType "LoadBalancer" }} 39 | type: LoadBalancer 40 | {{- else if eq $.Values.externalAccessConfig.coordinator.serviceType "NodePort" }} 41 | type: NodePort 42 | {{- end }} 43 | selector: 44 | app: memgraph-coordinator-{{ .id }} 45 | ports: 46 | - protocol: TCP 47 | name: tcp-bolt-port 48 | port: {{ $.Values.ports.boltPort }} 49 | targetPort: {{ $.Values.ports.boltPort }} 50 | {{- end }} 51 | {{- end }} 52 | -------------------------------------------------------------------------------- /charts/memgraph-high-availability/templates/services-coordinators.yaml: -------------------------------------------------------------------------------- 1 | {{- range .Values.coordinators }} 2 | --- 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: memgraph-coordinator-{{ .id }} 7 | spec: 8 | {{- if $.Values.headlessService.enabled }} 9 | clusterIP: None 10 | {{- end }} 11 | selector: 12 | app: memgraph-coordinator-{{ .id }} 13 | ports: 14 | - protocol: TCP 15 | name: tcp-bolt-port 16 | port: {{ $.Values.ports.boltPort }} 17 | targetPort: {{ $.Values.ports.boltPort }} 18 | - protocol: TCP 19 | name: tcp-coordinator-port 20 | port: {{ $.Values.ports.coordinatorPort }} 21 | targetPort: {{ $.Values.ports.coordinatorPort }} 22 | - protocol: TCP 23 | name: tcp-management-port 24 | port: {{ $.Values.ports.managementPort }} 25 | targetPort: {{ $.Values.ports.managementPort }} 26 | {{- if $.Values.prometheus.enabled }} 27 | - protocol: TCP 28 | name: tcp-metrics-port 29 | port: 9091 30 | targetPort: 9091 31 | {{- end }} 32 | 33 | {{- end }} 34 | -------------------------------------------------------------------------------- /charts/memgraph-high-availability/templates/services-data-external.yaml: -------------------------------------------------------------------------------- 1 | {{- $validDataServices := list "" "LoadBalancer" "NodePort" "IngressNginx" }} 2 | {{- if not (has .Values.externalAccessConfig.dataInstance.serviceType $validDataServices) }} 3 | {{- fail "Invalid value for externalAccessConfig.dataInstance.serviceType. Use '', 'LoadBalancer', 'NodePort', or 'IngressNginx'." }} 4 | {{- end }} 5 | 6 | {{- if or (eq $.Values.externalAccessConfig.dataInstance.serviceType "IngressNginx") (eq $.Values.externalAccessConfig.dataInstance.serviceType "") }} 7 | {{- /* No external service rendered for IngressNginx or empty string */}} 8 | {{- else }} 9 | {{- range .Values.data }} 10 | --- 11 | apiVersion: v1 12 | kind: Service 13 | metadata: 14 | name: memgraph-data-{{ .id }}-external 15 | {{- with $.Values.externalAccessConfig.dataInstance.annotations }} 16 | annotations: 17 | {{- toYaml . 
| nindent 4 }}
18 | {{- end }}
19 | spec:
20 |   {{- if eq $.Values.externalAccessConfig.dataInstance.serviceType "NodePort" }}
21 |   type: NodePort
22 |   {{- else if eq $.Values.externalAccessConfig.dataInstance.serviceType "LoadBalancer" }}
23 |   type: LoadBalancer
24 |   {{- end }}
25 |   selector:
26 |     app: memgraph-data-{{ .id }}
27 |   ports:
28 |     - protocol: TCP
29 |       name: tcp-bolt-port
30 |       port: {{ $.Values.ports.boltPort }}
31 |       targetPort: {{ $.Values.ports.boltPort }}
32 | {{- end }}
33 | {{- end }}
34 |
--------------------------------------------------------------------------------
/charts/memgraph-high-availability/templates/services-data.yaml:
--------------------------------------------------------------------------------
1 | {{- range .Values.data }}
2 | ---
3 | apiVersion: v1
4 | kind: Service
5 | metadata:
6 |   name: memgraph-data-{{ .id }}
7 | spec:
8 |   {{- if $.Values.headlessService.enabled }}
9 |   clusterIP: None
10 |   {{- end }}
11 |   selector:
12 |     app: memgraph-data-{{ .id }}
13 |   ports:
14 |     - protocol: TCP
15 |       name: tcp-bolt-port
16 |       port: {{ $.Values.ports.boltPort }}
17 |       targetPort: {{ $.Values.ports.boltPort }}
18 |     - protocol: TCP
19 |       name: tcp-management-port
20 |       port: {{ $.Values.ports.managementPort }}
21 |       targetPort: {{ $.Values.ports.managementPort }}
22 |     - protocol: TCP
23 |       name: tcp-replication-port
24 |       port: {{ $.Values.ports.replicationPort }}
25 |       targetPort: {{ $.Values.ports.replicationPort }}
26 |     {{- if $.Values.prometheus.enabled }}
27 |     - protocol: TCP
28 |       name: tcp-metrics-port
29 |       port: 9091
30 |       targetPort: 9091
31 |     {{- end }}
32 | {{- end }}
33 |
--------------------------------------------------------------------------------
/charts/memgraph-high-availability/values.yaml:
--------------------------------------------------------------------------------
1 | image:
2 |   repository: memgraph/memgraph
3 |   # It is bad practice to set the image tag to latest, as it can trigger an automatic upgrade of the charts
4 |   # with some of the pullPolicy values. Please consider pinning the tag to a specific Memgraph version.
5 |   tag: 3.3.0
6 |   pullPolicy: IfNotPresent
7 |
8 | env:
9 |   MEMGRAPH_ENTERPRISE_LICENSE: ""
10 |   MEMGRAPH_ORGANIZATION_NAME: ""
11 |
12 | storage:
13 |   data:
14 |     libPVCSize: "1Gi"
15 |     libStorageAccessMode: "ReadWriteOnce"
16 |     # By default the name of the storage class isn't set, which means that the default storage class will be used.
17 |     # If you set any name, such a storage class must exist.
18 |     libStorageClassName:
19 |     logPVCSize: "1Gi"
20 |     logStorageAccessMode: "ReadWriteOnce"
21 |     logStorageClassName:
22 |     ## Create a Persistent Volume Claim for core dumps.
23 |     createCoreDumpsClaim: false
24 |     coreDumpsStorageClassName:
25 |     coreDumpsStorageSize: 10Gi
26 |     coreDumpsMountPath: /var/core/memgraph
27 |   coordinators:
28 |     libPVCSize: "1Gi"
29 |     libStorageAccessMode: "ReadWriteOnce"
30 |     # By default the name of the storage class isn't set, which means that the default storage class will be used.
31 |     # If you set any name, such a storage class must exist.
32 |     libStorageClassName:
33 |     logPVCSize: "1Gi"
34 |     logStorageAccessMode: "ReadWriteOnce"
35 |     logStorageClassName:
36 |     ## Create a Persistent Volume Claim for core dumps.
37 |     createCoreDumpsClaim: false
38 |     coreDumpsStorageClassName:
39 |     coreDumpsStorageSize: 10Gi
40 |     coreDumpsMountPath: /var/core/memgraph
41 |
42 | ports:
43 |   boltPort: 7687 # If you change this value, change it also in the probes definition
44 |   managementPort: 10000
45 |   replicationPort: 20000
46 |   coordinatorPort: 12000 # If you change this value, change it also in the probes definition
47 |
48 | externalAccessConfig:
49 |   dataInstance:
50 |     # Empty = no external access service will be created
51 |     serviceType: ""
52 |     annotations: {}
53 |   coordinator:
54 |     # Empty = no external access service will be created
55 |     serviceType: ""
56 |     annotations: {}
57 |
58 | headlessService:
59 |   enabled: false # If set to true, each data and coordinator instance will use a headless service
60 |
61 | # Affinity controls the scheduling of the memgraph-high-availability pods.
62 | # By default, data pods will avoid being scheduled on the same node as other data pods,
63 | # and coordinator pods will avoid being scheduled on the same node as other coordinator pods.
64 | # Deployment won't fail if there are not enough nodes.
65 | affinity:
66 |   # The unique affinity will schedule the pods on different nodes in the cluster.
67 |   # This means coordinators and data nodes will not be scheduled on the same node. If there are more pods than nodes, deployment will fail.
68 |   unique: false
69 |   # The parity affinity will enable scheduling of the pods on the same node, but with the rule that one node can host a pair made of a coordinator and a data node.
70 |   # This means each node can have at most two pods, one coordinator and one data node. If there are not enough nodes, deployment will fail.
71 |   parity: false
72 |   # The nodeSelection affinity will enable scheduling of the pods on nodes with specific labels. Coordinators will be scheduled on nodes with the label coordinator-node, and data nodes will be scheduled on nodes with the label data-node. If there are not enough nodes, deployment will fail.
73 |   nodeSelection: false
74 |   roleLabelKey: "role"
75 |   dataNodeLabelValue: "data-node"
76 |   coordinatorNodeLabelValue: "coordinator-node"
77 |
78 | # If you are experiencing issues with the sysctlInitContainer, you can disable it here.
79 | # It is used to increase max_map_count, which is necessary for high memory loads in Memgraph.
80 | # If your pod is crashing with "Max virtual memory areas vm.max_map_count is too low",
81 | # you can increase the maxMapCount value.
82 | # You can see what the proper value for this parameter is by reading
83 | # https://memgraph.com/docs/database-management/system-configuration#recommended-values-for-the-vmmax_map_count-parameter
84 | sysctlInitContainer:
85 |   enabled: true
86 |   maxMapCount: 262144
87 |   image:
88 |     repository: library/busybox
89 |     tag: latest
90 |     pullPolicy: IfNotPresent
91 |
92 | # The explicit user and group setup is required because at init container
93 | # time there is no user created yet. This seems fine because under both the
94 | # Memgraph and Mage images we actually hard-code the user and group id. The
95 | # config is used to chown the user storage and core dumps claims' mount paths.
96 | memgraphUserGroupId: "101:103" 97 | 98 | secrets: 99 | enabled: false 100 | name: memgraph-secrets 101 | userKey: USER 102 | passwordKey: PASSWORD 103 | 104 | container: 105 | data: 106 | readinessProbe: 107 | tcpSocket: 108 | port: 7687 # If you change the bolt port, change this as well 109 | failureThreshold: 20 110 | timeoutSeconds: 10 111 | periodSeconds: 5 112 | livenessProbe: 113 | tcpSocket: 114 | port: 7687 # If you change the bolt port, change this as well 115 | failureThreshold: 20 116 | timeoutSeconds: 10 117 | periodSeconds: 5 118 | # When restoring Memgraph from a backup, it is important to give the app enough time to start. Here, we set it to 2h by default. 119 | startupProbe: 120 | tcpSocket: 121 | port: 7687 # If you change the bolt port, change this as well 122 | failureThreshold: 1440 123 | timeoutSeconds: 10 124 | periodSeconds: 5 125 | coordinators: 126 | readinessProbe: 127 | tcpSocket: 128 | port: 12000 # If you change the coordinator port, change this as well 129 | failureThreshold: 20 130 | timeoutSeconds: 10 131 | periodSeconds: 5 132 | livenessProbe: 133 | tcpSocket: 134 | port: 12000 # If you change the coordinator port, change this as well 135 | failureThreshold: 20 136 | timeoutSeconds: 10 137 | periodSeconds: 5 138 | startupProbe: 139 | tcpSocket: 140 | port: 12000 141 | failureThreshold: 20 142 | timeoutSeconds: 10 143 | periodSeconds: 5 144 | 145 | resources: 146 | data: {} 147 | coordinators: {} 148 | 149 | prometheus: 150 | enabled: false 151 | namespace: monitoring # Namespace where K8s resources from mg-exporter.yaml will be installed and where your kube-prometheus-stack chart is installed 152 | memgraphExporter: 153 | port: 9115 154 | pullFrequencySeconds: 5 155 | repository: memgraph/prometheus-exporter 156 | tag: 0.2.1 157 | serviceMonitor: 158 | kubePrometheusStackReleaseName: kube-prometheus-stack 159 | interval: 15s 160 | 161 | # If you set the --memory-limit flag on data instances, check that the amount of memory the pod has been given is larger than the memory limit you give to Memgraph. 162 | # Setting Memgraph's memory limit to more than the available resources can trigger pod eviction and restarts before Memgraph can raise a query exception and keep 163 | # the pod running.
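# As an illustrative sketch only (the sizes below are assumptions, adjust them to your cluster):
# giving each data pod 4Gi of memory while keeping Memgraph's own limit safely below it could look like
#
#   resources:
#     data:
#       requests:
#         memory: "4Gi"
#       limits:
#         memory: "4Gi"
#
# combined with an extra "--memory-limit=3000" entry (value in MiB) in the args of each instance under `data:` below.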
164 | data: 165 | - id: "0" 166 | args: 167 | - "--management-port=10000" 168 | - "--bolt-port=7687" 169 | - "--also-log-to-stderr" 170 | - "--log-level=TRACE" 171 | - "--log-file=/var/log/memgraph/memgraph.log" 172 | 173 | - id: "1" 174 | args: 175 | - "--management-port=10000" 176 | - "--bolt-port=7687" 177 | - "--also-log-to-stderr" 178 | - "--log-level=TRACE" 179 | - "--log-file=/var/log/memgraph/memgraph.log" 180 | 181 | coordinators: 182 | - id: "1" 183 | args: 184 | - "--coordinator-id=1" 185 | - "--coordinator-port=12000" 186 | - "--management-port=10000" 187 | - "--bolt-port=7687" 188 | - "--also-log-to-stderr" 189 | - "--log-level=TRACE" 190 | - "--coordinator-hostname=memgraph-coordinator-1.default.svc.cluster.local" 191 | - "--log-file=/var/log/memgraph/memgraph.log" 192 | - "--nuraft-log-file=/var/log/memgraph/memgraph.log" 193 | 194 | - id: "2" 195 | args: 196 | - "--coordinator-id=2" 197 | - "--coordinator-port=12000" 198 | - "--management-port=10000" 199 | - "--bolt-port=7687" 200 | - "--also-log-to-stderr" 201 | - "--log-level=TRACE" 202 | - "--coordinator-hostname=memgraph-coordinator-2.default.svc.cluster.local" 203 | - "--log-file=/var/log/memgraph/memgraph.log" 204 | - "--nuraft-log-file=/var/log/memgraph/memgraph.log" 205 | 206 | - id: "3" 207 | args: 208 | - "--coordinator-id=3" 209 | - "--coordinator-port=12000" 210 | - "--management-port=10000" 211 | - "--bolt-port=7687" 212 | - "--also-log-to-stderr" 213 | - "--log-level=TRACE" 214 | - "--coordinator-hostname=memgraph-coordinator-3.default.svc.cluster.local" 215 | - "--log-file=/var/log/memgraph/memgraph.log" 216 | - "--nuraft-log-file=/var/log/memgraph/memgraph.log" 217 | -------------------------------------------------------------------------------- /charts/memgraph-lab/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /charts/memgraph-lab/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: memgraph-lab 3 | home: https://memgraph.com/ 4 | type: application 5 | # Chart version, should be incremented each time the chart changes, including appVersion. 6 | version: 0.1.8 7 | # Version number of the docker image memgraph/lab. 8 | # Use it with quotes. 9 | appVersion: "3.3.0" 10 | description: Memgraph Lab Helm Chart 11 | keywords: 12 | - graph 13 | - database 14 | - cypher 15 | - analytics 16 | icon: https://public-assets.memgraph.com/memgraph-logo/logo-large.png 17 | sources: 18 | - https://github.com/memgraph/memgraph 19 | maintainers: 20 | - name: Memgraph 21 | email: tech@memgraph.com 22 | -------------------------------------------------------------------------------- /charts/memgraph-lab/README.md: -------------------------------------------------------------------------------- 1 | ## Memgraph Lab Kubernetes Helm Chart 2 | A Helm Chart for deploying Memgraph Lab on Kubernetes. 
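If the Memgraph chart repository is not yet configured in your local Helm setup, it can typically be added first. The repository URL below is assumed to be the standard Memgraph Helm charts location; verify it against the top-level README before use:
```
helm repo add memgraph https://memgraph.github.io/helm-charts
helm repo update
```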
3 | 4 | ## Installing the Memgraph Lab Helm Chart 5 | To install the Memgraph Lab Helm Chart, follow the steps below: 6 | ``` 7 | helm install <release-name> memgraph/memgraph-lab 8 | ``` 9 | Replace `<release-name>` with a name of your choice for the release. 10 | 11 | ## Changing the default chart values 12 | To change the default chart values, run the command with the specified set of flags: 13 | ``` 14 | helm install <release-name> memgraph/memgraph-lab --set <flag1>=<value1>,<flag2>=<value2>,... 15 | ``` 16 | Or you can modify a `values.yaml` file and override the desired values: 17 | ``` 18 | helm install <release-name> memgraph/memgraph-lab -f values.yaml 19 | ``` 20 | 21 | ## Configuration Options 22 | 23 | The following table lists the configurable parameters of the Memgraph Lab chart and their default values. 24 | 25 | | Parameter | Description | Default | 26 | | ---------------------------- | ------------------------------------------------------------------------------------------------------- | -------------------------------------- | 27 | | `image.repository` | Memgraph Lab Docker image repository | `memgraph/memgraph-lab` | 28 | | `image.tag` | Specific tag for the Memgraph Lab Docker image. Overrides the image tag whose default is the chart's app version. | `""` (Defaults to chart's app version) | 29 | | `image.pullPolicy` | Image pull policy | `IfNotPresent` | 30 | | `replicaCount` | Number of Memgraph Lab instances to run. | `1` | 31 | | `service.type` | Kubernetes service type | `ClusterIP` | 32 | | `service.port` | Kubernetes service port | `3000` | 33 | | `service.targetPort` | Kubernetes service target port | `3000` | 34 | | `service.protocol` | Protocol used by the service | `TCP` | 35 | | `service.annotations` | Annotations to add to the service | `{}` | 36 | | `podAnnotations` | Annotations to add to the pod | `{}` | 37 | | `resources` | CPU/Memory resource requests/limits. Left empty by default. | `{}` (See note on uncommenting) | 38 | | `serviceAccount.create` | Specifies whether a service account should be created | `true` | 39 | | `serviceAccount.annotations` | Annotations to add to the service account | `{}` | 40 | | `serviceAccount.name` | The name of the service account to use. If not set and create is true, a name is generated. | `""` | 41 | 42 | Memgraph Lab can be further configured with environment variables in your `values.yaml` file. 43 | 44 | ```yaml 45 | env: 46 | - name: QUICK_CONNECT_MG_HOST 47 | value: memgraph 48 | - name: QUICK_CONNECT_MG_PORT 49 | value: "7687" 50 | - name: BASE_PATH 51 | value: / 52 | ``` 53 | Refer to the [Memgraph Lab documentation](https://memgraph.com/docs/data-visualization) for details on how to connect to and interact with Memgraph. 54 | -------------------------------------------------------------------------------- /charts/memgraph-lab/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | Thank you for installing Memgraph Lab! 🎉 2 | 3 | 1. Get the application URL by running these commands: 4 | {{- if .Values.ingress.enabled }} 5 | {{- range $host := .Values.ingress.hosts }} 6 | {{- range .paths }} 7 | http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} 8 | {{- end }} 9 | {{- end }} 10 | {{- else if contains "NodePort" .Values.service.type }} 11 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "memgraph-lab.fullname" .
}}) 12 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") 13 | echo http://$NODE_IP:$NODE_PORT 14 | {{- else if contains "LoadBalancer" .Values.service.type }} 15 | NOTE: It may take a few minutes for the LoadBalancer IP to be available. 16 | You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "memgraph-lab.fullname" . }}' 17 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "memgraph-lab.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") 18 | echo http://$SERVICE_IP:{{ .Values.service.port }} 19 | {{- else if contains "ClusterIP" .Values.service.type }} 20 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "memgraph-lab.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 21 | export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") 22 | echo "Visit http://127.0.0.1:8080 to use your application" 23 | kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT 24 | {{- end }} 25 | 26 | 2. Specify your MemgraphDB instance IP address and port in Memgraph Lab GUI. 27 | -------------------------------------------------------------------------------- /charts/memgraph-lab/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Expand the name of the chart. 3 | */}} 4 | {{- define "memgraph-lab.name" -}} 5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 6 | {{- end }} 7 | 8 | {{/* 9 | Create a default fully qualified app name. 10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 11 | If release name contains chart name it will be used as a full name. 12 | */}} 13 | {{- define "memgraph-lab.fullname" -}} 14 | {{- if .Values.fullnameOverride }} 15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 16 | {{- else }} 17 | {{- $name := default .Chart.Name .Values.nameOverride }} 18 | {{- if contains $name .Release.Name }} 19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 20 | {{- else }} 21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | 26 | {{/* 27 | Create chart name and version as used by the chart label. 28 | */}} 29 | {{- define "memgraph-lab.chart" -}} 30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 31 | {{- end }} 32 | 33 | {{/* 34 | Common labels 35 | */}} 36 | {{- define "memgraph-lab.labels" -}} 37 | helm.sh/chart: {{ include "memgraph-lab.chart" . }} 38 | {{ include "memgraph-lab.selectorLabels" . }} 39 | {{- if .Chart.AppVersion }} 40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 41 | {{- end }} 42 | app.kubernetes.io/managed-by: {{ .Release.Service }} 43 | {{- end }} 44 | 45 | {{/* 46 | Selector labels 47 | */}} 48 | {{- define "memgraph-lab.selectorLabels" -}} 49 | app.kubernetes.io/name: {{ include "memgraph-lab.name" . 
}} 50 | app.kubernetes.io/instance: {{ .Release.Name }} 51 | {{- end }} 52 | 53 | {{/* 54 | Create the name of the service account to use 55 | */}} 56 | {{- define "memgraph-lab.serviceAccountName" -}} 57 | {{- if .Values.serviceAccount.create }} 58 | {{- default (include "memgraph-lab.fullname" .) .Values.serviceAccount.name }} 59 | {{- else }} 60 | {{- default "default" .Values.serviceAccount.name }} 61 | {{- end }} 62 | {{- end }} 63 | 64 | {{/* 65 | Get the base path env value 66 | */}} 67 | {{- define "getBasePath" -}} 68 | {{- range .Values.env }} 69 | {{- if eq .name "BASE_PATH" }} 70 | {{- .value }} 71 | {{- end }} 72 | {{- end }} 73 | {{- end }} 74 | -------------------------------------------------------------------------------- /charts/memgraph-lab/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ include "memgraph-lab.fullname" . }} 5 | labels: 6 | {{- include "memgraph-lab.labels" . | nindent 4 }} 7 | spec: 8 | {{- if not .Values.autoscaling.enabled }} 9 | replicas: {{ .Values.replicaCount }} 10 | {{- end }} 11 | selector: 12 | matchLabels: 13 | {{- include "memgraph-lab.selectorLabels" . | nindent 6 }} 14 | template: 15 | metadata: 16 | {{- with .Values.podAnnotations }} 17 | annotations: 18 | {{- toYaml . | nindent 8 }} 19 | {{- end }} 20 | labels: 21 | {{- include "memgraph-lab.selectorLabels" . | nindent 8 }} 22 | spec: 23 | {{- with .Values.imagePullSecrets }} 24 | imagePullSecrets: 25 | {{- toYaml . | nindent 8 }} 26 | {{- end }} 27 | serviceAccountName: {{ include "memgraph-lab.serviceAccountName" . }} 28 | securityContext: 29 | {{- toYaml .Values.podSecurityContext | nindent 8 }} 30 | containers: 31 | - name: {{ .Chart.Name }} 32 | securityContext: 33 | {{- toYaml .Values.securityContext | nindent 12 }} 34 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" 35 | imagePullPolicy: {{ .Values.image.pullPolicy }} 36 | ports: 37 | - name: http 38 | containerPort: {{ .Values.service.port }} 39 | protocol: TCP 40 | livenessProbe: 41 | httpGet: 42 | path: {{ include "getBasePath" . }}/check 43 | port: http 44 | readinessProbe: 45 | httpGet: 46 | path: {{ include "getBasePath" . }}/check 47 | port: http 48 | resources: 49 | {{- toYaml .Values.resources | nindent 12 }} 50 | {{- with .Values.env }} 51 | env: 52 | {{- range . }} 53 | - name: {{ .name }} 54 | value: "{{ .value }}" 55 | {{- end }} 56 | {{- end }} 57 | 58 | {{- with .Values.nodeSelector }} 59 | nodeSelector: 60 | {{- toYaml . | nindent 8 }} 61 | {{- end }} 62 | {{- with .Values.affinity }} 63 | affinity: 64 | {{- toYaml . | nindent 8 }} 65 | {{- end }} 66 | {{- with .Values.tolerations }} 67 | tolerations: 68 | {{- toYaml . | nindent 8 }} 69 | {{- end }} 70 | -------------------------------------------------------------------------------- /charts/memgraph-lab/templates/hpa.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.autoscaling.enabled }} 2 | apiVersion: autoscaling/v2 3 | kind: HorizontalPodAutoscaler 4 | metadata: 5 | name: {{ include "memgraph-lab.fullname" . }} 6 | labels: 7 | {{- include "memgraph-lab.labels" . | nindent 4 }} 8 | spec: 9 | scaleTargetRef: 10 | apiVersion: apps/v1 11 | kind: Deployment 12 | name: {{ include "memgraph-lab.fullname" . 
}} 13 | minReplicas: {{ .Values.autoscaling.minReplicas }} 14 | maxReplicas: {{ .Values.autoscaling.maxReplicas }} 15 | metrics: 16 | {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} 17 | - type: Resource 18 | resource: 19 | name: cpu 20 | target: 21 | type: Utilization 22 | averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} 23 | {{- end }} 24 | {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} 25 | - type: Resource 26 | resource: 27 | name: memory 28 | target: 29 | type: Utilization 30 | averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} 31 | {{- end }} 32 | {{- end }} 33 | -------------------------------------------------------------------------------- /charts/memgraph-lab/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.ingress.enabled -}} 2 | {{- $fullName := include "memgraph-lab.fullname" . -}} 3 | {{- $svcPort := .Values.service.port -}} 4 | {{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} 5 | {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} 6 | {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} 7 | {{- end }} 8 | {{- end }} 9 | {{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} 10 | apiVersion: networking.k8s.io/v1 11 | {{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} 12 | apiVersion: networking.k8s.io/v1beta1 13 | {{- else -}} 14 | apiVersion: extensions/v1beta1 15 | {{- end }} 16 | kind: Ingress 17 | metadata: 18 | name: {{ $fullName }} 19 | labels: 20 | {{- include "memgraph-lab.labels" . | nindent 4 }} 21 | {{- with .Values.ingress.annotations }} 22 | annotations: 23 | {{- toYaml . | nindent 4 }} 24 | {{- end }} 25 | spec: 26 | {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} 27 | ingressClassName: {{ .Values.ingress.className }} 28 | {{- end }} 29 | {{- if .Values.ingress.tls }} 30 | tls: 31 | {{- range .Values.ingress.tls }} 32 | - hosts: 33 | {{- range .hosts }} 34 | - {{ . | quote }} 35 | {{- end }} 36 | secretName: {{ .secretName }} 37 | {{- end }} 38 | {{- end }} 39 | rules: 40 | {{- range .Values.ingress.hosts }} 41 | - host: {{ .host | quote }} 42 | http: 43 | paths: 44 | {{- range .paths }} 45 | - path: {{ .path }} 46 | {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} 47 | pathType: {{ .pathType }} 48 | {{- end }} 49 | backend: 50 | {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} 51 | service: 52 | name: {{ $fullName }} 53 | port: 54 | number: {{ $svcPort }} 55 | {{- else }} 56 | serviceName: {{ $fullName }} 57 | servicePort: {{ $svcPort }} 58 | {{- end }} 59 | {{- end }} 60 | {{- end }} 61 | {{- end }} 62 | -------------------------------------------------------------------------------- /charts/memgraph-lab/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ include "memgraph-lab.fullname" . }} 5 | labels: 6 | {{- include "memgraph-lab.labels" . | nindent 4 }} 7 | {{- with .Values.service.annotations }} 8 | annotations: 9 | {{- toYaml . 
| nindent 4 }} 10 | {{- end }} 11 | spec: 12 | type: {{ .Values.service.type }} 13 | ports: 14 | - port: {{ .Values.service.port }} 15 | targetPort: http 16 | protocol: TCP 17 | name: http 18 | selector: 19 | {{- include "memgraph-lab.selectorLabels" . | nindent 4 }} 20 | -------------------------------------------------------------------------------- /charts/memgraph-lab/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ include "memgraph-lab.serviceAccountName" . }} 6 | labels: 7 | {{- include "memgraph-lab.labels" . | nindent 4 }} 8 | {{- with .Values.serviceAccount.annotations }} 9 | annotations: 10 | {{- toYaml . | nindent 4 }} 11 | {{- end }} 12 | {{- end }} 13 | -------------------------------------------------------------------------------- /charts/memgraph-lab/templates/tests/test-connection.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: "{{ include "memgraph-lab.fullname" . }}-test-connection" 5 | labels: 6 | {{- include "memgraph-lab.labels" . | nindent 4 }} 7 | annotations: 8 | "helm.sh/hook": test 9 | spec: 10 | containers: 11 | - name: wget 12 | image: busybox 13 | command: ['wget'] 14 | args: ['{{ include "memgraph-lab.fullname" . }}:{{ .Values.service.port }}'] 15 | restartPolicy: Never 16 | -------------------------------------------------------------------------------- /charts/memgraph-lab/values.yaml: -------------------------------------------------------------------------------- 1 | image: 2 | repository: memgraph/lab 3 | # Overrides the image tag whose default is v{{ .Chart.AppVersion }} 4 | tag: "" 5 | pullPolicy: IfNotPresent 6 | 7 | replicaCount: 1 8 | 9 | service: 10 | type: ClusterIP 11 | port: 3000 12 | targetPort: 3000 13 | protocol: TCP 14 | annotations: {} 15 | 16 | # Annotations to add to the statefulSet 17 | statefulSetAnnotations: {} 18 | # Annotations to add to the Pod 19 | podAnnotations: {} 20 | 21 | resources: {} 22 | # We usually recommend not to specify default resources and to leave this as a conscious 23 | # choice for the user. This also increases chances charts run on environments with little 24 | # resources, such as Minikube. If you do want to specify resources, uncomment the following 25 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 26 | # limits: 27 | # cpu: 100m 28 | # memory: 128Mi 29 | # requests: 30 | # cpu: 100m 31 | # memory: 128Mi 32 | 33 | serviceAccount: 34 | # Specifies whether a service account should be created 35 | create: true 36 | # Annotations to add to the service account 37 | annotations: {} 38 | # The name of the service account to use. 
39 | # If not set and create is true, a name is generated using the fullname template 40 | name: "" 41 | 42 | imagePullSecrets: [] 43 | nameOverride: "" 44 | fullnameOverride: "" 45 | 46 | # Supported env variables: https://memgraph.com/docs/data-visualization/install-and-connect#environment-variables 47 | env: 48 | - name: QUICK_CONNECT_MG_HOST 49 | value: memgraph-db 50 | - name: QUICK_CONNECT_MG_PORT 51 | value: "7687" 52 | - name: BASE_PATH 53 | value: "" 54 | 55 | podSecurityContext: {} 56 | 57 | securityContext: {} 58 | # capabilities: 59 | # drop: 60 | # - ALL 61 | # readOnlyRootFilesystem: true 62 | # runAsNonRoot: true 63 | # runAsUser: 1000 64 | 65 | ingress: 66 | enabled: false 67 | className: "" 68 | annotations: {} 69 | # kubernetes.io/ingress.class: nginx 70 | # kubernetes.io/tls-acme: "true" 71 | hosts: 72 | - host: chart-example.local 73 | paths: 74 | - path: / 75 | pathType: ImplementationSpecific 76 | tls: [] 77 | # - secretName: chart-example-tls 78 | # hosts: 79 | # - chart-example.local 80 | 81 | 82 | autoscaling: 83 | enabled: false 84 | minReplicas: 1 85 | maxReplicas: 100 86 | targetCPUUtilizationPercentage: 80 87 | # targetMemoryUtilizationPercentage: 80 88 | 89 | nodeSelector: {} 90 | 91 | tolerations: [] 92 | 93 | affinity: {} 94 | -------------------------------------------------------------------------------- /charts/memgraph/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /charts/memgraph/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: memgraph 3 | home: https://memgraph.com/ 4 | type: application 5 | version: 0.2.4 6 | appVersion: "3.3.0" 7 | description: MemgraphDB Helm Chart 8 | keywords: 9 | - graph 10 | - database 11 | - cypher 12 | - analytics 13 | icon: https://public-assets.memgraph.com/memgraph-logo/logo-large.png 14 | sources: 15 | - https://github.com/memgraph/memgraph 16 | maintainers: 17 | - name: Memgraph 18 | email: tech@memgraph.com 19 | -------------------------------------------------------------------------------- /charts/memgraph/README.md: -------------------------------------------------------------------------------- 1 | ## Memgraph Standalone Kubernetes Helm Chart 2 | A Helm Chart for deploying a standalone Memgraph database on Kubernetes. 3 | 4 | ## Installing the Memgraph Helm Chart 5 | To install the Memgraph Helm Chart, follow the steps below: 6 | ``` 7 | helm install <release-name> memgraph/memgraph 8 | ``` 9 | Replace `<release-name>` with a name of your choice for the release. 10 | 11 | ## Changing the default chart values 12 | To change the default chart values, run the command with the specified set of flags: 13 | ``` 14 | helm install <release-name> memgraph/memgraph --set <flag1>=<value1>,<flag2>=<value2>,...
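# For example (illustrative flag values only, not a recommendation; the release name and overrides are assumptions):
helm install my-memgraph memgraph/memgraph --set image.tag=3.3.0,service.boltPort=7687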
15 | ``` 16 | Or you can modify a `values.yaml` file and override the desired values: 17 | ``` 18 | helm install memgraph/memgraph -f values.yaml 19 | ``` 20 | 21 | ## Configuration Options 22 | 23 | The following table lists the configurable parameters of the Memgraph chart and their default values. 24 | 25 | | Parameter | Description | Default | 26 | | -------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------- | 27 | | `image.repository` | Memgraph Docker image repository | `memgraph/memgraph` | 28 | | `image.tag` | Specific tag for the Memgraph Docker image. Overrides the image tag whose default is chart version. | `""` (Defaults to chart's app version) | 29 | | `image.pullPolicy` | Image pull policy | `IfNotPresent` | 30 | | `useImagePullSecrets` | Override the default imagePullSecrets | `false` | 31 | | `imagePullSecrets` | Specify image pull secrets | `- name: regcred` | 32 | | `replicaCount` | Number of Memgraph instances to run. Note: no replication or HA support. | `1` | 33 | | `affinity.nodeKey` | Key for node affinity (Preferred) | `""` | 34 | | `affinity.nodeValue` | Value for node affinity (Preferred) | `""` | 35 | | `nodeSelector` | Constrain which nodes your Memgraph pod is eligible to be scheduled on, based on the labels on the nodes. Left empty by default. | `{}` | 36 | | `service.type` | Kubernetes service type | `ClusterIP` | 37 | | `service.enableBolt` | Enable Bolt protocol | `true` | 38 | | `service.boltPort` | Bolt protocol port | `7687` | 39 | | `service.enableWebsocketMonitoring` | Enable WebSocket monitoring | `false` | 40 | | `service.websocketPortMonitoring` | WebSocket monitoring port | `7444` | 41 | | `service.enableHttpMonitoring` | Enable HTTP monitoring | `false` | 42 | | `service.httpPortMonitoring` | HTTP monitoring port | `9091` | 43 | | `service.annotations` | Annotations to add to the service | `{}` | 44 | | `persistentVolumeClaim.createStorageClaim` | Enable creation of a Persistent Volume Claim for storage | `true` | 45 | | `persistentVolumeClaim.storageClassName` | Storage class name for the persistent volume claim | `""` | 46 | | `persistentVolumeClaim.storageSize` | Size of the persistent volume claim for storage | `10Gi` | 47 | | `persistentVolumeClaim.existingClaim` | Use an existing Persistent Volume Claim | `memgraph-0` | 48 | | `persistentVolumeClaim.storageVolumeName` | Name of an existing Volume to create a PVC for | `""` | 49 | | `persistentVolumeClaim.createLogStorage` | Enable creation of a Persistent Volume Claim for logs | `true` | 50 | | `persistentVolumeClaim.logStorageClassName` | Storage class name for the persistent volume claim for logs | `""` | 51 | | `persistentVolumeClaim.logStorageSize` | Size of the persistent volume claim for logs | `1Gi` | 52 | | `persistentVolumeClaim.createUserClaim` | Create a Dynamic Persistant Volume Claim for Configs, Certificates (e.g. Bolt cert ) and rest of User related files | `false` | 53 | | `persistentVolumeClaim.userStorageClassName` | Storage class name for the persistent volume claim for user storage | `""` | 54 | | `persistentVolumeClaim.userStorageSize` | Size of the persistent volume claim for user storage | `1Gi` | 55 | | `persistentVolumeClaim.userStorageAccessMode`| Storage Class Access Mode. If you need a different pod to add data into Memgraph (e.g. 
CSV files) set this to "ReadWriteMany" | `ReadWriteOnce` | 56 | | `persistentVolumeClaim.userMountPath` | Where to mount the `userStorageClass` you should set this variable if you are enabling the `UserClaim` | `""` | 57 | | `memgraphConfig` | List of strings defining Memgraph configuration settings | `["--also-log-to-stderr=true"]` | 58 | | `secrets.enabled` | Enable the use of Kubernetes secrets for Memgraph credentials | `false` | 59 | | `secrets.name` | The name of the Kubernetes secret containing Memgraph credentials | `memgraph-secrets` | 60 | | `secrets.userKey` | The key in the Kubernetes secret for the Memgraph user, the value is passed to the `MEMGRAPH_USER` env | `USER` | 61 | | `secrets.passwordKey` | The key in the Kubernetes secret for the Memgraph password, the value is passed to the `MEMGRAPH_PASSWORD` | `PASSWORD` | 62 | | `memgraphEnterpriseLicense` | Memgraph Enterprise License | `""` | 63 | | `memgraphOrganizationName` | Organization name for Memgraph Enterprise License | `""` | 64 | | `statefulSetAnnotations` | Annotations to add to the stateful set | `{}` | 65 | | `podAnnotations` | Annotations to add to the pod | `{}` | 66 | | `resources` | CPU/Memory resource requests/limits. Left empty by default. | `{}` | 67 | | `tolerations` | A toleration is applied to a pod and allows the pod to be scheduled on nodes with matching taints. Left empty by default. | `[]` | 68 | | `serviceAccount.create` | Specifies whether a service account should be created | `true` | 69 | | `serviceAccount.annotations` | Annotations to add to the service account | `{}` | 70 | | `serviceAccount.name` | The name of the service account to use. If not set and create is true, a name is generated. | `""` | 71 | | `container.terminationGracePeriodSeconds` | Grace period for pod termination | `1800` | 72 | | `container.livenessProbe.tcpSocket.port` | Port used for TCP connection. Should be the same as bolt port. | `7687` | 73 | | `container.livenessProbe.failureThreshold` | Failure threshold for liveness probe | `20` | 74 | | `container.livenessProbe.timeoutSeconds` | Initial delay for readiness probe | `10` | 75 | | `container.livenessProbe.periodSeconds` | Period seconds for readiness probe | `5` | 76 | | `container.readinessProbe.tcpSocket.port` | Port used for TCP connection. Should be the same as bolt port. | `7687` | 77 | | `container.readinessProbe.failureThreshold` | Failure threshold for readiness probe | `20` | 78 | | `container.readinessProbe.timeoutSeconds` | Initial delay for readiness probe | `10` | 79 | | `container.readinessProbe.periodSeconds` | Period seconds for readiness probe | `5` | 80 | | `container.startupProbe.tcpSocket.port` | Port used for TCP connection. Should be the same as bolt port. | `7687` | 81 | | `container.startupProbe.failureThreshold` | Failure threshold for startup probe | `1440` | 82 | | `container.startupProbe.periodSeconds` | Period seconds for startup probe | `10` | 83 | | `nodeSelectors` | Node selectors for pod. Left empty by default. | `{}` | 84 | | `customQueryModules` | List of custom Query modules that should be mounted to Memgraph Pod | `[]` | 85 | | `storageClass.create` | If set to true, new StorageClass will be created. 
| `false` | 86 | | `storageClass.name` | Name of the StorageClass | `"memgraph-generic-storage-class"` | 87 | | `storageClass.provisioner` | Provisioner for the StorageClass | `""` | 88 | | `storageClass.storageType` | Type of storage for the StorageClass | `""` | 89 | | `storageClass.fsType` | Filesystem type for the StorageClass | `""` | 90 | | `storageClass.reclaimPolicy` | Reclaim policy for the StorageClass | `Retain` | 91 | | `storageClass.volumeBindingMode` | Volume binding mode for the StorageClass | `Immediate` | 92 | | `sysctlInitContainer.enabled` | Enable the init container to set sysctl parameters | `true` | 93 | | `sysctlInitContainer.maxMapCount` | Value for `vm.max_map_count` to be set by the init container | `262144` | 94 | | `sysctlInitContainer.image.repository` | Busybox image repository | `library/busybox` | 95 | | `sysctlInitContainer.image.tag` | Specific tag for the Busybox Docker image | `latest` | 96 | | `sysctlInitContainer.image.pullPolicy` | Image pull policy for busybox | `IfNotPresent` | 97 | 98 | **Note:** It's often recommended not to specify default resources and leave it as a conscious choice for the user. If you want to specify resources, uncomment the following lines in your `values.yaml`, adjust them as necessary: 99 | 100 | ```yaml 101 | resources: 102 | limits: 103 | cpu: "100m" 104 | memory: "128Mi" 105 | requests: 106 | cpu: "100m" 107 | memory: "128Mi" 108 | 109 | ``` 110 | 111 | The `memgraphConfig` parameter should be a list of strings defining the values of Memgraph configuration settings. For example, this is how you can define `memgraphConfig` parameter in your `values.yaml`: 112 | 113 | ```yaml 114 | memgraphConfig: 115 | - "--also-log-to-stderr=true" 116 | - "--log-level=TRACE" 117 | - "--log-file=''" 118 | 119 | ``` 120 | 121 | 122 | If you are using the Memgraph user, make sure you have secrets set: 123 | 124 | ``` 125 | kubectl create secret generic memgraph-secrets --from-literal=USER=myuser --from-literal=PASSWORD=mypassword 126 | ``` 127 | 128 | For all available database settings, refer to the [Configuration settings reference guide](https://memgraph.com/docs/memgraph/reference-guide/configuration). 129 | -------------------------------------------------------------------------------- /charts/memgraph/memgraph.conf: -------------------------------------------------------------------------------- 1 | # Memgraph Configuration 2 | # 3 | # This is the main configuration file for Memgraph. You can modify this file to 4 | # suit your specific needs. Additional configuration can be specified by 5 | # including another configuration file, in a file pointed to by the 6 | # 'MEMGRAPH_CONFIG' environment variable or by passing arguments on the command 7 | # line. 8 | # 9 | # Each configuration setting is in the form: '--setting-name=value'. 10 | 11 | 12 | ## Audit 13 | 14 | # Interval (in milliseconds) used for flushing the audit log buffer. [int32] 15 | # --audit-buffer-flush-interval-ms=200 16 | 17 | # Maximum number of items in the audit log buffer. [int32] 18 | # --audit-buffer-size=100000 19 | 20 | # Set to true to enable audit logging. [bool] 21 | # --audit-enabled=false 22 | 23 | 24 | ## Auth 25 | 26 | # Set to false to disable creation of missing roles. [bool] 27 | # --auth-module-create-missing-role=true 28 | 29 | # Set to false to disable creation of missing users. [bool] 30 | # --auth-module-create-missing-user=true 31 | 32 | # Absolute path to the auth module executable that should be used. 
[string] 33 | # --auth-module-executable=/usr/lib/memgraph/auth_module/example.py 34 | 35 | # Set to false to disable management of roles through the auth module. [bool] 36 | # --auth-module-manage-roles=true 37 | 38 | # Timeout (in milliseconds) used when waiting for a response from the auth 39 | # module. [int32] 40 | # --auth-module-timeout-ms=10000 41 | 42 | # Set to false to disable null passwords. [bool] 43 | # --auth-password-permit-null=true 44 | 45 | # The regular expression that should be used to match the entire entered 46 | # password to ensure its strength. [string] 47 | # --auth-password-strength-regex=.+ 48 | 49 | # Set to the regular expression that each user or role name must fulfill. 50 | # [string] 51 | # --auth-user-or-role-name-regex=[a-zA-Z0-9_.+-@]+ 52 | 53 | 54 | ## Bolt 55 | 56 | # IP address on which the Bolt server should listen. [string] 57 | # --bolt-address=0.0.0.0 58 | 59 | # Certificate file which should be used for the Bolt server. [string] 60 | # --bolt-cert-file=/etc/memgraph/ssl/cert.pem 61 | 62 | # Key file which should be used for the Bolt server. [string] 63 | # --bolt-key-file=/etc/memgraph/ssl/key.pem 64 | 65 | # Number of workers used by the Bolt server. By default, this will be the number 66 | # of processing units available on the machine. [int32] 67 | # --bolt-num-workers= 68 | 69 | # Port on which the Bolt server should listen. [int32] 70 | # --bolt-port=7687 71 | 72 | # Server name which the database should send to the client in the Bolt INIT 73 | # message. [string] 74 | # --bolt-server-name-for-init= 75 | 76 | # Time in seconds after which inactive Bolt sessions will be closed. [int32] 77 | # --bolt-session-inactivity-timeout=1800 78 | 79 | 80 | ## Init 81 | 82 | # Path to cypherl file that is used for creating data after server starts. 83 | # [string] 84 | # --init-data-file= 85 | 86 | # Path to cypherl file that is used for configuring users and database schema 87 | # before server starts. [string] 88 | # --init-file= 89 | 90 | 91 | ## Log 92 | 93 | # Path to where the log should be stored. [string] 94 | --log-file=/var/log/memgraph/memgraph.log 95 | 96 | # Minimum log level. Allowed values: TRACE, DEBUG, INFO, WARNING, ERROR, 97 | # CRITICAL [string] 98 | --log-level=WARNING 99 | 100 | 101 | ## Memory 102 | 103 | # Total memory limit in MiB. Set to 0 to use the default values which are 100% 104 | # of the phyisical memory if the swap is enabled and 90% of the physical memory 105 | # otherwise. [uint64] 106 | --memory-limit=0 107 | 108 | # Memory warning threshold, in MB. If Memgraph detects there is less available 109 | # RAM it will log a warning. Set to 0 to disable. [uint64] 110 | # --memory-warning-threshold=1024 111 | 112 | 113 | ## Metrics 114 | 115 | # IP address on which the Memgraph server for exposing metrics should listen. 116 | # [string] 117 | # --metrics-address=0.0.0.0 118 | 119 | # Port on which the Memgraph server for exposing metrics should listen. [int32] 120 | # --metrics-port=9091 121 | 122 | 123 | ## Monitoring 124 | 125 | # IP address on which the websocket server for Memgraph monitoring should 126 | # listen. [string] 127 | # --monitoring-address=0.0.0.0 128 | 129 | # Port on which the websocket server for Memgraph monitoring should listen. 130 | # [int32] 131 | # --monitoring-port=7444 132 | 133 | 134 | ## Query 135 | 136 | # Use the cost-estimating query planner. [bool] 137 | # --query-cost-planner=true 138 | 139 | # Maximum allowed query execution time. Queries exceeding this limit will be 140 | # aborted. 
Value of 0 means no limit. [double] 141 | # --query-execution-timeout-sec=600 142 | 143 | # Maximum number of generated plans for a query. [uint64] 144 | # --query-max-plans=1000 145 | 146 | # Directory where modules with custom query procedures are stored. NOTE: 147 | # Multiple comma-separated directories can be defined. [string] 148 | --query-modules-directory=/usr/lib/memgraph/query_modules 149 | 150 | # Time to live for cached query plans, in seconds. [int32] 151 | # --query-plan-cache-ttl=60 152 | 153 | # Maximum count of indexed vertices which provoke indexed lookup and then expand 154 | # to existing, instead of a regular expand. Default is 10, to turn off use -1. 155 | # [int64] 156 | # --query-vertex-count-to-expand-existing=10 157 | 158 | 159 | ## Storage 160 | 161 | # Storage garbage collector interval (in seconds). [uint64] 162 | # --storage-gc-cycle-sec=30 163 | 164 | # The number of edges and vertices stored in a batch in a snapshot file. 165 | # [uint64] 166 | # --storage-items-per-batch=1000000 167 | 168 | # Controls whether the index creation can be done in a multithreaded fashion. 169 | # [bool] 170 | --storage-parallel-index-recovery=false 171 | 172 | # Controls whether edges have properties. [bool] 173 | --storage-properties-on-edges=true 174 | 175 | # Controls whether the storage recovers persisted data on startup. [bool] 176 | --storage-recover-on-startup=true 177 | 178 | # The number of threads used to recover persisted data from disk. [uint64] 179 | # --storage-recovery-thread-count=8 180 | 181 | # Storage snapshot creation interval (in seconds). Set to 0 to disable periodic 182 | # snapshot creation. [uint64] 183 | --storage-snapshot-interval-sec=300 184 | 185 | # Controls whether the storage creates another snapshot on exit. [bool] 186 | --storage-snapshot-on-exit=true 187 | 188 | # The number of snapshots that should always be kept. [uint64] 189 | --storage-snapshot-retention-count=3 190 | 191 | # Controls whether the storage uses write-ahead-logging. To enable WAL periodic 192 | # snapshots must be enabled. [bool] 193 | --storage-wal-enabled=true 194 | 195 | # Issue a 'fsync' call after this amount of transactions are written to the WAL 196 | # file. Set to 1 for fully synchronous operation. [uint64] 197 | # --storage-wal-file-flush-every-n-tx=100000 198 | 199 | # Minimum file size of each WAL file. [uint64] 200 | # --storage-wal-file-size-kib=20480 201 | 202 | 203 | ## Stream 204 | 205 | # Number of times to retry when a stream transformation fails to commit because 206 | # of conflicting transactions [uint32] 207 | # --stream-transaction-conflict-retries=30 208 | 209 | # Retry interval in milliseconds when a stream transformation fails to commit 210 | # because of conflicting transactions [uint32] 211 | # --stream-transaction-retry-interval=500 212 | 213 | 214 | ## Other 215 | 216 | # Controls whether LOAD CSV clause is allowed in queries. [bool] 217 | # --allow-load-csv=true 218 | 219 | # Path to directory in which to save all permanent data. [string] 220 | --data-directory=/var/lib/memgraph 221 | 222 | # Print usage and exit. [bool] 223 | # --h=false 224 | 225 | # Default isolation level used for the transactions. Allowed values: 226 | # SNAPSHOT_ISOLATION, READ_COMMITTED, READ_UNCOMMITTED [string] 227 | --isolation-level=SNAPSHOT_ISOLATION 228 | 229 | # List of default Kafka brokers as a comma separated list of broker host or 230 | # host:port. 
[string] 231 | # --kafka-bootstrap-servers= 232 | 233 | # The password encryption algorithm used for authentication. [string] 234 | # --password-encryption-algorithm=bcrypt 235 | 236 | # Default URL used while connecting to Pulsar brokers. [string] 237 | # --pulsar-service-url= 238 | 239 | # The time duration between two replica checks/pings. If < 1, replicas will NOT 240 | # be checked at all. NOTE: The MAIN instance allocates a new thread for each 241 | # REPLICA. [uint64] 242 | # --replication-replica-check-frequency-sec=1 243 | 244 | # Set to true to enable telemetry. We collect information about the running 245 | # system (CPU and memory information) and information about the database runtime 246 | # (vertex and edge counts and resource usage) to allow for easier improvement of 247 | # the product. [bool] 248 | --telemetry-enabled=true 249 | 250 | 251 | # Additional Configuration Inclusion 252 | # 253 | # You can include additional configuration files from this file. Additional 254 | # files are processed after this file. Settings that are set in the additional 255 | # files will override previously set values. Additional configuration files are 256 | # specified with the '--flag-file' flag. 257 | # 258 | # Example: 259 | # 260 | # --flag-file=another.conf 261 | -------------------------------------------------------------------------------- /charts/memgraph/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | Thank you for installing Memgraph! 🎉 2 | 3 | Connect to your Memgraph instance via Lab, mgconsole, or any other client. By default, Memgraph listens on port 7687 for Bolt connections inside the cluster. 4 | Make sure you are connecting to the correct IP address of your instance. For details, check the configuration on your cloud provider (AWS, GCP, Azure, etc.). 5 | 6 | Important notes: 7 | - Always pin the Memgraph image tag to a specific version. Setting the image tag to "latest" can, in combination with the respective pullPolicy of the image, trigger automatic upgrades on pod restarts. 8 | - Check the requested and maximum memory limits. Setting Memgraph's --memory-limit flag to an amount higher than the memory your pod has been given can trigger pod restarts because of preemptive eviction. 9 | 10 | If you are connecting via mgconsole, you can use the following command: 11 | 12 | mgconsole --host <instance-ip> --port <bolt-port> 13 | 14 | If you are connecting via Lab, specify your instance IP address and port in the Memgraph Lab GUI. 15 | -------------------------------------------------------------------------------- /charts/memgraph/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Expand the name of the chart. 3 | */}} 4 | {{- define "memgraph.name" -}} 5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 6 | {{- end }} 7 | 8 | {{/* 9 | Create a default fully qualified app name. 10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 11 | If release name contains chart name it will be used as a full name.
12 | */}} 13 | {{- define "memgraph.fullname" -}} 14 | {{- if .Values.fullnameOverride }} 15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 16 | {{- else }} 17 | {{- $name := default .Chart.Name .Values.nameOverride }} 18 | {{- if contains $name .Release.Name }} 19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 20 | {{- else }} 21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | 26 | {{/* 27 | Create chart name and version as used by the chart label. 28 | */}} 29 | {{- define "memgraph.chart" -}} 30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 31 | {{- end }} 32 | 33 | {{/* 34 | Common labels 35 | */}} 36 | {{- define "memgraph.labels" -}} 37 | helm.sh/chart: {{ include "memgraph.chart" . }} 38 | {{ include "memgraph.selectorLabels" . }} 39 | {{- if .Chart.AppVersion }} 40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 41 | {{- end }} 42 | app.kubernetes.io/managed-by: {{ .Release.Service }} 43 | {{- end }} 44 | 45 | {{/* 46 | Selector labels 47 | */}} 48 | {{- define "memgraph.selectorLabels" -}} 49 | app.kubernetes.io/name: {{ include "memgraph.name" . }} 50 | app.kubernetes.io/instance: {{ .Release.Name }} 51 | {{- end }} 52 | 53 | {{/* 54 | Create the name of the service account to use 55 | */}} 56 | {{- define "memgraph.serviceAccountName" -}} 57 | {{- if .Values.serviceAccount.create }} 58 | {{- default (include "memgraph.fullname" .) .Values.serviceAccount.name }} 59 | {{- else }} 60 | {{- default "default" .Values.serviceAccount.name }} 61 | {{- end }} 62 | {{- end }} 63 | 64 | {{- define "container.readinessProbe" -}} 65 | readinessProbe: 66 | tcpSocket: 67 | port: {{ .tcpSocket.port }} 68 | failureThreshold: {{ .failureThreshold }} 69 | timeoutSeconds: {{ .timeoutSeconds }} 70 | periodSeconds: {{ .periodSeconds }} 71 | {{- end }} 72 | 73 | 74 | {{- define "container.livenessProbe" -}} 75 | livenessProbe: 76 | tcpSocket: 77 | port: {{ .tcpSocket.port }} 78 | failureThreshold: {{ .failureThreshold }} 79 | timeoutSeconds: {{ .timeoutSeconds }} 80 | periodSeconds: {{ .periodSeconds }} 81 | {{- end }} 82 | 83 | 84 | {{- define "container.startupProbe" -}} 85 | startupProbe: 86 | tcpSocket: 87 | port: {{ .tcpSocket.port }} 88 | failureThreshold: {{ .failureThreshold }} 89 | timeoutSeconds: {{ .timeoutSeconds }} 90 | periodSeconds: {{ .periodSeconds }} 91 | {{- end }} 92 | -------------------------------------------------------------------------------- /charts/memgraph/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ include "memgraph.fullname" . }} 5 | {{- with .Values.service.labels }} 6 | labels: 7 | {{- toYaml . | nindent 4 }} 8 | {{- end }} 9 | {{- with .Values.service.annotations }} 10 | annotations: 11 | {{- toYaml . 
| nindent 4 }} 12 | {{- end }} 13 | spec: 14 | type: {{ .Values.service.type }} 15 | ports: 16 | {{- if .Values.service.boltPort }} 17 | - port: {{ .Values.service.boltPort }} 18 | targetPort: {{ .Values.service.boltPort }} 19 | protocol: TCP 20 | name: tcp-bolt-port 21 | {{- end }} 22 | {{- if .Values.service.enableWebsocketMonitoring }} 23 | - port: {{ .Values.service.websocketPortMonitoring }} 24 | targetPort: {{ .Values.service.websocketPortMonitoring }} 25 | protocol: TCP 26 | name: tcp-websocket-monitoring-port 27 | {{- end }} 28 | {{- if .Values.service.enableHttpMonitoring }} 29 | - port: {{ .Values.service.httpPortMonitoring }} 30 | targetPort: {{ .Values.service.httpPortMonitoring }} 31 | protocol: TCP 32 | name: http-monitoring-port 33 | {{- end }} 34 | selector: 35 | {{- include "memgraph.selectorLabels" . | nindent 4 }} 36 | -------------------------------------------------------------------------------- /charts/memgraph/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ include "memgraph.serviceAccountName" . }} 6 | labels: 7 | {{- include "memgraph.labels" . | nindent 4 }} 8 | {{- with .Values.serviceAccount.annotations }} 9 | annotations: 10 | {{- toYaml . | nindent 4 }} 11 | {{- end }} 12 | {{- end }} 13 | -------------------------------------------------------------------------------- /charts/memgraph/templates/statefulset.yaml: -------------------------------------------------------------------------------- 1 | # templates/statefulset.yaml 2 | apiVersion: apps/v1 3 | kind: StatefulSet 4 | metadata: 5 | name: {{ include "memgraph.fullname" . }} 6 | labels: 7 | {{- include "memgraph.labels" . | nindent 4 }} 8 | {{- with .Values.statefulSetAnnotations }} 9 | annotations: 10 | {{- toYaml . | nindent 4 }} 11 | {{- end }} 12 | spec: 13 | replicas: {{ .Values.replicaCount }} 14 | serviceName: {{ include "memgraph.fullname" . }} 15 | selector: 16 | matchLabels: 17 | {{- include "memgraph.selectorLabels" . | nindent 6 }} 18 | podManagementPolicy: OrderedReady 19 | updateStrategy: 20 | type: RollingUpdate 21 | template: 22 | metadata: 23 | labels: 24 | {{- include "memgraph.labels" . | nindent 8 }} 25 | {{- with .Values.podAnnotations }} 26 | annotations: 27 | {{- toYaml . | nindent 4 }} 28 | {{- end }} 29 | 30 | spec: 31 | {{- if .Values.serviceAccount.create }} 32 | serviceAccount: {{ include "memgraph.serviceAccountName" . }} 33 | {{- else if .Values.serviceAccount.name }} 34 | serviceAccount: {{ .Values.serviceAccount.name | quote }} 35 | {{- end }} 36 | initContainers: 37 | - name: init-volume-mounts 38 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" 39 | volumeMounts: 40 | {{- if .Values.persistentVolumeClaim.createStorageClaim }} 41 | - name: {{ include "memgraph.fullname" . }}-lib-storage 42 | mountPath: /var/lib/memgraph 43 | {{- end }} 44 | {{- if .Values.persistentVolumeClaim.createLogStorage }} 45 | - name: {{ include "memgraph.fullname" . }}-log-storage 46 | mountPath: /var/log/memgraph 47 | {{- end }} 48 | {{- if .Values.persistentVolumeClaim.createUserClaim }} 49 | - name: {{ include "memgraph.fullname" . }}-user-storage 50 | mountPath: {{ .Values.persistentVolumeClaim.userMountPath }} 51 | {{- end }} 52 | {{- if .Values.persistentVolumeClaim.createCoreDumpsClaim }} 53 | - name: {{ include "memgraph.fullname" .
}}-core-dumps-storage 54 | mountPath: {{ .Values.persistentVolumeClaim.coreDumpsMountPath }} 55 | {{- end }} 56 | command: ["/bin/sh", "-c"] 57 | # The permissions have to be explicitly adjusted because under some 58 | # k8s environments permissions set under 59 | # https://github.com/memgraph/memgraph/blob/master/release/debian/postinst 60 | # get overwritten. Sometimes, PVC are created using new partitions -> 61 | # lost+found directory should not change its permissions so it has to 62 | # be excluded. 63 | args: 64 | - > 65 | {{- if .Values.persistentVolumeClaim.createStorageClaim }} 66 | cd /var/lib/memgraph; 67 | find . -path ./lost+found -prune -o -exec chown {{ .Values.memgraphUserGroupId }} {} +; 68 | {{- end }} 69 | {{- if .Values.persistentVolumeClaim.createLogStorage }} 70 | cd /var/log/memgraph; 71 | find . -path ./lost+found -prune -o -exec chown {{ .Values.memgraphUserGroupId }} {} +; 72 | {{- end }} 73 | {{- if .Values.persistentVolumeClaim.createUserClaim }} 74 | cd {{ .Values.persistentVolumeClaim.userMountPath }}; 75 | find . -path ./lost+found -prune -o -exec chown {{ .Values.memgraphUserGroupId }} {} +; 76 | {{- end }} 77 | {{- if .Values.persistentVolumeClaim.createCoreDumpsClaim }} 78 | cd {{ .Values.persistentVolumeClaim.coreDumpsMountPath }}; 79 | find . -path ./lost+found -prune -o -exec chown {{ .Values.memgraphUserGroupId }} {} +; 80 | {{- end }} 81 | securityContext: 82 | readOnlyRootFilesystem: true 83 | runAsUser: 0 84 | capabilities: 85 | drop: ["ALL"] 86 | add: ["CHOWN"] 87 | 88 | {{- if .Values.sysctlInitContainer.enabled }} 89 | - name: init-sysctl 90 | image: "{{ .Values.sysctlInitContainer.image.repository }}:{{ .Values.sysctlInitContainer.image.tag }}" 91 | imagePullPolicy: {{ .Values.sysctlInitContainer.image.pullPolicy }} 92 | command: ['sh', '-c', 'sysctl -w vm.max_map_count={{ .Values.sysctlInitContainer.maxMapCount }}'] 93 | securityContext: 94 | privileged: true 95 | runAsUser: 0 96 | {{- end }} 97 | 98 | {{- if .Values.persistentVolumeClaim.createCoreDumpsClaim }} 99 | - name: init-core-dumps 100 | image: busybox 101 | command: ['/bin/sh', '-c'] 102 | args: 103 | - > 104 | echo '{{ .Values.persistentVolumeClaim.coreDumpsMountPath }}/core.%e.%p.%t.%s' | tee /proc/sys/kernel/core_pattern; 105 | if [ -d /proc/sys/kernel/yama ]; then echo '0' | tee /proc/sys/kernel/yama/ptrace_scope; fi 106 | securityContext: 107 | privileged: true 108 | runAsUser: 0 109 | {{- end }} 110 | 111 | terminationGracePeriodSeconds: {{ .Values.container.terminationGracePeriodSeconds }} 112 | securityContext: 113 | {{- if .Values.useImagePullSecrets }} 114 | imagePullSecrets: 115 | {{- toYaml .Values.imagePullSecrets | nindent 4 }} 116 | {{- end }} 117 | volumes: 118 | - name: {{ include "memgraph.fullname" . }}-lib-storage 119 | persistentVolumeClaim: 120 | {{- if .Values.persistentVolumeClaim.createStorageClaim }} 121 | claimName: {{ include "memgraph.fullname" . }}-lib-storage 122 | {{- else }} 123 | claimName: {{ .Values.persistentVolumeClaim.existingClaim }} 124 | {{- end}} 125 | 126 | {{- if .Values.persistentVolumeClaim.createLogStorage }} 127 | - name: {{ include "memgraph.fullname" . }}-log-storage 128 | persistentVolumeClaim: 129 | claimName: {{ include "memgraph.fullname" . }}-log-storage 130 | {{- end }} 131 | 132 | {{- if .Values.persistentVolumeClaim.createUserClaim }} 133 | - name: {{ include "memgraph.fullname" . }}-user-storage 134 | persistentVolumeClaim: 135 | claimName: {{ include "memgraph.fullname" . 
}}-user-storage 136 | {{- end }} 137 | {{- if .Values.persistentVolumeClaim.createCoreDumpsClaim }} 138 | - name: {{ include "memgraph.fullname" . }}-core-dumps-storage 139 | persistentVolumeClaim: 140 | claimName: {{ include "memgraph.fullname" . }}-core-dumps-storage 141 | {{- end }} 142 | {{- range .Values.customQueryModules }} 143 | - name: {{ .volume | quote }} 144 | configMap: 145 | name: {{ .volume | quote }} 146 | {{- end }} 147 | 148 | 149 | containers: 150 | - name: memgraph 151 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" 152 | args: 153 | {{- range .Values.memgraphConfig }} 154 | - {{ . | quote }} 155 | {{- end }} 156 | {{- if not .Values.persistentVolumeClaim.createLogStorage }} 157 | - "--log-file=" 158 | {{- end }} 159 | imagePullPolicy: {{ .Values.image.pullPolicy }} 160 | ports: 161 | - name: bolt 162 | containerPort: {{ .Values.service.boltPort }} 163 | - name: websocket 164 | containerPort: {{ .Values.service.websocketPortMonitoring }} 165 | - name: http 166 | containerPort: {{ .Values.service.httpPortMonitoring }} 167 | securityContext: 168 | allowPrivilegeEscalation: false 169 | capabilities: 170 | drop: [ "ALL" ] 171 | # Run by 'memgraph' user as specified in the Dockerfile 172 | {{- include "container.readinessProbe" .Values.container.readinessProbe | nindent 10 }} 173 | {{- include "container.livenessProbe" .Values.container.livenessProbe | nindent 10 }} 174 | {{- include "container.startupProbe" .Values.container.startupProbe | nindent 10 }} 175 | {{- with .Values.resources }} 176 | resources: 177 | {{- toYaml . | nindent 12 }} 178 | {{- end }} 179 | env: 180 | {{- if .Values.secrets.enabled }} 181 | - name: MEMGRAPH_USER 182 | valueFrom: 183 | secretKeyRef: 184 | name: {{ .Values.secrets.name }} 185 | key: {{ .Values.secrets.userKey }} 186 | - name: MEMGRAPH_PASSWORD 187 | valueFrom: 188 | secretKeyRef: 189 | name: {{ .Values.secrets.name }} 190 | key: {{ .Values.secrets.passwordKey }} 191 | {{- end }} 192 | {{ if .Values.memgraphEnterpriseLicense }} 193 | - name: MEMGRAPH_ENTERPRISE_LICENSE 194 | value: {{ .Values.memgraphEnterpriseLicense }} 195 | - name: MEMGRAPH_ORGANIZATION_NAME 196 | value: {{ .Values.memgraphOrganizationName}} 197 | {{- end}} 198 | volumeMounts: 199 | - name: {{ include "memgraph.fullname" . }}-lib-storage 200 | mountPath: /var/lib/memgraph 201 | {{- if .Values.persistentVolumeClaim.createLogStorage }} 202 | - name: {{ include "memgraph.fullname" . }}-log-storage 203 | mountPath: /var/log/memgraph 204 | {{- end }} 205 | {{- if .Values.persistentVolumeClaim.createCoreDumpsClaim }} 206 | - name: {{ include "memgraph.fullname" . }}-core-dumps-storage 207 | mountPath: {{ .Values.persistentVolumeClaim.coreDumpsMountPath }} 208 | {{- end }} 209 | {{- if .Values.persistentVolumeClaim.createUserClaim }} 210 | - name: {{ include "memgraph.fullname" . }}-user-storage 211 | mountPath: {{ .Values.persistentVolumeClaim.userMountPath }} 212 | {{- end }} 213 | {{- range .Values.customQueryModules }} 214 | - name: {{ .volume | quote }} 215 | mountPath: {{ printf "/var/lib/memgraph/internal_modules/%s" .file }} 216 | subPath: {{ .file | quote }} 217 | {{- end }} 218 | {{- with .Values.nodeSelector }} 219 | nodeSelector: 220 | {{- toYaml . | nindent 8 }} 221 | {{- end }} 222 | {{- with .Values.tolerations }} 223 | tolerations: 224 | {{- toYaml . 
| nindent 8 }} 225 | {{- end }} 226 | {{- if and .Values.affinity.nodeKey .Values.affinity.nodeValue }} 227 | affinity: 228 | nodeAffinity: 229 | preferredDuringSchedulingIgnoredDuringExecution: 230 | - weight: 100 231 | preference: 232 | matchExpressions: 233 | - key: {{ .Values.affinity.nodeKey }} 234 | operator: In 235 | values: 236 | - {{ .Values.affinity.nodeValue }} 237 | {{- end }} 238 | volumeClaimTemplates: 239 | {{- if .Values.persistentVolumeClaim.createStorageClaim }} 240 | - metadata: 241 | name: {{ include "memgraph.fullname" . }}-lib-storage 242 | spec: 243 | accessModes: 244 | - "ReadWriteOnce" 245 | {{- if .Values.persistentVolumeClaim.storageClassName }} 246 | storageClassName: {{ .Values.persistentVolumeClaim.storageClassName }} 247 | {{- end }} 248 | resources: 249 | requests: 250 | storage: {{ .Values.persistentVolumeClaim.storageSize }} 251 | {{- if .Values.persistentVolumeClaim.storageVolumeName }} 252 | volumeName: {{ .Values.persistentVolumeClaim.storageVolumeName }} 253 | {{- end }} 254 | {{- end }} 255 | 256 | {{- if .Values.persistentVolumeClaim.createLogStorage }} 257 | - metadata: 258 | name: {{ include "memgraph.fullname" . }}-log-storage 259 | spec: 260 | accessModes: 261 | - "ReadWriteOnce" 262 | {{- if .Values.persistentVolumeClaim.logStorageClassName }} 263 | storageClassName: {{ .Values.persistentVolumeClaim.logStorageClassName }} 264 | {{- end }} 265 | resources: 266 | requests: 267 | storage: {{ .Values.persistentVolumeClaim.logStorageSize }} 268 | {{- end }} 269 | 270 | {{- if .Values.persistentVolumeClaim.createCoreDumpsClaim }} 271 | - metadata: 272 | name: {{ include "memgraph.fullname" . }}-core-dumps-storage 273 | spec: 274 | accessModes: 275 | - "ReadWriteOnce" 276 | {{- if .Values.persistentVolumeClaim.coreDumpsStorageClassName }} 277 | storageClassName: {{ .Values.persistentVolumeClaim.coreDumpsStorageClassName }} 278 | {{- end }} 279 | resources: 280 | requests: 281 | storage: {{ .Values.persistentVolumeClaim.coreDumpsStorageSize }} 282 | {{- end }} 283 | 284 | {{- if .Values.persistentVolumeClaim.createUserClaim }} 285 | - metadata: 286 | name: {{ include "memgraph.fullname" . 
}}-user-storage 287 | spec: 288 | accessModes: 289 | - {{ .Values.persistentVolumeClaim.userStorageAccessMode }} 290 | {{- if .Values.persistentVolumeClaim.userStorageClassName }} 291 | storageClassName: {{ .Values.persistentVolumeClaim.userStorageClassName }} 292 | {{- end }} 293 | resources: 294 | requests: 295 | storage: {{ .Values.persistentVolumeClaim.userStorageSize }} 296 | {{- end }} 297 | -------------------------------------------------------------------------------- /charts/memgraph/templates/storageclass.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.storageClass.create }} 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | name: {{ .Values.storageClass.name }} 6 | provisioner: {{ .Values.storageClass.provisioner }} 7 | parameters: 8 | type: {{ .Values.storageClass.storageType }} 9 | fsType: {{ .Values.storageClass.fsType }} 10 | reclaimPolicy: {{ .Values.storageClass.reclaimPolicy }} 11 | volumeBindingMode: {{ .Values.storageClass.volumeBindingMode }} 12 | {{- end }} 13 | -------------------------------------------------------------------------------- /charts/memgraph/templates/tests/test-connection.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: "{{ include "memgraph.fullname" . }}-test" 5 | labels: 6 | {{- include "memgraph.labels" . | nindent 4 }} 7 | annotations: 8 | "helm.sh/hook": test 9 | spec: 10 | template: 11 | spec: 12 | containers: 13 | - name: memgraph-test 14 | image: memgraph/memgraph:3.0.0 15 | command: ["/bin/sh", "-c"] 16 | args: 17 | - | 18 | echo "Running test connection to Memgraph"; 19 | echo "RETURN 0;" | mgconsole --username memgraph --password memgraph --host {{ include "memgraph.fullname" . }} --port 7687; 20 | restartPolicy: Never 21 | backoffLimit: 4 22 | -------------------------------------------------------------------------------- /charts/memgraph/values.yaml: -------------------------------------------------------------------------------- 1 | image: 2 | repository: memgraph/memgraph 3 | # Overrides the image tag whose default is v{{ .Chart.AppVersion }} 4 | # It is bad practice to set the image tag to latest because it can trigger an automatic upgrade of the charts 5 | # with some of the pullPolicy values. Please consider pinning the tag to a specific Memgraph version 6 | tag: 7 | pullPolicy: IfNotPresent 8 | 9 | ## Override the default imagePullSecrets 10 | useImagePullSecrets: false 11 | imagePullSecrets: 12 | - name: regcred 13 | 14 | replicaCount: 1 15 | 16 | ## Node Affinity Preferred 17 | # By setting these parameters, pods are PREFERABLY scheduled on nodes whose labels match the given key and value, and only then on other nodes. 
18 | # nodeKey: "nodegroup" is the name of the label key 19 | # The operator is In 20 | # nodeValue: "memgraph" is the value of the label key 21 | affinity: 22 | nodeKey: 23 | nodeValue: 24 | 25 | 26 | nodeSelector: {} 27 | 28 | tolerations: [] 29 | 30 | service: 31 | ## ClusterIP, NodePort, LoadBalancer 32 | # ClusterIP keeps the service internal to the cluster, which makes it secure 33 | # NodePort creates an external port (in the 30000-32767 range) accessible on all the nodes, and via public IPs if the nodes are not in a VPC 34 | # LoadBalancer is compatible with cloud providers on port: 80 without SSL, redirected to 7687 35 | type: ClusterIP 36 | 37 | # Bolt Port 38 | enableBolt: true 39 | boltPort: 7687 # NOTE: Make sure to change the port in the probes if you change this value. 40 | 41 | # Websocket Monitoring 42 | enableWebsocketMonitoring: false 43 | websocketPortMonitoring: 7444 44 | 45 | # HTTP Monitoring 46 | enableHttpMonitoring: false 47 | httpPortMonitoring: 9091 48 | annotations: {} 49 | labels: {} 50 | 51 | persistentVolumeClaim: 52 | ## createStorageClaim `true` will create a Persistent Volume Claim for each StatefulSet replica 53 | ## `false` will let you choose an existing Persistent Volume Claim or will create one from an existing volume 54 | createStorageClaim: true 55 | ## Using a Storage Class Name with policy `retain` will keep the Persistent Volume Claim and the Volume until you manually delete them 56 | ## If you use a Storage Class Name with policy `delete`, the Persistent Volume Claim and Volume will be deleted when the helm release is deleted 57 | storageClassName: 58 | ## Storage Size must be at minimum 4x the maximum size of your Dataset to accommodate Snapshots 59 | ## See documentation for choosing the right size depending on the number of Snapshots you want to keep 60 | ## The default is 3 snapshots, plus space to create a new one and to store WAL files 61 | storageSize: 10Gi 62 | 63 | ## if `createStorageClaim` is `false` you can choose to use an existing Persistent Volume Claim 64 | ## Write the name of the existing Persistent Volume Claim 65 | existingClaim: memgraph-0 66 | ## If you want to create a Persistent Volume Claim for an existing Volume 67 | storageVolumeName: 68 | 69 | ## Create a Persistent Volume Claim for Logs; if you use a Storage Class Name with policy `retain`, the logs will be kept until you manually delete them 70 | # `false` will only write logs to stdout / stderr 71 | createLogStorage: true 72 | logStorageClassName: 73 | logStorageSize: 1Gi 74 | 75 | ## Create a Dynamic Persistent Volume Claim for Configs, Certificates (e.g. Bolt cert) and the rest of the user-related files 76 | createUserClaim: false 77 | userStorageClassName: 78 | userStorageSize: 1Gi 79 | userStorageAccessMode: "ReadWriteOnce" 80 | userMountPath: 81 | 82 | ## Create a Persistent Volume Claim for core dumps. 
83 | createCoreDumpsClaim: false 84 | coreDumpsStorageClassName: 85 | coreDumpsStorageSize: 10Gi 86 | coreDumpsMountPath: /var/core/memgraph 87 | 88 | # Default Storage Class for data and logs; defaults are for Minikube, make sure to change it for production deployments 89 | # Examples provisioner: Minikube(k8s.io/minikube-hostpath) AWS (ebs.csi.aws.com), GCP (pd.csi.storage.gke.io), Azure (disk.csi.azure.com) 90 | # Examples storageType: Minikube(hostPath) AWS (gp2), GCP (pd-standard), Azure (StandardSSD_LRS) 91 | 92 | storageClass: 93 | create: false 94 | name: memgraph-generic-storage-class 95 | provisioner: "k8s.io/minikube-hostpath" 96 | storageType: "hostPath" 97 | fsType: ext4 98 | reclaimPolicy: Retain 99 | volumeBindingMode: Immediate 100 | 101 | memgraphConfig: 102 | # If setting the --memory-limit flag, check that the resources given to the pod are larger than the actual memory limit you give to Memgraph. 103 | # Setting Memgraph's memory limit higher than the available resources can trigger pod eviction and restarts before Memgraph can raise a query exception and keep 104 | # the pod running. For further information, check the `resources` section in this file about setting pod memory and cpu limits. 105 | - "--also-log-to-stderr=true" 106 | 107 | # The explicit user and group setup is required because, at init container 108 | # time, the user has not been created yet. This is fine because both the 109 | # Memgraph and Mage images hard-code the user and group id. The 110 | # config is used to chown the mount paths of the user storage and core dumps claims. 111 | memgraphUserGroupId: "101:103" 112 | 113 | secrets: 114 | enabled: false 115 | name: memgraph-secrets 116 | userKey: USER 117 | passwordKey: PASSWORD 118 | 119 | ## Memgraph Enterprise License 120 | # memgraphEnterpriseLicense: "" 121 | # memgraphOrganizationName: "" 122 | 123 | memgraphEnterpriseLicense: 124 | memgraphOrganizationName: 125 | 126 | # Annotations to add to the statefulSet 127 | statefulSetAnnotations: {} 128 | # Annotations to add to the Pod 129 | podAnnotations: {} 130 | 131 | resources: {} 132 | # We usually recommend not to specify default resources and to leave this as a conscious 133 | # choice for the user. This also increases chances charts run on environments with little 134 | # resources, such as Minikube. If you do want to specify resources, uncomment the following 135 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 136 | # limits: 137 | # cpu: 100m 138 | # memory: 128Mi 139 | # requests: 140 | # cpu: 100m 141 | # memory: 128Mi 142 | 143 | serviceAccount: 144 | # Specifies whether a service account should be created 145 | # If set to false and the name is provided, this service account must exist 146 | create: true 147 | # Annotations to add to the service account 148 | annotations: {} 149 | # The name of the service account to use. 150 | # If not set and create is true, a name is generated using the fullname template 151 | name: "" 152 | 153 | container: 154 | terminationGracePeriodSeconds: 1800 155 | # When a container is ready to be used. Disabled until startupProbe succeeds. 156 | readinessProbe: 157 | tcpSocket: 158 | port: 7687 159 | failureThreshold: 20 160 | timeoutSeconds: 10 161 | periodSeconds: 5 162 | # To know when a container needs to be restarted. 163 | # Disabled until startupProbe succeeds. 
164 | livenessProbe: 165 | tcpSocket: 166 | port: 7687 167 | failureThreshold: 20 168 | timeoutSeconds: 10 169 | periodSeconds: 5 170 | # When restoring Memgraph from a backup, it is important to give the app enough time to start. Here, it is set to 2h by default. 171 | startupProbe: 172 | tcpSocket: 173 | port: 7687 174 | failureThreshold: 1440 175 | periodSeconds: 5 176 | 177 | # List of custom query modules to be mounted into the app. 178 | # These will be loaded automatically, on startup. 179 | # Each module must be exposed by a ConfigMap under a specific file name. 180 | customQueryModules: [] 181 | 182 | # Must be an existing ConfigMap 183 | # - volume: "" 184 | # Must be present in the ConfigMap referenced with `volume` 185 | # file: "" 186 | 187 | 188 | # If you are experiencing issues with the sysctlInitContainer, you can disable it here. 189 | # It increases vm.max_map_count, which is necessary for high memory loads in Memgraph. 190 | # If a pod is crashing with "Max virtual memory areas vm.max_map_count is too low", 191 | # you can increase the maxMapCount value. 192 | # You can find the proper value for this parameter by reading 193 | # https://memgraph.com/docs/database-management/system-configuration#recommended-values-for-the-vmmax_map_count-parameter 194 | sysctlInitContainer: 195 | enabled: true 196 | maxMapCount: 262144 197 | image: 198 | repository: library/busybox 199 | tag: latest 200 | pullPolicy: IfNotPresent 201 | -------------------------------------------------------------------------------- /docker-compose/HA_register.cypher: -------------------------------------------------------------------------------- 1 | ADD COORDINATOR 2 WITH CONFIG {"bolt_server": "coord2:7691", "coordinator_server": "coord2:10112"}; 2 | ADD COORDINATOR 3 WITH CONFIG {"bolt_server": "coord3:7692", "coordinator_server": "coord3:10113"}; 3 | 4 | REGISTER INSTANCE instance_1 WITH CONFIG {"bolt_server": "instance1:7687", "management_server": "instance1:10011", "replication_server": "instance1:10001"}; 5 | REGISTER INSTANCE instance_2 WITH CONFIG {"bolt_server": "instance2:7688", "management_server": "instance2:10012", "replication_server": "instance2:10002"}; 6 | REGISTER INSTANCE instance_3 WITH CONFIG {"bolt_server": "instance3:7689", "management_server": "instance3:10013", "replication_server": "instance3:10003"}; 7 | SET INSTANCE instance_3 TO MAIN; 8 | -------------------------------------------------------------------------------- /docker-compose/README.md: -------------------------------------------------------------------------------- 1 | ## Instructions 2 | 3 | This directory contains everything needed to run your own highly available Memgraph cluster with a single command, `docker compose up`. 4 | The only thing you need to do is add your `ORGANIZATION NAME` and `ENTERPRISE LICENSE` to the `license.cypher` file. 
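Once those values are filled in, a minimal smoke test could look like the sketch below. It assumes the coordinator keeps its default in-container Bolt port (7687) and relies on `mgconsole`, which ships in the `memgraph/memgraph` image:

```bash
# Start all coordinators and data instances in the background.
docker compose up -d

# After the init queries from HA_register.cypher have run, ask a coordinator
# for the cluster state; every registered instance should be listed as up.
echo "SHOW INSTANCES;" | docker exec -i coord1 mgconsole
```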
5 | -------------------------------------------------------------------------------- /docker-compose/docker-compose.yml: -------------------------------------------------------------------------------- 1 | networks: 2 | memgraph_ha: 3 | name: memgraph_ha 4 | driver: bridge 5 | ipam: 6 | driver: default 7 | config: 8 | - subnet: "172.21.0.0/16" 9 | 10 | services: 11 | coord1: 12 | image: "memgraph/memgraph" 13 | container_name: coord1 14 | volumes: 15 | - ./license.cypher:/tmp/init/license.cypher:ro 16 | - ./HA_register.cypher:/tmp/init/HA_register.cypher:ro 17 | environment: 18 | - MEMGRAPH_HA_CLUSTER_INIT_QUERIES=/tmp/init/HA_register.cypher 19 | command: [ "--init-file=/tmp/init/license.cypher", "--log-level=TRACE", "--data-directory=/tmp/mg_data_coord1", "--log-file=/tmp/coord1.log", "--also-log-to-stderr", "--coordinator-id=1", "--coordinator-port=10111", "--coordinator-hostname=coord1"] 20 | networks: 21 | memgraph_ha: 22 | ipv4_address: 172.21.0.4 23 | 24 | coord2: 25 | image: "memgraph/memgraph" 26 | container_name: coord2 27 | volumes: 28 | - ./license.cypher:/tmp/init/license.cypher:ro 29 | command: [ "--init-file=/tmp/init/license.cypher", "--log-level=TRACE", "--data-directory=/tmp/mg_data_coord2", "--log-file=/tmp/coord2.log", "--also-log-to-stderr", "--coordinator-id=2", "--coordinator-port=10112", "--coordinator-hostname=coord2"] 30 | networks: 31 | memgraph_ha: 32 | ipv4_address: 172.21.0.2 33 | 34 | coord3: 35 | image: "memgraph/memgraph" 36 | container_name: coord3 37 | volumes: 38 | - ./license.cypher:/tmp/init/license.cypher:ro 39 | command: [ "--init-file=/tmp/init/license.cypher", "--log-level=TRACE", "--data-directory=/tmp/mg_data_coord3", "--log-file=/tmp/coord3.log", "--also-log-to-stderr", "--coordinator-id=3", "--coordinator-port=10113", "--coordinator-hostname=coord3"] 40 | 41 | networks: 42 | memgraph_ha: 43 | ipv4_address: 172.21.0.3 44 | 45 | instance1: 46 | image: "memgraph/memgraph" 47 | container_name: instance1 48 | volumes: 49 | - ./license.cypher:/tmp/init/license.cypher:ro 50 | command: ["--init-file=/tmp/init/license.cypher","--data-recovery-on-startup=true", "--log-level=TRACE", "--data-directory=/tmp/mg_data_instance1", "--log-file=/tmp/instance1.log", "--also-log-to-stderr", "--management-port=10011"] 51 | networks: 52 | memgraph_ha: 53 | ipv4_address: 172.21.0.6 54 | 55 | instance2: 56 | image: "memgraph/memgraph" 57 | container_name: instance2 58 | volumes: 59 | - ./license.cypher:/tmp/init/license.cypher:ro 60 | command: ["--init-file=/tmp/init/license.cypher","--data-recovery-on-startup=true", "--log-level=TRACE", "--data-directory=/tmp/mg_data_instance2", "--log-file=/tmp/instance2.log", "--also-log-to-stderr", "--management-port=10012"] 61 | networks: 62 | memgraph_ha: 63 | ipv4_address: 172.21.0.7 64 | 65 | instance3: 66 | image: "memgraph/memgraph" 67 | container_name: instance3 68 | volumes: 69 | - ./license.cypher:/tmp/init/license.cypher:ro 70 | command: ["--init-file=/tmp/init/license.cypher","--data-recovery-on-startup=true", "--log-level=TRACE", "--data-directory=/tmp/mg_data_instance3", "--log-file=/tmp/instance3.log", "--also-log-to-stderr", "--management-port=10013"] 71 | networks: 72 | memgraph_ha: 73 | ipv4_address: 172.21.0.8 74 | -------------------------------------------------------------------------------- /docker-compose/license.cypher: -------------------------------------------------------------------------------- 1 | SET DATABASE SETTING 'organization.name' TO ''; 2 | SET DATABASE SETTING 'enterprise.license' TO ''; 3 | 
-------------------------------------------------------------------------------- /index.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | entries: 3 | memgraph: 4 | - apiVersion: v2 5 | appVersion: 1.16.0 6 | created: "2023-07-21T08:39:52.133849+02:00" 7 | description: MemgraphDB Helm Chart 8 | digest: d09182554cbece1d321cfd212dc43f8ff8fddd8cfd54036153de787123c4c4ce 9 | home: https://memgraph.com/ 10 | icon: https://public-assets.memgraph.com/memgraph-logo/logo-large.png 11 | keywords: 12 | - graph 13 | - database 14 | - cypher 15 | - analytics 16 | maintainers: 17 | - email: tech@memgraph.com 18 | name: Memgraph 19 | name: memgraph 20 | sources: 21 | - https://github.com/memgraph/memgraph 22 | type: application 23 | urls: 24 | - https://memgraph.github.io/helm-charts/memgraph-0.1.0.tgz 25 | version: 0.1.0 26 | generated: "2023-07-21T08:39:52.130559+02:00" 27 | -------------------------------------------------------------------------------- /scripts/aks.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | RESOURCE_GROUP="${RESOURCE_GROUP:-TestingResourceGroup}" 4 | LOCATION="${LOCATION:-northeurope}" 5 | CLUSTER_NAME="${CLUSTER_NAME:-memgraph-standalone}" 6 | CLUSTER_SIZE="${CLUSTER_SIZE:-1}" 7 | NODE_VM_SIZE="${NODE_VM_SIZE:-Standard_A2_v2}" 8 | 9 | # NOTE: Assumes installed az and being logged in 10 | # https://learn.microsoft.com/en-us/cli/azure/install-azure-cli. 11 | 12 | create_cluster() { 13 | az group create --name $RESOURCE_GROUP --location $LOCATION 14 | az aks create --resource-group $RESOURCE_GROUP --name $CLUSTER_NAME \ 15 | --node-count $CLUSTER_SIZE --node-vm-size $NODE_VM_SIZE --generate-ssh-keys 16 | az aks get-credentials --resource-group $RESOURCE_GROUP --name $CLUSTER_NAME 17 | } 18 | 19 | delete_cluster() { 20 | az group delete --name $RESOURCE_GROUP --yes 21 | } 22 | 23 | case $1 in 24 | create_cluster) 25 | create_cluster 26 | ;; 27 | delete_cluster) 28 | delete_cluster 29 | ;; 30 | *) 31 | echo "$0 create_cluster|delete_cluster" 32 | exit 1 33 | ;; 34 | esac 35 | -------------------------------------------------------------------------------- /scripts/gke.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | CLUSTER_NAME="${CLUSTER_NAME:-memgraph-standalone}" 4 | ZONE="${ZONE:-europe-west2-a}" 5 | CLUSTER_SIZE="${CLUSTER_SIZE:-1}" 6 | MACHINE_TYPE="${MACHINE_TYPE:-e2-medium}" 7 | 8 | # NOTE: Assumes installed gcloud (https://cloud.google.com/sdk/docs/install) 9 | # and init/login done. 
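# Example usage (a sketch; the cluster name and zone below are illustrative and
# can be overridden via the environment variables defined above):
#   CLUSTER_NAME=memgraph-test ZONE=europe-west2-a ./scripts/gke.bash create_cluster
#   ./scripts/gke.bash get_nodes
#   ./scripts/gke.bash delete_cluster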
10 | 11 | create_cluster() { 12 | gcloud container clusters create "$CLUSTER_NAME" \ 13 | --zone "$ZONE" \ 14 | --num-nodes $CLUSTER_SIZE \ 15 | --machine-type "$MACHINE_TYPE" 16 | gcloud container clusters get-credentials "$CLUSTER_NAME" --zone "$ZONE" 17 | } 18 | 19 | delete_cluster() { 20 | gcloud container clusters delete $CLUSTER_NAME --location $ZONE 21 | } 22 | 23 | list_clusters() { 24 | gcloud container clusters list 25 | } 26 | 27 | get_nodes() { 28 | gcloud container node-pools list --cluster "$CLUSTER_NAME" --zone "$ZONE" 29 | } 30 | 31 | case $1 in 32 | create_cluster) 33 | create_cluster 34 | ;; 35 | delete_cluster) 36 | delete_cluster 37 | ;; 38 | list_clusters) 39 | list_clusters 40 | ;; 41 | get_nodes) 42 | get_nodes 43 | ;; 44 | *) 45 | echo "$0 create_cluster|delete_cluster|list_clusters|get_nodes" 46 | exit 1 47 | ;; 48 | esac 49 | -------------------------------------------------------------------------------- /tutorials/gcp/README.md: -------------------------------------------------------------------------------- 1 | # Deploying Memgraph under GCP's GKE 2 | 3 | In general, to deploy a GKE cluster follow the [official 4 | documentation](https://cloud.google.com/kubernetes-engine/docs/deploy-app-cluster). 5 | For a specific example, take a look below. 6 | 7 | To install `gcloud` follow the [install SDK 8 | instructions](https://cloud.google.com/sdk/docs/install-sdk). In addition, 9 | `gke-gcloud-auth-plugin` is required; to install it together with `kubectl`, run: 10 | ``` 11 | gcloud components install kubectl gke-gcloud-auth-plugin 12 | ``` 13 | 14 | Check out our [gke.bash](../../scripts/gke.bash) script for basic management of 15 | the GKE k8s cluster. 16 | 17 | To make sure that the `kubectl` config points to the right cluster, or to switch back to a previous 18 | context, use the following commands: 19 | ``` 20 | kubectl config get-contexts 21 | kubectl config use-context <context-name> 22 | ``` 23 | -------------------------------------------------------------------------------- /tutorials/ha-under-aws: -------------------------------------------------------------------------------- 1 | ../charts/memgraph-high-availability/aws -------------------------------------------------------------------------------- /tutorials/ha-under-azure: -------------------------------------------------------------------------------- 1 | ../charts/memgraph-high-availability/aks --------------------------------------------------------------------------------
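Returning to the GKE tutorial above, an end-to-end run might look like the following sketch. The Helm repository URL matches the one published in `index.yaml`; the release name `my-memgraph` and the cluster settings are illustrative:

```bash
# Create the GKE cluster and fetch kubectl credentials (see scripts/gke.bash).
./scripts/gke.bash create_cluster

# Add the Memgraph Helm repository and install the standalone chart.
helm repo add memgraph https://memgraph.github.io/helm-charts
helm repo update
helm install my-memgraph memgraph/memgraph

# Wait until the Memgraph pod reports Running.
kubectl get pods --watch
```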