├── .env
├── .gitignore
├── LICENSE
├── README.md
├── README_zh-CN.md
├── charts
│   ├── index.yaml
│   ├── nebula-0.2.0.tgz
│   └── nebula
│       ├── .helmignore
│       ├── Chart.yaml
│       ├── README.md
│       ├── templates
│       │   ├── NOTES.txt
│       │   ├── _helpers.tpl
│       │   ├── configmap.yaml
│       │   ├── deployment.yaml
│       │   ├── pdb.yaml
│       │   ├── service.yaml
│       │   ├── serviceaccount.yaml
│       │   └── statefulset.yaml
│       └── values.yaml
├── docker-compose-lite.yaml
└── docker-compose.yaml
/.env:
--------------------------------------------------------------------------------
1 | TZ=UTC
2 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 |
3 | data/
4 | logs/
5 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | English | 中文
2 |
3 | A distributed, scalable, lightning-fast graph database
4 |
16 | NebulaGraph can be deployed using several methods, with Docker Compose being one of the quickest and easiest. This repository contains Docker Compose configuration files for various versions of NebulaGraph, organized across different branches. Refer to the table below for the most commonly used branches, along with their corresponding NebulaGraph versions. Typically, the highest version number in the v3.x series represents the latest stable release.
17 |
18 | For specific minor versions of Docker images, such as 3.6.1, please consult the tags on Docker Hub, which can be found [here](https://hub.docker.com/r/vesoft/nebula-graphd/tags).
19 |
20 | | Branch of This Repo | NebulaGraph | Version Comment | Docs |
21 | | :----------------------------------------------------------: | ------------------------------------------------------------ | ----------------------------- | ------------------------------------------------------------ |
22 | | [`master`](https://github.com/vesoft-inc/nebula-docker-compose/tree/master) | `master` of the [nebula](https://github.com/vesoft-inc/nebula) repository | The latest dev build for v3.x | [Guide](https://docs.nebula-graph.io/master/4.deployment-and-installation/2.compile-and-install-nebula-graph/3.deploy-nebula-graph-with-docker-compose/) |
23 | | [`v3.8`](https://github.com/vesoft-inc/nebula-docker-compose/tree/v3.8.0) | `v3.8.x` of the [nebula](https://github.com/vesoft-inc/nebula) repository | v3.8.x | [Guide](https://docs.nebula-graph.io/3.8.0/4.deployment-and-installation/2.compile-and-install-nebula-graph/3.deploy-nebula-graph-with-docker-compose/) |
24 | | [`v3.6`](https://github.com/vesoft-inc/nebula-docker-compose/tree/v3.6.0) | `v3.6.x` of the [nebula](https://github.com/vesoft-inc/nebula) repository | v3.6.x | [Guide](https://docs.nebula-graph.io/3.6.0/4.deployment-and-installation/2.compile-and-install-nebula-graph/3.deploy-nebula-graph-with-docker-compose/) |
25 | | [`v3.5`](https://github.com/vesoft-inc/nebula-docker-compose/tree/v3.5.0) | `v3.5.x` of the [nebula](https://github.com/vesoft-inc/nebula) repository | v3.5.x | [Guide](https://docs.nebula-graph.io/3.5.0/4.deployment-and-installation/2.compile-and-install-nebula-graph/3.deploy-nebula-graph-with-docker-compose/) |
26 | | [`v3.4`](https://github.com/vesoft-inc/nebula-docker-compose/tree/v3.4.0) | `v3.4.x` of the [nebula](https://github.com/vesoft-inc/nebula) repository | v3.4.x | [Guide](https://docs.nebula-graph.io/2.0/2.quick-start/2.deploy-nebula-graph-with-docker-compose/) |
27 | | [`v3.3`](https://github.com/vesoft-inc/nebula-docker-compose/tree/v3.3.0) | `v3.3.x` of the [nebula](https://github.com/vesoft-inc/nebula) repository | v3.3.x | [Guide](https://docs.nebula-graph.io/2.0/2.quick-start/2.deploy-nebula-graph-with-docker-compose/) |
28 | | [`v3.2`](https://github.com/vesoft-inc/nebula-docker-compose/tree/v3.2.0) | `v3.2.x` of the [nebula](https://github.com/vesoft-inc/nebula) repository | v3.2.x | [Guide](https://docs.nebula-graph.io/2.0/2.quick-start/2.deploy-nebula-graph-with-docker-compose/) |
29 | | [`v3.1`](https://github.com/vesoft-inc/nebula-docker-compose/tree/v3.1.0) | `v3.1.x` of the [nebula](https://github.com/vesoft-inc/nebula) repository | v3.1.x | [Guide](https://docs.nebula-graph.io/2.0/2.quick-start/2.deploy-nebula-graph-with-docker-compose/) |
30 | | [`v3.0.1`](https://github.com/vesoft-inc/nebula-docker-compose/tree/v3.0.1) | `v3.0.1` of the [nebula](https://github.com/vesoft-inc/nebula) repository | v3.0.1 | [Guide](https://docs.nebula-graph.io/2.0/2.quick-start/2.deploy-nebula-graph-with-docker-compose/) |
31 | | [`v2.6`](https://github.com/vesoft-inc/nebula-docker-compose/tree/v2.6) | `v2.6` of the [nebula](https://github.com/vesoft-inc/nebula) repository | The last v2.x release | [Guide](https://github.com/vesoft-inc/nebula-docker-compose/blob/v2.6/README.md) |
32 | | [`v2.5.0`](https://github.com/vesoft-inc/nebula-docker-compose/tree/v2.5.0) | `v2.5.0` of the [nebula-graph](https://github.com/vesoft-inc/nebula-graph) repository | v2.5.0 | [Guide](https://github.com/vesoft-inc/nebula-docker-compose/blob/v2.5.0/README.md) |
33 | | [`v2.0.0`](https://github.com/vesoft-inc/nebula-docker-compose/tree/v2.0.0) | `v2.0.0` of the nebula-graph repository | v2.0.0-GA | [Guide](https://github.com/vesoft-inc/nebula-docker-compose/blob/v2.0.0/README.md) |
34 | | [`v1.0`](https://github.com/vesoft-inc/nebula-docker-compose/tree/v1.0) | `master` of the [nebula](https://github.com/vesoft-inc/nebula) repository | v1.0 | [Guide](https://github.com/vesoft-inc/nebula-docker-compose/blob/v1.0/README.md) |
35 |
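36 | As a minimal quick-start sketch (assuming a Linux host with Docker and the Docker Compose plugin installed; the nebula-console image tag and connection flags below are illustrative rather than part of this repository), bringing a cluster up and tearing it down looks roughly like this:
37 |
38 | ```console
39 | # clone the branch that matches the NebulaGraph version you want, e.g. master
40 | $ git clone -b master https://github.com/vesoft-inc/nebula-docker-compose.git
41 | $ cd nebula-docker-compose
42 |
43 | # start the cluster defined in docker-compose.yaml and check container health
44 | $ docker compose up -d
45 | $ docker compose ps
46 |
47 | # connect to graphd on its client port 9669 (default credentials root/nebula)
48 | $ docker run --rm -ti --network host vesoft/nebula-console:v3 \
49 |     -addr 127.0.0.1 -port 9669 -u root -p nebula
50 |
51 | # stop the cluster and remove its volumes when finished
52 | $ docker compose down -v
53 | ```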
--------------------------------------------------------------------------------
/README_zh-CN.md:
--------------------------------------------------------------------------------
1 | 中文 | English
2 |
3 | 能够容纳千亿个顶点和万亿条边,并提供毫秒级查询延时的图数据库解决方案
4 |
23 | 部署 NebulaGraph 的方式有很多,使用 Docker Compose 是其中较方便的一种。本仓库包含 NebulaGraph 的 Docker Compose 配置文件。
24 |
25 | 下表列出了常用分支以及与其相对应的 NebulaGraph 分支和版本,通常来说,v3.x 的最大版本就是最新的稳定版本。
26 |
27 | 更多小版本的 Docker 镜像分支(比如对应 3.6.1 版本的镜像),可以在 Docker Hub 上查询相应镜像的标签(tag),比如[这里](https://hub.docker.com/r/vesoft/nebula-graphd/tags)。
28 |
29 | | 本仓库分支 | NebulaGraph | 版本解释 | 如何部署 |
30 | | :----------------------------------------------------------: | ------------------------------------------------------------ | ------------------- | ------------------------------------------------------------ |
31 | | [`master`](https://github.com/vesoft-inc/nebula-docker-compose/tree/master) | `master` of the [nebula repository](https://github.com/vesoft-inc/nebula) | v3.x 的最新开发版本 | [文档](https://docs.nebula-graph.io/2.0/2.quick-start/2.deploy-nebula-graph-with-docker-compose/) |
32 | | [`v3.8`](https://github.com/vesoft-inc/nebula-docker-compose/tree/v3.8.0) | `v3.8.x` of the [nebula](https://github.com/vesoft-inc/nebula) repository | v3.8.x | [文档](https://docs.nebula-graph.io/2.0/2.quick-start/2.deploy-nebula-graph-with-docker-compose/) |
33 | | [`v3.6`](https://github.com/vesoft-inc/nebula-docker-compose/tree/v3.6.0) | `v3.6.x` of the [nebula](https://github.com/vesoft-inc/nebula) repository | v3.6.x | [文档](https://docs.nebula-graph.io/2.0/2.quick-start/2.deploy-nebula-graph-with-docker-compose/) |
34 | | [`v3.5`](https://github.com/vesoft-inc/nebula-docker-compose/tree/v3.5.0) | `v3.5.x` of the [nebula](https://github.com/vesoft-inc/nebula) repository | v3.5.x | [文档](https://docs.nebula-graph.io/2.0/2.quick-start/2.deploy-nebula-graph-with-docker-compose/) |
35 | | [`v3.4`](https://github.com/vesoft-inc/nebula-docker-compose/tree/v3.4.0) | `v3.4.x` of the [nebula](https://github.com/vesoft-inc/nebula) repository | v3.4.x | [文档](https://docs.nebula-graph.io/2.0/2.quick-start/2.deploy-nebula-graph-with-docker-compose/) |
36 | | [`v3.3`](https://github.com/vesoft-inc/nebula-docker-compose/tree/v3.3.0) | `v3.3.x` of the [nebula](https://github.com/vesoft-inc/nebula) repository | v3.3.x | [文档](https://docs.nebula-graph.io/2.0/2.quick-start/2.deploy-nebula-graph-with-docker-compose/) |
37 | | [`v3.2`](https://github.com/vesoft-inc/nebula-docker-compose/tree/v3.2.0) | `v3.2.x` of the [nebula](https://github.com/vesoft-inc/nebula) repository | v3.2.x | [文档](https://docs.nebula-graph.io/2.0/2.quick-start/2.deploy-nebula-graph-with-docker-compose/) |
38 | | [`v3.1`](https://github.com/vesoft-inc/nebula-docker-compose/tree/v3.1.0) | `v3.1.x` of the [nebula](https://github.com/vesoft-inc/nebula) repository | v3.1.x | [文档](https://docs.nebula-graph.io/2.0/2.quick-start/2.deploy-nebula-graph-with-docker-compose/) |
39 | | [`v3.0.1`](https://github.com/vesoft-inc/nebula-docker-compose/tree/v3.0.1) | `v3.0.1` of the [nebula repository](https://github.com/vesoft-inc/nebula) | v3.0.1 | [文档](https://docs.nebula-graph.io/2.0/2.quick-start/2.deploy-nebula-graph-with-docker-compose/) |
40 | | [`v2.6`](https://github.com/vesoft-inc/nebula-docker-compose/tree/v2.6) | `v2.6` of the nebula-graph repository | v2.x 的最后发布 | [文档](https://github.com/vesoft-inc/nebula-docker-compose/blob/v2.6/README.md) |
41 | | [`v2.5.0`](https://github.com/vesoft-inc/nebula-docker-compose/tree/v2.5.0) | `v2.5.0` of the nebula-graph repository | v2.5.0 | [文档](https://github.com/vesoft-inc/nebula-docker-compose/blob/v2.5.0/README.md) |
42 | | [`v2.0.0`](https://github.com/vesoft-inc/nebula-docker-compose/tree/v2.0.0) | `v2.0.0` of the nebula-graph repository | v2.0.0-GA | [文档](https://github.com/vesoft-inc/nebula-docker-compose/blob/v2.0.0/README.md) |
43 | | [`v1.0`](https://github.com/vesoft-inc/nebula-docker-compose/tree/v1.0) | `master` of the [nebula](https://github.com/vesoft-inc/nebula) repository | v1 | [文档](https://github.com/vesoft-inc/nebula-docker-compose/blob/v1.0/README.md) |
44 |
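45 | 下面是一个最简的快速上手示例(仅供参考;假设已在 Linux 主机上安装 Docker 和 Docker Compose 插件):
46 |
47 | ```console
48 | # 克隆与所需 NebulaGraph 版本对应的分支,例如 master
49 | $ git clone -b master https://github.com/vesoft-inc/nebula-docker-compose.git
50 | $ cd nebula-docker-compose
51 |
52 | # 启动 docker-compose.yaml 中定义的集群,并查看容器状态
53 | $ docker compose up -d
54 | $ docker compose ps
55 |
56 | # 使用完毕后停止集群并删除数据卷
57 | $ docker compose down -v
58 | ```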
--------------------------------------------------------------------------------
/charts/index.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | entries:
3 | nebula:
4 | - apiVersion: v1
5 | appVersion: v2.0.0
6 | created: "2021-04-15T11:11:45.421212548+08:00"
7 | description: Official Nebula Graph Helm chart for Kubernetes
8 | digest: c3e8259705a1d4c17478c704ad489733f4b6d0bf3c4edb241f8c6815e0f199c3
9 | home: https://nebula-graph.io
10 | icon: https://github.com/vesoft-inc/nebula/raw/master/docs/logo.png
11 | keywords:
12 | - k8s
13 | - nebula
14 | - database
15 | - graph
16 | maintainers:
17 | - email: kevin.qiao@vesoft.com
18 | name: kevin
19 | url: https://github.com/MegaByte875
20 | - email: kenshin.liu@vesoft.com
21 | name: kenshin
22 | url: https://github.com/kqzh
23 | name: nebula
24 | sources:
25 | - https://github.com/vesoft-inc/nebula-graph
26 | urls:
27 | - https://vesoft-inc.github.io/nebula-docker-compose/charts/nebula-0.2.0.tgz
28 | version: 0.2.0
29 | generated: "2021-04-15T11:11:45.420157091+08:00"
30 |
--------------------------------------------------------------------------------
/charts/nebula-0.2.0.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vesoft-inc/nebula-docker-compose/35815f17abe9975595579c41376c11ec9e2cc7f0/charts/nebula-0.2.0.tgz
--------------------------------------------------------------------------------
/charts/nebula/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *.orig
18 | *~
19 | # Various IDEs
20 | .project
21 | .idea/
22 | *.tmproj
23 | .vscode/
24 |
--------------------------------------------------------------------------------
/charts/nebula/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | appVersion: v2.0.0
3 | description: Official Nebula Graph Helm chart for Kubernetes
4 | home: https://nebula-graph.io
5 | icon: https://github.com/vesoft-inc/nebula/raw/master/docs/logo.png
6 | keywords:
7 | - k8s
8 | - nebula
9 | - database
10 | - graph
11 | maintainers:
12 | - email: kevin.qiao@vesoft.com
13 | name: kevin
14 | url: https://github.com/MegaByte875
15 | - email: kenshin.liu@vesoft.com
16 | name: kenshin
17 | url: https://github.com/kqzh
18 | name: nebula
19 | sources:
20 | - https://github.com/vesoft-inc/nebula-graph
21 | version: 0.2.0
22 |
--------------------------------------------------------------------------------
/charts/nebula/README.md:
--------------------------------------------------------------------------------
1 | # Nebula Helm Chart
2 |
3 | Nebula Graph Helm chart for Kubernetes
4 |
5 | ## Requirements
6 |
7 | * Kubernetes >= 1.14
8 | * [CoreDNS][] >= 1.6.0
9 | * [Helm][] >= 3.2.0
10 |
11 | ## Get Repo Info
12 |
13 | ```console
14 | helm repo add nebula-graph https://vesoft-inc.github.io/nebula-docker-compose/charts
15 | helm repo update
16 | ```
17 |
18 | _See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._
19 |
20 | ## Install Chart
21 |
22 | ```console
23 | # Helm 3
24 | # helm install [NAME] [CHART] [flags]
25 | $ helm install nebula nebula-graph/nebula --version 0.2.0
26 | ```
27 |
28 | _See [configuration](#configuration) below._
29 |
30 | _See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
31 |
32 | ## Uninstall Chart
33 |
34 | ```console
35 | # Helm 3
36 | $ helm uninstall nebula
37 | ```
38 |
39 | ## Configuration
40 |
41 | See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](https://github.com/vesoft-inc/nebula-docker-compose/blob/master/charts/nebula/values.yaml), or run this configuration command:
42 |
43 | ```console
44 | # Helm 3
45 | $ helm show values nebula-graph/nebula
46 | ```
47 |
48 |
49 | [helm]: https://helm.sh
50 | [coredns]: https://github.com/coredns/coredns
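51 |
52 | For example (a sketch only; `local-path` below is a placeholder StorageClass name, not something this chart ships), individual values from `values.yaml` can be overridden at install time with `--set`:
53 |
54 | ```console
55 | # Helm 3
56 | # pin the chart version and override the storage class and graphd replica count
57 | $ helm install nebula nebula-graph/nebula --version 0.2.0 \
58 |     --set storage.storageClass=local-path \
59 |     --set replication.graphd.replicas=3
60 | ```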
--------------------------------------------------------------------------------
/charts/nebula/templates/NOTES.txt:
--------------------------------------------------------------------------------
1 | Nebula Graph Cluster installed!
2 |
3 | 1. Watch all containers come up.
4 | $ kubectl get pods --namespace={{ .Release.Namespace }} -l app.kubernetes.io={{ template "nebula.fullname" . }} -w
5 |
6 |
--------------------------------------------------------------------------------
/charts/nebula/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/*
2 | Expand the name of the chart.
3 | */}}
4 | {{- define "nebula.name" -}}
5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
6 | {{- end }}
7 |
8 | {{/*
9 | Create a default fully qualified app name.
10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
11 | If release name contains chart name it will be used as a full name.
12 | */}}
13 | {{- define "nebula.fullname" -}}
14 | {{- if .Values.fullnameOverride }}
15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
16 | {{- else }}
17 | {{- $name := default .Chart.Name .Values.nameOverride }}
18 | {{- if contains $name .Release.Name }}
19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }}
20 | {{- else }}
21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
22 | {{- end }}
23 | {{- end }}
24 | {{- end }}
25 |
26 | {{/*
27 | Create chart name and version as used by the chart label.
28 | */}}
29 | {{- define "nebula.chart" -}}
30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
31 | {{- end }}
32 |
33 | {{/*
34 | Common labels
35 | */}}
36 | {{- define "nebula.labels" -}}
37 | helm.sh/chart: {{ include "nebula.chart" . }}
38 | {{ include "nebula.selectorLabels" . }}
39 | {{- if .Chart.AppVersion }}
40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
41 | {{- end }}
42 | app.kubernetes.io/managed-by: {{ .Release.Service }}
43 | {{- end }}
44 |
45 | {{/*
46 | Selector labels
47 | */}}
48 | {{- define "nebula.selectorLabels" -}}
49 | app.kubernetes.io/name: {{ include "nebula.name" . }}
50 | app.kubernetes.io/instance: {{ .Release.Name }}
51 | {{- end }}
52 |
53 | {{/*
54 | Create the name of the service account to use
55 | */}}
56 | {{- define "nebula.serviceAccountName" -}}
57 | {{- if .Values.serviceAccount.create }}
58 | {{- default (include "nebula.fullname" .) .Values.serviceAccount.name }}
59 | {{- else }}
60 | {{- default "default" .Values.serviceAccount.name }}
61 | {{- end }}
62 | {{- end }}
63 |
64 | {{/*
65 | Compute the maximum number of unavailable replicas for the PodDisruptionBudget.
66 | */}}
67 | {{- define "nebula.pdb.maxUnavailable" -}}
68 | {{- if eq (int .Values.replication.metad.replicas) 1 }}
69 | {{- 0 }}
70 | {{- else if .Values.disruptionBudget.maxUnavailable }}
71 | {{- .Values.disruptionBudget.maxUnavailable }}
72 | {{- else }}
73 | {{- if eq (int .Values.replication.metad.replicas) 3 }}
74 | {{- 1 }}
75 | {{- else }}
76 | {{- sub (div (int .Values.replication.metad.replicas) 2) 1 }}
77 | {{- end }}
78 | {{- end }}
79 | {{- end }}
80 |
81 | {{/*
82 | Generate DNS-based endpoints for metad (see the example rendering at the end of this file).
83 | */}}
84 | {{- define "nebula.metad.endpoints" -}}
85 | {{- $endpoints := list -}}
86 | {{- $namespace := .Release.Namespace -}}
87 | {{- $thriftPort := .Values.port.metad.thriftPort | toString -}}
88 | {{- $replicas := .Values.replication.metad.replicas | int -}}
89 | {{- if .Values.hostNetwork }}
90 | {{- join "," .Values.metadEndpoints }}
91 | {{- else }}
92 | {{- $name := print "nebula-metad" -}}
93 | {{- range $i, $e := until $replicas }}
94 | {{- $endpoints = printf "%s-%d.nebula-metad.%s.svc.cluster.local:%s" $name $i $namespace $thriftPort | append $endpoints }}
95 | {{- end }}
96 | {{- join "," $endpoints }}
97 | {{- end }}
98 | {{- end }}
99 |
100 | {{/*
101 | Generate container command for metad.
102 | */}}
103 | {{- define "nebula.metad.args" -}}
104 | {{- $args := .Values.commandArgs.metad | first -}}
105 | {{- $newArgs := list -}}
106 | {{- $namespace := .Release.Namespace -}}
107 | {{- if .Values.hostNetwork }}
108 | {{- $args = printf "%s --local_ip=$(hostname -i)" $args }}
109 | {{- $newArgs = $args | quote | append $newArgs }}
110 | {{- $newArgs }}
111 | {{- else }}
112 | {{- $args = printf "%s --local_ip=$(hostname).nebula-metad.%s.svc.cluster.local" $args $namespace }}
113 | {{- $newArgs = $args | quote | append $newArgs }}
114 | {{- $newArgs }}
115 | {{- end }}
116 | {{- end }}
117 |
118 | {{/*
119 | Generate container command for storaged.
120 | */}}
121 | {{- define "nebula.storaged.args" -}}
122 | {{- $args := .Values.commandArgs.storaged | first -}}
123 | {{- $newArgs := list -}}
124 | {{- $namespace := .Release.Namespace -}}
125 | {{- if .Values.hostNetwork }}
126 | {{- $args = printf "%s --local_ip=$(hostname -i)" $args }}
127 | {{- $newArgs = $args | quote | append $newArgs }}
128 | {{- $newArgs }}
129 | {{- else }}
130 | {{- $args = printf "%s --local_ip=$(hostname).nebula-storaged.%s.svc.cluster.local" $args $namespace }}
131 | {{- $newArgs = $args | quote | append $newArgs }}
132 | {{- $newArgs }}
133 | {{- end }}
134 | {{- end }}
135 |
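136 | {{/*
137 | For reference only (assumes hostNetwork is disabled and the chart's default
138 | values: 3 metad replicas, thriftPort 9559): for a release in the "default"
139 | namespace, "nebula.metad.endpoints" renders as
140 |   nebula-metad-0.nebula-metad.default.svc.cluster.local:9559,nebula-metad-1.nebula-metad.default.svc.cluster.local:9559,nebula-metad-2.nebula-metad.default.svc.cluster.local:9559
141 | which is the string substituted into --meta_server_addrs in the ConfigMaps.
142 | */}}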
--------------------------------------------------------------------------------
/charts/nebula/templates/configmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: nebula-graphd
5 | data:
6 | nebula-graphd.conf: |
7 | ########## basics ##########
8 | # Whether to run as a daemon process
9 | --daemonize=false
10 | # The file to host the process id
11 | --pid_file=pids/nebula-graphd.pid
12 | ########## logging ##########
13 | # The directory to host logging files, which must already exist
14 | --log_dir=logs
15 | # Log level, 0, 1, 2, 3 for INFO, WARNING, ERROR, FATAL respectively
16 | --minloglevel=2
17 | # Verbose log level, 1, 2, 3, 4; the higher the level, the more verbose the logging
18 | --v=0
19 | # Maximum seconds to buffer the log messages
20 | --logbufsecs=0
21 | # Whether to redirect stdout and stderr to separate output files
22 | --redirect_stdout=true
23 | # Destination filename of stdout and stderr, which will also reside in log_dir.
24 | --stdout_log_file=stdout.log
25 | --stderr_log_file=stderr.log
26 | # Copy log messages at or above this level to stderr in addition to logfiles. The numbers of severity levels INFO, WARNING, ERROR, and FATAL are 0, 1, 2, and 3, respectively.
27 | --stderrthreshold=2
28 |
29 | ########## networking ##########
30 | # Meta Server Address
31 | --meta_server_addrs={{ template "nebula.metad.endpoints" . }}
32 | # Local ip
33 | --local_ip=0.0.0.0
34 | # Network device to listen on
35 | --listen_netdev=any
36 | # Port to listen on
37 | --port={{ .Values.port.graphd.thriftPort }}
38 | # To turn on SO_REUSEPORT or not
39 | --reuse_port=false
40 | # Backlog of the listen socket, adjust this together with net.core.somaxconn
41 | --listen_backlog=1024
42 | # Seconds before the idle connections are closed, 0 for never closed
43 | --client_idle_timeout_secs=0
44 | # Seconds before the idle sessions are expired, 0 for no expiration
45 | --session_idle_timeout_secs=60000
46 | # The number of threads to accept incoming connections
47 | --num_accept_threads=1
48 | # The number of networking IO threads, 0 for # of CPU cores
49 | --num_netio_threads=0
50 | # The number of threads to execute user queries, 0 for # of CPU cores
51 | --num_worker_threads=0
52 | # HTTP service ip
53 | --ws_ip=0.0.0.0
54 | # HTTP service port
55 | --ws_http_port={{ .Values.port.graphd.httpPort }}
56 | # HTTP2 service port
57 | --ws_h2_port={{ .Values.port.graphd.http2Port }}
58 | # The default charset when a space is created
59 | --default_charset=utf8
60 | # The default collation when a space is created
61 | --default_collate=utf8_bin
62 |
63 | ########## authorization ##########
64 | # Enable authorization
65 | --enable_authorize=false
66 |
67 | ########## Authentication ##########
68 | # User login authentication type, password for nebula authentication, ldap for ldap authentication, cloud for cloud authentication
69 | --auth_type=password
70 |
71 | ---
72 | apiVersion: v1
73 | kind: ConfigMap
74 | metadata:
75 | name: nebula-metad
76 | data:
77 | nebula-metad.conf: |
78 | ########## basics ##########
79 | # Whether to run as a daemon process
80 | --daemonize=false
81 | # The file to host the process id
82 | --pid_file=pids/nebula-metad.pid
83 |
84 | ########## logging ##########
85 | # The directory to host logging files, which must already exist
86 | --log_dir=logs
87 | # Log level, 0, 1, 2, 3 for INFO, WARNING, ERROR, FATAL respectively
88 | --minloglevel=2
89 | # Verbose log level, 1, 2, 3, 4; the higher the level, the more verbose the logging
90 | --v=0
91 | # Maximum seconds to buffer the log messages
92 | --logbufsecs=0
93 |
94 | ########## networking ##########
95 | # Meta Server Address
96 | --meta_server_addrs={{ template "nebula.metad.endpoints" . }}
97 | # Local ip
98 | --local_ip=0.0.0.0
99 | # Meta daemon listening port
100 | --port={{ .Values.port.metad.thriftPort }}
101 | # HTTP service ip
102 | --ws_ip=0.0.0.0
103 | # HTTP service port
104 | --ws_http_port={{ .Values.port.metad.httpPort }}
105 | # HTTP2 service port
106 | --ws_h2_port={{ .Values.port.metad.http2Port }}
107 |
108 | ########## storage ##########
109 | # Root data path, here should be only single path for metad
110 | --data_path=data/meta
111 |
112 | ########## Misc #########
113 | # The default number of parts when a space is created
114 | --default_parts_num=100
115 | # The default replica factor when a space is created
116 | --default_replica_factor=1
117 | # Heartbeat interval
118 | --heartbeat_interval_secs=10
119 | # Hosts will be removed in this time if no heartbeat received
120 | --removed_threshold_sec=60
121 |
122 | ---
123 | apiVersion: v1
124 | kind: ConfigMap
125 | metadata:
126 | name: nebula-storaged
127 | data:
128 | nebula-storaged.conf: |
129 | ########## basics ##########
130 | # Whether to run as a daemon process
131 | --daemonize=false
132 | # The file to host the process id
133 | --pid_file=pids/nebula-storaged.pid
134 |
135 | ########## logging ##########
136 | # The directory to host logging files, which must already exist
137 | --log_dir=logs
138 | # Log level, 0, 1, 2, 3 for INFO, WARNING, ERROR, FATAL respectively
139 | --minloglevel=2
140 | # Verbose log level, 1, 2, 3, 4; the higher the level, the more verbose the logging
141 | --v=0
142 | # Maximum seconds to buffer the log messages
143 | --logbufsecs=0
144 | ########## networking ##########
145 | # Meta server address
146 | --meta_server_addrs={{ template "nebula.metad.endpoints" . }}
147 | # Local ip
148 | --local_ip=0.0.0.0
149 | # Storage daemon listening port
150 | --port={{ .Values.port.storaged.thriftPort }}
151 | # HTTP service ip
152 | --ws_ip=0.0.0.0
153 | # HTTP service port
154 | --ws_http_port={{ .Values.port.storaged.httpPort }}
155 | # HTTP2 service port
156 | --ws_h2_port={{ .Values.port.storaged.http2Port }}
157 |
158 | ######### Raft #########
159 | # Raft election timeout
160 | --raft_heartbeat_interval_secs=30
161 | # RPC timeout for raft client (ms)
162 | --raft_rpc_timeout_ms=500
163 | ## recycle Raft WAL
164 | --wal_ttl=14400
165 |
166 | ########## Disk ##########
167 | # Root data path. Split by comma. e.g. --data_path=/disk1/path1/,/disk2/path2/
168 | # One path per Rocksdb instance.
169 | --data_path=data/storage
170 |
171 | # The default reserved bytes for one batch operation
172 | --rocksdb_batch_size=4096
173 | # The default block cache size used in BlockBasedTable.
174 | # The unit is MB.
175 | --rocksdb_block_cache=4
176 | # The type of storage engine, `rocksdb', `memory', etc.
177 | --engine_type=rocksdb
178 |
179 | # Compression algorithm, options: no,snappy,lz4,lz4hc,zlib,bzip2,zstd
180 | # For the sake of binary compatibility, the default value is snappy.
181 | # Recommend to use:
182 | # * lz4 to gain more CPU performance, with the same compression ratio with snappy
183 | # * zstd to occupy less disk space
184 | # * lz4hc for the read-heavy write-light scenario
185 | --rocksdb_compression=lz4
186 |
187 | # Set different compressions for different levels
188 | # For example, if --rocksdb_compression is snappy,
189 | # "no:no:lz4:lz4::zstd" is identical to "no:no:lz4:lz4:snappy:zstd:snappy"
190 | # In order to disable compression for level 0/1, set it to "no:no"
191 | --rocksdb_compression_per_level=
192 |
193 | # Whether or not to enable rocksdb's statistics, disabled by default
194 | --enable_rocksdb_statistics=false
195 |
196 | # Stats level used by rocksdb to collect statistics; optional values are
197 | # * kExceptHistogramOrTimers, disable timer stats, and skip histogram stats
198 | # * kExceptTimers, Skip timer stats
199 | # * kExceptDetailedTimers, Collect all stats except time inside mutex lock AND time spent on compression.
200 | # * kExceptTimeForMutex, Collect all stats except the counters requiring to get time inside the mutex lock.
201 | # * kAll, Collect all stats
202 | --rocksdb_stats_level=kExceptHistogramOrTimers
203 |
204 | # Whether or not to enable rocksdb's prefix bloom filter, disabled by default.
205 | --enable_rocksdb_prefix_filtering=false
206 | # Whether or not to enable the whole key filtering.
207 | --enable_rocksdb_whole_key_filtering=true
208 | # The prefix length for each key to use as the filter value.
209 | # can be 12 bytes(PartitionId + VertexID), or 16 bytes(PartitionId + VertexID + TagID/EdgeType).
210 | --rocksdb_filtering_prefix_length=12
211 |
212 | ############## rocksdb Options ##############
213 | --rocksdb_disable_wal=true
214 | # rocksdb DBOptions in json, each name and value of option is a string, given as "option_name":"option_value" separated by comma
215 | --rocksdb_db_options={}
216 | # rocksdb ColumnFamilyOptions in json, each name and value of option is string, given as "option_name":"option_value" separated by comma
217 | --rocksdb_column_family_options={"write_buffer_size":"67108864","max_write_buffer_number":"4","max_bytes_for_level_base":"268435456"}
218 | # rocksdb BlockBasedTableOptions in json, each name and value of option is string, given as "option_name":"option_value" separated by comma
219 | --rocksdb_block_based_table_options={"block_size":"8192"}
220 |
221 | ############### misc ####################
222 | --max_handlers_per_req=1
223 | --heartbeat_interval_secs=10
224 |
--------------------------------------------------------------------------------
/charts/nebula/templates/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | app.kubernetes.io: nebula
6 | app.kubernetes.io/component: nebula-graphd
7 | name: nebula-graphd
8 | spec:
9 | selector:
10 | matchLabels:
11 | app.kubernetes.io/component: nebula-graphd
12 | replicas: {{ .Values.replication.graphd.replicas }}
13 | strategy:
14 | rollingUpdate:
15 | maxSurge: 1
16 | maxUnavailable: 1
17 | type: RollingUpdate
18 | template:
19 | metadata:
20 | labels:
21 | app.kubernetes.io/component: nebula-graphd
22 | spec:
23 | restartPolicy: Always
24 | {{- if .Values.hostNetwork }}
25 | hostNetwork: true
26 | {{- end }}
27 | serviceAccountName: {{ include "nebula.serviceAccountName" . }}
28 | containers:
29 | - name: nebula-graphd
30 | image: "{{ .Values.image.graphd.repository }}:{{ .Values.image.graphd.tag }}"
31 | imagePullPolicy: {{ .Values.image.graphd.pullPolicy }}
32 | command: ["/bin/bash", "-ecx"]
33 | args: ["exec /usr/local/nebula/bin/nebula-graphd --flagfile=/usr/local/nebula/etc/nebula-graphd.conf --daemonize=false"]
34 | env:
35 | - name: USER
36 | value: root
37 | ports:
38 | - name: thrift
39 | containerPort: {{ .Values.port.graphd.thriftPort }}
40 | - name: http
41 | containerPort: {{ .Values.port.graphd.httpPort }}
42 | - name: http2
43 | containerPort: {{ .Values.port.graphd.http2Port }}
44 | livenessProbe:
45 | httpGet:
46 | path: {{ .Values.livenessProbe.httpGet.path }}
47 | port: {{ .Values.port.graphd.httpPort }}
48 | initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
49 | timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
50 | resources:
51 | requests:
52 | cpu: "{{ .Values.resources.graphd.requests.cpu }}"
53 | memory: "{{ .Values.resources.graphd.requests.memory }}"
54 | limits:
55 | cpu: "{{ .Values.resources.graphd.limits.cpu }}"
56 | memory: "{{ .Values.resources.graphd.limits.memory }}"
57 | volumeMounts:
58 | - name: config
59 | mountPath: /usr/local/nebula/etc/
60 | - name: timezone
61 | mountPath: /etc/localtime
62 | volumes:
63 | - name: timezone
64 | hostPath:
65 | path: /etc/localtime
66 | - name: config
67 | configMap:
68 | name: nebula-graphd
69 | {{- with .Values.nodeSelector }}
70 | nodeSelector:
71 | {{- toYaml . | nindent 8 }}
72 | {{- end }}
73 | {{- with .Values.affinity }}
74 | affinity:
75 | {{- toYaml . | nindent 8 }}
76 | {{- end }}
77 | {{- with .Values.tolerations }}
78 | tolerations:
79 | {{- toYaml . | nindent 8 }}
80 | {{- end }}
81 | {{- if .Values.priorityClassName }}
82 | priorityClassName: {{ .Values.priorityClassName | quote }}
83 | {{- end }}
84 |
--------------------------------------------------------------------------------
/charts/nebula/templates/pdb.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: policy/v1beta1
2 | kind: PodDisruptionBudget
3 | metadata:
4 | name: nebula-budget
5 | labels:
6 | app: nebula-budget
7 | spec:
8 | selector:
9 | matchLabels:
10 | app.kubernetes.io/component-type: stateful
11 | maxUnavailable: {{ template "nebula.pdb.maxUnavailable" .}}
--------------------------------------------------------------------------------
/charts/nebula/templates/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app.kubernetes.io: nebula
6 | app.kubernetes.io/component: nebula-metad
7 | name: nebula-metad
8 | spec:
9 | publishNotReadyAddresses: false
10 | clusterIP: None
11 | ports:
12 | - name: thrift
13 | port: {{ .Values.port.metad.thriftPort }}
14 | - name: http
15 | port: {{ .Values.port.metad.httpPort }}
16 | - name: http2
17 | port: {{ .Values.port.metad.http2Port }}
18 | selector:
19 | app.kubernetes.io/component: nebula-metad
20 | ---
21 | apiVersion: v1
22 | kind: Service
23 | metadata:
24 | labels:
25 | app.kubernetes.io: nebula
26 | app.kubernetes.io/component: nebula-storaged
27 | name: nebula-storaged
28 | spec:
29 | publishNotReadyAddresses: false
30 | clusterIP: None
31 | ports:
32 | - name: thrift
33 | port: {{ .Values.port.storaged.thriftPort }}
34 | - name: http
35 | port: {{ .Values.port.storaged.httpPort }}
36 | - name: http2
37 | port: {{ .Values.port.storaged.http2Port }}
38 | selector:
39 | app.kubernetes.io/component: nebula-storaged
40 | ---
41 | apiVersion: v1
42 | kind: Service
43 | metadata:
44 | labels:
45 | app.kubernetes.io: nebula
46 | app.kubernetes.io/component: nebula-graphd
47 | name: nebula-graphd
48 | spec:
49 | publishNotReadyAddresses: false
50 | type: NodePort
51 | ports:
52 | - name: thrift
53 | port: {{ .Values.port.graphd.thriftPort }}
54 | - name: http
55 | port: {{ .Values.port.graphd.httpPort }}
56 | - name: http2
57 | port: {{ .Values.port.graphd.http2Port }}
58 | selector:
59 | app.kubernetes.io/component: nebula-graphd
60 |
--------------------------------------------------------------------------------
/charts/nebula/templates/serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.serviceAccount.create -}}
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: {{ include "nebula.serviceAccountName" . }}
6 | labels:
7 | {{- include "nebula.labels" . | nindent 4 }}
8 | {{- with .Values.serviceAccount.annotations }}
9 | annotations:
10 | {{- toYaml . | nindent 4 }}
11 | {{- end }}
12 | {{- end }}
13 |
--------------------------------------------------------------------------------
/charts/nebula/templates/statefulset.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: StatefulSet
3 | metadata:
4 | name: nebula-metad
5 | labels:
6 | app.kubernetes.io: nebula
7 | app.kubernetes.io/component: nebula-metad
8 | app.kubernetes.io/component-type: stateful
9 | spec:
10 | serviceName: nebula-metad
11 | replicas: {{ .Values.replication.metad.replicas }}
12 | selector:
13 | matchLabels:
14 | app.kubernetes.io/component: nebula-metad
15 | template:
16 | metadata:
17 | labels:
18 | app.kubernetes.io/component: nebula-metad
19 | spec:
20 | affinity:
21 | podAntiAffinity:
22 | requiredDuringSchedulingIgnoredDuringExecution:
23 | - labelSelector:
24 | matchExpressions:
25 | - key: app.kubernetes.io/component
26 | operator: In
27 | values:
28 | - nebula-metad
29 | topologyKey: "kubernetes.io/hostname"
30 | restartPolicy: Always
31 | {{- if .Values.hostNetwork }}
32 | hostNetwork: true
33 | {{- end }}
34 | serviceAccountName: {{ include "nebula.serviceAccountName" . }}
35 | containers:
36 | - name: nebula-metad
37 | image: "{{ .Values.image.metad.repository }}:{{ .Values.image.metad.tag }}"
38 | imagePullPolicy: {{ .Values.image.metad.pullPolicy }}
39 | command: ["/bin/bash", "-ecx"]
40 | args: {{ template "nebula.metad.args" . }}
41 | env:
42 | - name: USER
43 | value: root
44 | resources:
45 | requests:
46 | cpu: {{ .Values.resources.metad.requests.cpu | quote }}
47 | memory: {{ .Values.resources.metad.requests.memory | quote }}
48 | limits:
49 | cpu: {{ .Values.resources.metad.limits.cpu | quote }}
50 | memory: {{ .Values.resources.metad.limits.memory | quote }}
51 | ports:
52 | - containerPort: {{ .Values.port.metad.thriftPort }}
53 | name: thrift
54 | - containerPort: {{ .Values.port.metad.httpPort }}
55 | name: http
56 | - containerPort: {{ .Values.port.metad.http2Port }}
57 | name: http2
58 | livenessProbe:
59 | httpGet:
60 | path: {{ .Values.livenessProbe.httpGet.path }}
61 | port: {{ .Values.port.metad.httpPort }}
62 | initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
63 | timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
64 | volumeMounts:
65 | - name: config
66 | mountPath: /usr/local/nebula/etc/
67 | - name: timezone
68 | mountPath: /etc/localtime
69 | - name: metad
70 | mountPath: /usr/local/nebula/data
71 | subPath: data
72 | - name: metad
73 | mountPath: /usr/local/nebula/logs
74 | subPath: logs
75 | volumes:
76 | - name: config
77 | configMap:
78 | name: nebula-metad
79 | - name: timezone
80 | hostPath:
81 | path: /etc/localtime
82 | - name: metad
83 | persistentVolumeClaim:
84 | claimName: metad
85 | terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}
86 | {{- with .Values.nodeSelector }}
87 | nodeSelector:
88 | {{- toYaml . | nindent 8 }}
89 | {{- end }}
90 | {{- with .Values.affinity }}
91 | affinity:
92 | {{- toYaml . | nindent 8 }}
93 | {{- end }}
94 | {{- with .Values.tolerations }}
95 | tolerations:
96 | {{- toYaml . | nindent 8 }}
97 | {{- end }}
98 | {{- if .Values.priorityClassName }}
99 | priorityClassName: {{ .Values.priorityClassName | quote }}
100 | {{- end }}
101 | podManagementPolicy: Parallel
102 | updateStrategy:
103 | type: RollingUpdate
104 | volumeClaimTemplates:
105 | - metadata:
106 | name: metad
107 | spec:
108 | accessModes: [ "ReadWriteOnce" ]
109 | storageClassName: {{ .Values.storage.storageClass }}
110 | resources:
111 | requests:
112 | storage: {{ .Values.storage.metad.size }}
113 |
114 | ---
115 | apiVersion: apps/v1
116 | kind: StatefulSet
117 | metadata:
118 | name: nebula-storaged
119 | labels:
120 | app.kubernetes.io: nebula
121 | app.kubernetes.io/component: nebula-storaged
122 | app.kubernetes.io/component-type: stateful
123 | spec:
124 | serviceName: nebula-storaged
125 | replicas: {{ .Values.replication.storaged.replicas }}
126 | selector:
127 | matchLabels:
128 | app.kubernetes.io/component: nebula-storaged
129 | template:
130 | metadata:
131 | labels:
132 | app.kubernetes.io/component: nebula-storaged
133 | spec:
134 | affinity:
135 | podAntiAffinity:
136 | requiredDuringSchedulingIgnoredDuringExecution:
137 | - labelSelector:
138 | matchExpressions:
139 | - key: app.kubernetes.io/component
140 | operator: In
141 | values:
142 | - nebula-storaged
143 | topologyKey: "kubernetes.io/hostname"
144 | restartPolicy: Always
145 | {{- if .Values.hostNetwork }}
146 | hostNetwork: true
147 | {{- end }}
148 | serviceAccountName: {{ include "nebula.serviceAccountName" . }}
149 | containers:
150 | - name: nebula-storaged
151 | image: "{{ .Values.image.storaged.repository }}:{{ .Values.image.storaged.tag }}"
152 | imagePullPolicy: {{ .Values.image.storaged.pullPolicy }}
153 | command: ["/bin/bash", "-ecx"]
154 | args: {{ template "nebula.storaged.args" . }}
155 | env:
156 | - name: USER
157 | value: root
158 | resources:
159 | requests:
160 | cpu: {{ .Values.resources.storaged.requests.cpu | quote }}
161 | memory: {{ .Values.resources.storaged.requests.memory | quote }}
162 | limits:
163 | cpu: {{ .Values.resources.storaged.limits.cpu | quote }}
164 | memory: {{ .Values.resources.storaged.limits.memory | quote }}
165 | ports:
166 | - containerPort: {{ .Values.port.storaged.thriftPort }}
167 | name: thrift
168 | - containerPort: {{ .Values.port.storaged.httpPort }}
169 | name: http
170 | - containerPort: {{ .Values.port.storaged.http2Port }}
171 | name: http2
172 | livenessProbe:
173 | httpGet:
174 | path: {{ .Values.livenessProbe.httpGet.path }}
175 | port: {{ .Values.port.storaged.httpPort }}
176 | initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
177 | timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
178 | volumeMounts:
179 | - name: config
180 | mountPath: /usr/local/nebula/etc/
181 | - name: timezone
182 | mountPath: /etc/localtime
183 | - name: storaged
184 | mountPath: /usr/local/nebula/data
185 | subPath: data
186 | - name: storaged
187 | mountPath: /usr/local/nebula/logs
188 | subPath: logs
189 | volumes:
190 | - name: config
191 | configMap:
192 | name: nebula-storaged
193 | - name: timezone
194 | hostPath:
195 | path: /etc/localtime
196 | - name: storaged
197 | persistentVolumeClaim:
198 | claimName: storaged
199 | terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}
200 | {{- with .Values.nodeSelector }}
201 | nodeSelector:
202 | {{- toYaml . | nindent 8 }}
203 | {{- end }}
204 | {{- with .Values.affinity }}
205 | affinity:
206 | {{- toYaml . | nindent 8 }}
207 | {{- end }}
208 | {{- with .Values.tolerations }}
209 | tolerations:
210 | {{- toYaml . | nindent 8 }}
211 | {{- end }}
212 | {{- if .Values.priorityClassName }}
213 | priorityClassName: {{ .Values.priorityClassName | quote }}
214 | {{- end }}
215 | podManagementPolicy: Parallel
216 | updateStrategy:
217 | type: RollingUpdate
218 | volumeClaimTemplates:
219 | - metadata:
220 | name: storaged
221 | spec:
222 | accessModes: [ "ReadWriteOnce" ]
223 | storageClassName: {{ .Values.storage.storageClass }}
224 | resources:
225 | requests:
226 | storage: {{ .Values.storage.storaged.size }}
227 |
--------------------------------------------------------------------------------
/charts/nebula/values.yaml:
--------------------------------------------------------------------------------
1 | # Available parameters and default values for the Nebula Graph chart.
2 |
3 | image:
4 | graphd:
5 | repository: vesoft/nebula-graphd
6 | tag: v2.0.0
7 | pullPolicy: IfNotPresent
8 | metad:
9 | repository: vesoft/nebula-metad
10 | tag: v2.0.0
11 | pullPolicy: IfNotPresent
12 | storaged:
13 | repository: vesoft/nebula-storaged
14 | tag: v2.0.0
15 | pullPolicy: IfNotPresent
16 |
17 | storage:
18 | # The StorageClass to use for StatefulSet storage.
19 | storageClass: fast-disks
20 | storaged:
21 | # This defines the disk size for configuring the
22 | # StatefulSet storage.
23 | size: 10Gi
24 | metad:
25 | size: 10Gi
26 |
27 | # The resource requests (CPU, memory, etc.)
28 | # for each of the nebula components.
29 | resources:
30 | graphd:
31 | limits:
32 | cpu: 1
33 | memory: 1Gi
34 | requests:
35 | cpu: 1
36 | memory: 1Gi
37 | metad:
38 | limits:
39 | cpu: 1
40 | memory: 1Gi
41 | requests:
42 | cpu: 1
43 | memory: 1Gi
44 | storaged:
45 | limits:
46 | cpu: 1
47 | memory: 1Gi
48 | requests:
49 | cpu: 1
50 | memory: 1Gi
51 |
52 | replication:
53 | storaged:
54 | replicas: 3
55 | metad:
56 | replicas: 3
57 | graphd:
58 | replicas: 2
59 |
60 | port:
61 | graphd:
62 | serviceType: ClusterIP
63 | thriftPort: 9669
64 | httpPort: 19669
65 | http2Port: 19670
66 | storaged:
67 | serviceType: ClusterIP
68 | thriftPort: 9779
69 | httpPort: 19779
70 | http2Port: 19780
71 | metad:
72 | serviceType: ClusterIP
73 | thriftPort: 9559
74 | httpPort: 19559
75 | http2Port: 19560
76 |
77 | # nebula metad & storaged container command
78 | commandArgs:
79 | metad:
80 | - "exec /usr/local/nebula/bin/nebula-metad --flagfile=/usr/local/nebula/etc/nebula-metad.conf --daemonize=false"
81 | storaged:
82 | - "exec /usr/local/nebula/bin/nebula-storaged --flagfile=/usr/local/nebula/etc/nebula-storaged.conf --daemonize=false"
83 |
84 | # Time period for the pods to do a graceful shutdown
85 | terminationGracePeriodSeconds: 30
86 |
87 | # Liveness probe configuration for the nebula components
88 | livenessProbe:
89 | failureThreshold: 2
90 | httpGet:
91 | path: /status
92 | scheme: HTTP
93 | initialDelaySeconds: 30
94 | timeoutSeconds: 10
95 |
96 | serviceAccount:
97 | # Specifies whether a service account should be created
98 | create: true
99 | # Annotations to add to the service account
100 | annotations: {}
101 | # The name of the service account to use.
102 | # If not set and create is true, a name is generated using the fullname template
103 | name: nebula
104 |
105 | # The maximum number of unavailable pods.
106 | disruptionBudget:
107 | maxUnavailable: 1
108 |
109 | # Controls whether the pod may use the node network namespace.
110 | # This is required if using a custom CNI where your applications are unable to initiate network connections
111 | # to the pods outside the k8s cluster, for example using Flannel CNI plugin on K8S.
112 | hostNetwork: false
113 |
114 | # If 'hostNetwork' is set to true, you should specify metadEndpoints to match the metad replicas.
115 | # Uncomment the following lines, adjust them as necessary, and remove the square brackets after 'metadEndpoints:'.
116 | metadEndpoints: []
117 | # - 192.168.8.25:9559
118 | # - 192.168.8.26:9559
119 | # - 192.168.8.27:9559
120 |
121 | # Leverage a PriorityClass to ensure the pods will survive resource shortages
122 | # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
123 | priorityClassName: ""
124 |
125 | # This value defines `nodeSelector` (https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector)
126 | # labels that constrain which nodes the pods may be scheduled on.
127 | nodeSelector:
128 | nebula: "cloud"
129 |
130 | # Toleration Settings
131 | tolerations: []
132 |
133 | # Affinity Settings
134 | affinity: {}
135 |
136 | # Override various naming aspects of this chart
137 | # Only edit these if you know what you're doing
138 | nameOverride: ""
139 | fullnameOverride: ""
140 |
141 |
142 |
--------------------------------------------------------------------------------
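The values above can be overridden per install with a custom values file or `--set` flags. A minimal sketch, assuming Helm 3 and an install from the local `charts/nebula` directory; the release name, namespace, and node name below are placeholders, not values taken from this repository:

    # label the nodes that should run nebula (the default nodeSelector expects nebula=cloud)
    kubectl label node <node-name> nebula=cloud

    # install the chart, overriding a few of the defaults shown above
    helm install nebula ./charts/nebula \
      --namespace nebula --create-namespace \
      --set storage.storageClass=fast-disks \
      --set replication.graphd.replicas=3 \
      --set port.graphd.serviceType=NodePort
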
/docker-compose-lite.yaml:
--------------------------------------------------------------------------------
1 | version: '3.4'
2 | services:
3 | metad0:
4 | image: docker.io/vesoft/nebula-metad:nightly
5 | environment:
6 | USER: root
7 | command:
8 | - --meta_server_addrs=metad0:9559
9 | - --local_ip=metad0
10 | - --ws_ip=metad0
11 | - --port=9559
12 | - --ws_http_port=19559
13 | - --data_path=/data/meta
14 | # - --log_dir=/logs
15 |       # log to stderr instead of files
16 |       - --logtostderr=true
17 |       - --redirect_stdout=false
18 |       # logging verbosity and minimum log severity
19 | - --v=0
20 | - --minloglevel=0
21 | healthcheck:
22 | test: ["CMD", "curl", "-sf", "http://metad0:19559/status"]
23 | interval: 30s
24 | timeout: 10s
25 | retries: 3
26 | start_period: 20s
27 | ports:
28 | - 9559:9559
29 | - 19559:19559
30 | - 19560
31 | volumes:
32 | - ./data/meta0:/data/meta
33 | # - ./logs/meta0:/logs
34 | networks:
35 | - nebula-net
36 | restart: on-failure
37 | cap_add:
38 | - SYS_PTRACE
39 |
40 | storaged0:
41 | image: docker.io/vesoft/nebula-storaged:nightly
42 | environment:
43 | USER: root
44 | command:
45 | - --meta_server_addrs=metad0:9559
46 | - --local_ip=storaged0
47 | - --ws_ip=storaged0
48 | - --port=9779
49 | - --ws_http_port=19779
50 | - --data_path=/data/storage
51 | # - --log_dir=/logs
52 |       # log to stderr instead of files
53 |       - --logtostderr=true
54 |       - --redirect_stdout=false
55 |       # logging verbosity and minimum log severity
56 | - --v=0
57 | - --minloglevel=0
58 | depends_on:
59 | - metad0
60 | healthcheck:
61 | test: ["CMD", "curl", "-sf", "http://storaged0:19779/status"]
62 | interval: 30s
63 | timeout: 10s
64 | retries: 3
65 | start_period: 20s
66 | ports:
67 | - 9779:9779
68 | - 19779:19779
69 | - 19780
70 | volumes:
71 | - ./data/storage0:/data/storage
72 | # - ./logs/storage0:/logs
73 | networks:
74 | - nebula-net
75 | restart: on-failure
76 | cap_add:
77 | - SYS_PTRACE
78 |
79 | graphd:
80 | image: docker.io/vesoft/nebula-graphd:nightly
81 | environment:
82 | USER: root
83 | command:
84 | - --meta_server_addrs=metad0:9559
85 | - --port=9669
86 | - --local_ip=graphd
87 | - --ws_ip=graphd
88 | - --ws_http_port=19669
89 | # - --log_dir=/logs
90 |       # log to stderr instead of files
91 |       - --logtostderr=true
92 |       - --redirect_stdout=false
93 |       # logging verbosity and minimum log severity
94 | - --v=0
95 | - --minloglevel=0
96 | depends_on:
97 | - storaged0
98 | healthcheck:
99 | test: ["CMD", "curl", "-sf", "http://graphd:19669/status"]
100 | interval: 30s
101 | timeout: 10s
102 | retries: 3
103 | start_period: 20s
104 | ports:
105 | - 9669:9669
106 | - 19669:19669
107 | - 19670
108 | # volumes:
109 | # - ./logs/graph:/logs
110 | networks:
111 | - nebula-net
112 | restart: on-failure
113 | cap_add:
114 | - SYS_PTRACE
115 |
116 | storage-activator:
117 |     # This is a helper that activates storaged on the first run by calling nebula-console.
118 |     # Refer to https://docs.nebula-graph.io/master/4.deployment-and-installation/manage-storage-host/#activate-storaged
119 |     # If you would like to run the console via Docker yourself, run:
120 |
121 | # docker run --rm -ti --network host vesoft/nebula-console:nightly -addr 127.0.0.1 -port 9669 -u root -p nebula
122 |
123 | image: docker.io/vesoft/nebula-console:nightly
124 | entrypoint: ""
125 | environment:
126 | ACTIVATOR_RETRY: ${ACTIVATOR_RETRY:-30}
127 | command:
128 | - sh
129 | - -c
130 | - |
131 | for i in `seq 1 $$ACTIVATOR_RETRY`; do
132 | nebula-console -addr graphd -port 9669 -u root -p nebula -e 'ADD HOSTS "storaged0":9779' 1>/dev/null 2>/dev/null;
133 | if [[ $$? == 0 ]]; then
134 | echo "✔️ Storage activated successfully.";
135 | break;
136 | else
137 | output=$$(nebula-console -addr graphd -port 9669 -u root -p nebula -e 'ADD HOSTS "storaged0":9779' 2>&1);
138 | if echo "$$output" | grep -q "Existed"; then
139 | echo "✔️ Storage already activated, Exiting...";
140 | break;
141 | fi
142 | fi;
143 | if [[ $$i -lt $$ACTIVATOR_RETRY ]]; then
144 |               echo "⏳ Activating storaged, attempt $$i/$$ACTIVATOR_RETRY... It is normal for this to take a few attempts while storaged starts up. Please wait.";
145 |             else
146 |               echo "❌ Failed to activate storaged after $$ACTIVATOR_RETRY attempts. Please check the metad and storaged logs, or restart the storage-activator service to retry.";
147 | echo "ℹ️ Error during storage activation:"
148 | echo "=============================================================="
149 | echo "$$output"
150 | echo "=============================================================="
151 | break;
152 | fi;
153 | sleep 5;
154 | done && tail -f /dev/null;
155 |
156 | depends_on:
157 | - graphd
158 | networks:
159 | - nebula-net
160 |
161 | networks:
162 | nebula-net:
163 |
--------------------------------------------------------------------------------
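A minimal usage sketch for the lite (single-replica) deployment, assuming Docker Compose v2 and a Linux host where `--network host` can reach the published 9669 port; the console invocation is the one from the storage-activator comment above:

    # start metad0, storaged0, graphd and the storage-activator in the background
    docker compose -f docker-compose-lite.yaml up -d

    # follow the activator until it reports that storage was activated
    docker compose -f docker-compose-lite.yaml logs -f storage-activator

    # connect with the console once graphd is healthy
    docker run --rm -ti --network host vesoft/nebula-console:nightly \
      -addr 127.0.0.1 -port 9669 -u root -p nebula
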
/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: '3.4'
2 | services:
3 | metad0:
4 | image: docker.io/vesoft/nebula-metad:nightly
5 | environment:
6 | USER: root
7 | TZ: "${TZ}"
8 | command:
9 | - --meta_server_addrs=metad0:9559,metad1:9559,metad2:9559
10 | - --local_ip=metad0
11 | - --ws_ip=metad0
12 | - --port=9559
13 | - --ws_http_port=19559
14 | - --data_path=/data/meta
15 | - --log_dir=/logs
16 | - --v=0
17 | - --minloglevel=0
18 | healthcheck:
19 | test: ["CMD", "curl", "-sf", "http://metad0:19559/status"]
20 | interval: 30s
21 | timeout: 10s
22 | retries: 3
23 | start_period: 20s
24 | ports:
25 | - 9559
26 | - 19559
27 | - 19560
28 | volumes:
29 | - ./data/meta0:/data/meta
30 | - ./logs/meta0:/logs
31 | networks:
32 | - nebula-net
33 | restart: on-failure
34 | cap_add:
35 | - SYS_PTRACE
36 |
37 | metad1:
38 | image: docker.io/vesoft/nebula-metad:nightly
39 | environment:
40 | USER: root
41 | TZ: "${TZ}"
42 | command:
43 | - --meta_server_addrs=metad0:9559,metad1:9559,metad2:9559
44 | - --local_ip=metad1
45 | - --ws_ip=metad1
46 | - --port=9559
47 | - --ws_http_port=19559
48 | - --data_path=/data/meta
49 | - --log_dir=/logs
50 | - --v=0
51 | - --minloglevel=0
52 | healthcheck:
53 | test: ["CMD", "curl", "-sf", "http://metad1:19559/status"]
54 | interval: 30s
55 | timeout: 10s
56 | retries: 3
57 | start_period: 20s
58 | ports:
59 | - 9559
60 | - 19559
61 | - 19560
62 | volumes:
63 | - ./data/meta1:/data/meta
64 | - ./logs/meta1:/logs
65 | networks:
66 | - nebula-net
67 | restart: on-failure
68 | cap_add:
69 | - SYS_PTRACE
70 |
71 | metad2:
72 | image: docker.io/vesoft/nebula-metad:nightly
73 | environment:
74 | USER: root
75 | TZ: "${TZ}"
76 | command:
77 | - --meta_server_addrs=metad0:9559,metad1:9559,metad2:9559
78 | - --local_ip=metad2
79 | - --ws_ip=metad2
80 | - --port=9559
81 | - --ws_http_port=19559
82 | - --data_path=/data/meta
83 | - --log_dir=/logs
84 | - --v=0
85 | - --minloglevel=0
86 | healthcheck:
87 | test: ["CMD", "curl", "-sf", "http://metad2:19559/status"]
88 | interval: 30s
89 | timeout: 10s
90 | retries: 3
91 | start_period: 20s
92 | ports:
93 | - 9559
94 | - 19559
95 | - 19560
96 | volumes:
97 | - ./data/meta2:/data/meta
98 | - ./logs/meta2:/logs
99 | networks:
100 | - nebula-net
101 | restart: on-failure
102 | cap_add:
103 | - SYS_PTRACE
104 |
105 | storaged0:
106 | image: docker.io/vesoft/nebula-storaged:nightly
107 | environment:
108 | USER: root
109 | TZ: "${TZ}"
110 | command:
111 | - --meta_server_addrs=metad0:9559,metad1:9559,metad2:9559
112 | - --local_ip=storaged0
113 | - --ws_ip=storaged0
114 | - --port=9779
115 | - --ws_http_port=19779
116 | - --data_path=/data/storage
117 | - --log_dir=/logs
118 | - --v=0
119 | - --minloglevel=0
120 | depends_on:
121 | - metad0
122 | - metad1
123 | - metad2
124 | healthcheck:
125 | test: ["CMD", "curl", "-sf", "http://storaged0:19779/status"]
126 | interval: 30s
127 | timeout: 10s
128 | retries: 3
129 | start_period: 20s
130 | ports:
131 | - 9779
132 | - 19779
133 | - 19780
134 | volumes:
135 | - ./data/storage0:/data/storage
136 | - ./logs/storage0:/logs
137 | networks:
138 | - nebula-net
139 | restart: on-failure
140 | cap_add:
141 | - SYS_PTRACE
142 |
143 | storaged1:
144 | image: docker.io/vesoft/nebula-storaged:nightly
145 | environment:
146 | USER: root
147 | TZ: "${TZ}"
148 | command:
149 | - --meta_server_addrs=metad0:9559,metad1:9559,metad2:9559
150 | - --local_ip=storaged1
151 | - --ws_ip=storaged1
152 | - --port=9779
153 | - --ws_http_port=19779
154 | - --data_path=/data/storage
155 | - --log_dir=/logs
156 | - --v=0
157 | - --minloglevel=0
158 | depends_on:
159 | - metad0
160 | - metad1
161 | - metad2
162 | healthcheck:
163 | test: ["CMD", "curl", "-sf", "http://storaged1:19779/status"]
164 | interval: 30s
165 | timeout: 10s
166 | retries: 3
167 | start_period: 20s
168 | ports:
169 | - 9779
170 | - 19779
171 | - 19780
172 | volumes:
173 | - ./data/storage1:/data/storage
174 | - ./logs/storage1:/logs
175 | networks:
176 | - nebula-net
177 | restart: on-failure
178 | cap_add:
179 | - SYS_PTRACE
180 |
181 | storaged2:
182 | image: docker.io/vesoft/nebula-storaged:nightly
183 | environment:
184 | USER: root
185 | TZ: "${TZ}"
186 | command:
187 | - --meta_server_addrs=metad0:9559,metad1:9559,metad2:9559
188 | - --local_ip=storaged2
189 | - --ws_ip=storaged2
190 | - --port=9779
191 | - --ws_http_port=19779
192 | - --data_path=/data/storage
193 | - --log_dir=/logs
194 | - --v=0
195 | - --minloglevel=0
196 | depends_on:
197 | - metad0
198 | - metad1
199 | - metad2
200 | healthcheck:
201 | test: ["CMD", "curl", "-sf", "http://storaged2:19779/status"]
202 | interval: 30s
203 | timeout: 10s
204 | retries: 3
205 | start_period: 20s
206 | ports:
207 | - 9779
208 | - 19779
209 | - 19780
210 | volumes:
211 | - ./data/storage2:/data/storage
212 | - ./logs/storage2:/logs
213 | networks:
214 | - nebula-net
215 | restart: on-failure
216 | cap_add:
217 | - SYS_PTRACE
218 |
219 | graphd:
220 | image: docker.io/vesoft/nebula-graphd:nightly
221 | environment:
222 | USER: root
223 | TZ: "${TZ}"
224 | command:
225 | - --meta_server_addrs=metad0:9559,metad1:9559,metad2:9559
226 | - --port=9669
227 | - --local_ip=graphd
228 | - --ws_ip=graphd
229 | - --ws_http_port=19669
230 | - --log_dir=/logs
231 | - --v=0
232 | - --minloglevel=0
233 | depends_on:
234 | - storaged0
235 | - storaged1
236 | - storaged2
237 | healthcheck:
238 | test: ["CMD", "curl", "-sf", "http://graphd:19669/status"]
239 | interval: 30s
240 | timeout: 10s
241 | retries: 3
242 | start_period: 20s
243 | ports:
244 | - "9669:9669"
245 | - 19669
246 | - 19670
247 | volumes:
248 | - ./logs/graph:/logs
249 | networks:
250 | - nebula-net
251 | restart: on-failure
252 | cap_add:
253 | - SYS_PTRACE
254 |
255 | graphd1:
256 | image: docker.io/vesoft/nebula-graphd:nightly
257 | environment:
258 | USER: root
259 | TZ: "${TZ}"
260 | command:
261 | - --meta_server_addrs=metad0:9559,metad1:9559,metad2:9559
262 | - --port=9669
263 | - --local_ip=graphd1
264 | - --ws_ip=graphd1
265 | - --ws_http_port=19669
266 | - --log_dir=/logs
267 | - --v=0
268 | - --minloglevel=0
269 | depends_on:
270 | - storaged0
271 | - storaged1
272 | - storaged2
273 | healthcheck:
274 | test: ["CMD", "curl", "-sf", "http://graphd1:19669/status"]
275 | interval: 30s
276 | timeout: 10s
277 | retries: 3
278 | start_period: 20s
279 | ports:
280 | - 9669
281 | - 19669
282 | - 19670
283 | volumes:
284 | - ./logs/graph1:/logs
285 | networks:
286 | - nebula-net
287 | restart: on-failure
288 | cap_add:
289 | - SYS_PTRACE
290 |
291 | graphd2:
292 | image: docker.io/vesoft/nebula-graphd:nightly
293 | environment:
294 | USER: root
295 | TZ: "${TZ}"
296 | command:
297 | - --meta_server_addrs=metad0:9559,metad1:9559,metad2:9559
298 | - --port=9669
299 | - --local_ip=graphd2
300 | - --ws_ip=graphd2
301 | - --ws_http_port=19669
302 | - --log_dir=/logs
303 | - --v=0
304 | - --minloglevel=0
305 | depends_on:
306 | - storaged0
307 | - storaged1
308 | - storaged2
309 | healthcheck:
310 | test: ["CMD", "curl", "-sf", "http://graphd2:19669/status"]
311 | interval: 30s
312 | timeout: 10s
313 | retries: 3
314 | start_period: 20s
315 | ports:
316 | - 9669
317 | - 19669
318 | - 19670
319 | volumes:
320 | - ./logs/graph2:/logs
321 | networks:
322 | - nebula-net
323 | restart: on-failure
324 | cap_add:
325 | - SYS_PTRACE
326 |
327 | console:
328 | image: docker.io/vesoft/nebula-console:nightly
329 | entrypoint: ""
330 | command:
331 | - sh
332 | - -c
333 | - |
334 | for i in `seq 1 60`;do
335 | var=`nebula-console -addr graphd -port 9669 -u root -p nebula -e 'ADD HOSTS "storaged0":9779,"storaged1":9779,"storaged2":9779'`;
336 | if [[ $$? == 0 ]];then
337 | break;
338 | fi;
339 | sleep 1;
340 |           echo "retrying to add hosts...";
341 | done && tail -f /dev/null;
342 |
343 | depends_on:
344 | - graphd
345 | networks:
346 | - nebula-net
347 |
348 | networks:
349 | nebula-net:
350 |
--------------------------------------------------------------------------------
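A minimal usage sketch for the full cluster above, assuming Docker Compose v2; the console service keeps running after registering the storaged hosts, so it can be reused to issue queries such as SHOW HOSTS:

    # start the cluster (3x metad, 3x storaged, 3x graphd, plus the console helper)
    docker compose up -d

    # wait until the services report healthy
    docker compose ps

    # check that all three storaged hosts are online
    docker compose exec console \
      nebula-console -addr graphd -port 9669 -u root -p nebula -e 'SHOW HOSTS'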