├── _config.yml ├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE │ └── feature_request.md └── workflows │ └── buildDockerfile.yml ├── neo4j-admin ├── backup │ ├── .helmignore │ ├── testData │ │ ├── test.yaml │ │ └── test2.yaml │ ├── main │ │ └── main.go │ └── go.mod ├── Chart.yaml ├── .helmignore ├── templates │ └── _labels.tpl └── Dockerfile ├── internal ├── helpers │ ├── closeable.go │ ├── random.go │ ├── errors.go │ └── os.go ├── resources │ ├── testData │ │ ├── allowUpgrade.yaml │ │ ├── excludeLoadBalancer.yaml │ │ ├── priorityClassName.yaml │ │ ├── testAntiAffinityRule.yaml │ │ ├── acceptLicenseAgreement.yaml │ │ ├── acceptLicenseAgreementEval.yaml │ │ ├── acceptLicenseAgreementBoolYes.yaml │ │ ├── offlineMaintenanceMode.yaml │ │ ├── acceptLicenseAgreementBoolTrue.yaml │ │ ├── podSpecAnnotations.yaml │ │ ├── nodeselector.yaml │ │ ├── secretMounts │ │ │ ├── emptySecretMounts.yaml │ │ │ ├── invalidSecretMounts.yaml │ │ │ └── seeduriS3SecretMounts.yaml │ │ ├── boolsInConfig.yaml │ │ ├── statefulSetAnnotations.yaml │ │ ├── intsInConfig.yaml │ │ ├── apocCorePlugin.yaml │ │ ├── apocConfig.yaml │ │ ├── inMemoryVolume.yaml │ │ ├── metaspaceconfigs.yaml │ │ ├── additionalVolumes.yaml │ │ ├── read_replica_upstream_selection_strategy.yaml │ │ ├── csvMetrics.yaml │ │ ├── chmodInitContainer.yaml │ │ ├── tolerations.yaml │ │ ├── imagePullSecret │ │ │ ├── missingImageCreds.yaml │ │ │ ├── duplicateImageCreds.yaml │ │ │ ├── emptyImagePullSecrets.yaml │ │ │ └── emptyImageCreds.yaml │ │ ├── jvmAdditionalSettings.yaml │ │ ├── defaultStorageClass.yaml │ │ ├── pluginsInitContainer.yaml │ │ ├── apocClusterTest.yaml │ │ ├── secretMounts.yaml │ │ ├── chmodInitContainerAndCustomInitContainer.yaml │ │ ├── gdsStandaloneTest.yaml │ │ └── nodeAffinity.yaml │ ├── model.go │ └── resources.go ├── internal.go ├── integration_tests │ ├── cluster_connection.go │ ├── test_model.go │ ├── gcloud │ │ ├── model.go │ │ └── commands.go │ ├── volumes_test.go │ ├── persistent_data.go │ ├── maintenance.go │ 
├── cluster_model.go │ └── standalone_test.go ├── model │ ├── loadbalancer_values.go │ ├── reverse_proxy_values.go │ ├── neo4j.go │ └── helm_charts.go ├── backup │ └── operations.go └── unit_tests │ ├── neo4j_resources.go │ ├── helm_template_headless_service_test.go │ ├── backup_operations_test.go │ └── helm_template_standalone_test.go ├── neo4j-reverse-proxy ├── reverse-proxy │ ├── go.mod │ ├── main.go │ ├── proxy │ │ ├── handle.go │ │ └── proxy.go │ └── operations │ │ └── operations.go ├── .helmignore ├── Chart.yaml ├── Dockerfile ├── templates │ ├── _validations.tpl │ ├── ingress.yaml │ ├── NOTES.txt │ ├── reverseProxyServer.yaml │ └── _helpers.tpl └── values.yaml ├── main.go ├── .gitignore ├── .helmignore ├── neo4j-docker-desktop-pv ├── templates │ ├── _helpers.tpl │ └── persistentvolume.yaml ├── .helmignore ├── values.yaml └── Chart.yaml ├── neo4j-persistent-volume ├── templates │ ├── _helpers.tpl │ ├── ops-pv.yaml │ └── data-pv.yaml ├── .helmignore ├── Chart.yaml └── values.yaml ├── artifacthub-repo.yml ├── examples ├── persistent-volume-manual │ ├── persistent-volume-manual.yaml │ ├── cleanup-example-azure.sh │ ├── cleanup-example-gcp.sh │ ├── cleanup-example-aws.sh │ ├── install-example-gcp.sh │ ├── README.md │ ├── install-example-azure.sh │ └── install-example-aws.sh ├── persistent-volume-selector-standalone │ ├── persistent-volume-selector.yaml │ ├── cleanup-example-azure.sh │ ├── cleanup-example-gcp.sh │ ├── cleanup-example-aws.sh │ ├── install-example-gcp.sh │ ├── install-example-azure.sh │ ├── install-example-aws.sh │ └── README.md ├── dedicated-storage-class-cluster │ ├── aws-storage-class.yaml │ ├── aks-storage-class.yaml │ ├── gcp-storage-class.yaml │ ├── dedicated-storage-class.yaml │ ├── cleanup.sh │ ├── install-example-gcp.sh │ ├── install-example-azure.sh │ ├── install-example-aws.sh │ └── README.md ├── bloom-gds-license │ ├── gds-no-license.yaml │ ├── install-gds-no-license.sh │ ├── install-gds-bloom-with-license.sh │ ├── 
gds-bloom-with-license.yaml │ └── README.md ├── persistent-volume-claim-template.yaml ├── persistent-volume-selector-cluster │ ├── persistent-volume-selector.yaml │ ├── cleanup-example-azure.sh │ ├── cleanup-example-gcp.sh │ ├── cleanup-example-aws.sh │ ├── install-example-gcp.sh │ ├── install-example-azure.sh │ ├── install-example-aws.sh │ └── README.md ├── multi-cluster │ ├── multi-cluster-cleanup-aks.sh │ ├── cluster-one-values.yaml │ ├── cluster-two-values.yaml │ ├── cluster-three-values.yaml │ └── README.md └── secret-mounts │ ├── seeduri-s3-values.yaml │ └── create-s3-secret.sh ├── neo4j-loadbalancer ├── templates │ ├── _helpers.tpl │ ├── _labels.tpl │ ├── NOTES.txt │ └── _loadbalancer.tpl ├── .helmignore ├── Chart.yaml └── values.yaml ├── neo4j ├── .helmignore ├── neo4j-operations │ ├── Dockerfile │ ├── verifications.go │ ├── main.go │ ├── k8s.go │ └── go.mod ├── Chart.yaml ├── templates │ ├── _labels.tpl │ ├── _loadbalancer.tpl │ ├── neo4j-servicemonitor.yaml │ ├── _licensing.tpl │ ├── neo4j-pdb.yaml │ ├── neo4j-env.yaml │ ├── neo4j-operations.yaml │ ├── _ssl.tpl │ ├── neo4j-imagePullSecret.yaml │ ├── neo4j-auth.yaml │ ├── _ldap.tpl │ ├── _image.tpl │ └── neo4j-service-account.yaml └── user-logs.xml ├── devenv.local.template ├── neo4j-headless-service ├── .helmignore ├── Chart.yaml ├── templates │ ├── _image.tpl │ ├── _licensing.tpl │ ├── NOTES.txt │ ├── _helpers.tpl │ └── neo4j-svc.yaml └── values.yaml ├── bin ├── docker-desktop-configure-kubectl ├── gcloud-configure-kubectl ├── gcloud-list-images ├── gcloud-auth ├── update-neo4j-conf ├── gcloud-delete-gke-cluster ├── README.md ├── gcloud-clean-gke-cluster ├── gcloud-create-persistent-disk ├── gcloud │ ├── gpg_signing │ ├── auth │ ├── delete_cluster │ ├── create_cluster │ ├── index_yaml_update │ └── cleanup_resources ├── gcloud-create-persistence ├── gcloud-create-gke-cluster ├── docker-desktop-create-persistent-disk ├── gcloud-create-filestore └── run-go-tests ├── README.md └── devenv /_config.yml: 
-------------------------------------------------------------------------------- 1 | theme: jekyll-theme-tactile -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @bfeshti @riggi-alekaj 2 | -------------------------------------------------------------------------------- /neo4j-admin/backup/.helmignore: -------------------------------------------------------------------------------- 1 | .backup/ 2 | .backup_linux 3 | -------------------------------------------------------------------------------- /internal/helpers/closeable.go: -------------------------------------------------------------------------------- 1 | package helpers 2 | 3 | type Closeable func() error 4 | -------------------------------------------------------------------------------- /neo4j-reverse-proxy/reverse-proxy/go.mod: -------------------------------------------------------------------------------- 1 | module reverse-proxy 2 | 3 | go 1.23.8 4 | -------------------------------------------------------------------------------- /internal/resources/testData/allowUpgrade.yaml: -------------------------------------------------------------------------------- 1 | config: 2 | dbms.allow_upgrade: "true" 3 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package helm_charts 2 | 3 | // This file is required so that go get -u works 4 | -------------------------------------------------------------------------------- /internal/resources/testData/excludeLoadBalancer.yaml: -------------------------------------------------------------------------------- 1 | podSpec: 2 | loadbalancer: "exclude" 3 | -------------------------------------------------------------------------------- /internal/resources/testData/priorityClassName.yaml: 
-------------------------------------------------------------------------------- 1 | podSpec: 2 | priorityClassName: "demo" 3 | -------------------------------------------------------------------------------- /internal/resources/testData/testAntiAffinityRule.yaml: -------------------------------------------------------------------------------- 1 | podSpec: 2 | podAntiAffinity: true 3 | -------------------------------------------------------------------------------- /internal/internal.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | // This file is required so that go get -u works 4 | -------------------------------------------------------------------------------- /internal/resources/testData/acceptLicenseAgreement.yaml: -------------------------------------------------------------------------------- 1 | neo4j: 2 | acceptLicenseAgreement: "yes" 3 | -------------------------------------------------------------------------------- /internal/resources/testData/acceptLicenseAgreementEval.yaml: -------------------------------------------------------------------------------- 1 | neo4j: 2 | acceptLicenseAgreement: "eval" -------------------------------------------------------------------------------- /neo4j-admin/backup/testData/test.yaml: -------------------------------------------------------------------------------- 1 | name: "demo" 2 | description: "for uploading to a bucket" 3 | -------------------------------------------------------------------------------- /neo4j-admin/backup/testData/test2.yaml: -------------------------------------------------------------------------------- 1 | name: "demo2" 2 | description: "for uploading to a bucket" 3 | -------------------------------------------------------------------------------- /internal/resources/testData/acceptLicenseAgreementBoolYes.yaml: -------------------------------------------------------------------------------- 1 | neo4j: 2 | acceptLicenseAgreement: 
yes 3 | -------------------------------------------------------------------------------- /internal/resources/testData/offlineMaintenanceMode.yaml: -------------------------------------------------------------------------------- 1 | neo4j: 2 | offlineMaintenanceModeEnabled: true 3 | -------------------------------------------------------------------------------- /internal/resources/testData/acceptLicenseAgreementBoolTrue.yaml: -------------------------------------------------------------------------------- 1 | neo4j: 2 | acceptLicenseAgreement: true 3 | -------------------------------------------------------------------------------- /internal/resources/testData/podSpecAnnotations.yaml: -------------------------------------------------------------------------------- 1 | podSpec: 2 | annotations: 3 | demoKey: "alpha" 4 | 5 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | output/ 2 | temp/ 3 | devenv.local 4 | .idea/ 5 | .kube/ 6 | .DS_Store 7 | *.test 8 | .backup_linux 9 | -------------------------------------------------------------------------------- /internal/resources/testData/nodeselector.yaml: -------------------------------------------------------------------------------- 1 | nodeSelector: 2 | sampleKey: sampleValue 3 | demo123: sample1 4 | -------------------------------------------------------------------------------- /.helmignore: -------------------------------------------------------------------------------- 1 | .neo4j-admin/backup/ 2 | neo4j-admin/Dockerfile 3 | .git 4 | tools 5 | k8s-poc 6 | *.log 7 | test 8 | *.test 9 | -------------------------------------------------------------------------------- /internal/resources/testData/secretMounts/emptySecretMounts.yaml: -------------------------------------------------------------------------------- 1 | # Test data with empty secretMounts configuration 2 | secretMounts: {} 3 | 
-------------------------------------------------------------------------------- /internal/resources/testData/boolsInConfig.yaml: -------------------------------------------------------------------------------- 1 | volumes: 2 | data: 3 | mode: "dynamic" 4 | 5 | config: 6 | metrics.enabled: true 7 | -------------------------------------------------------------------------------- /internal/resources/testData/statefulSetAnnotations.yaml: -------------------------------------------------------------------------------- 1 | statefulset: 2 | metadata: 3 | annotations: 4 | demoKey: "alpha" 5 | 6 | -------------------------------------------------------------------------------- /internal/resources/testData/intsInConfig.yaml: -------------------------------------------------------------------------------- 1 | volumes: 2 | data: 3 | mode: "dynamic" 4 | 5 | config: 6 | metrics.csv.rotation.keep_number: 2 7 | -------------------------------------------------------------------------------- /neo4j-docker-desktop-pv/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{- define "neo4j.appName" -}} 2 | {{ required "neo4j.name is required" .Values.neo4j.name }} 3 | {{- end -}} 4 | -------------------------------------------------------------------------------- /neo4j-persistent-volume/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{- define "neo4j.appName" -}} 2 | {{ required "neo4j.name is required" .Values.neo4j.name }} 3 | {{- end -}} 4 | -------------------------------------------------------------------------------- /internal/resources/testData/apocCorePlugin.yaml: -------------------------------------------------------------------------------- 1 | config: 2 | "dbms.security.procedures.unrestricted": "apoc.*" 3 | "dbms.directories.plugins": "/var/lib/neo4j/labs" 4 | -------------------------------------------------------------------------------- 
/internal/resources/testData/apocConfig.yaml: -------------------------------------------------------------------------------- 1 | apoc_config: 2 | apoc.trigger.enabled: "true" 3 | apoc.jdbc.apoctest.url: "jdbc:foo:bar" 4 | apoc.import.file.enabled: "true" 5 | -------------------------------------------------------------------------------- /internal/resources/testData/inMemoryVolume.yaml: -------------------------------------------------------------------------------- 1 | volumes: 2 | data: 3 | mode: "volume" 4 | volume: 5 | emptyDir: 6 | medium: Memory 7 | sizeLimit: 1Gi 8 | -------------------------------------------------------------------------------- /internal/resources/testData/metaspaceconfigs.yaml: -------------------------------------------------------------------------------- 1 | config: 2 | server.memory.pagecache.size: "74m" 3 | server.memory.heap.initial_size: "317m" 4 | server.memory.heap.max_size: "317m" 5 | -------------------------------------------------------------------------------- /artifacthub-repo.yml: -------------------------------------------------------------------------------- 1 | repositoryID: e907557a-7fe2-4914-bed1-d6a69876cba5 2 | owners: 3 | - name: Bledi Feshti 4 | email: bledi.feshti@neotechnology.com 5 | - name: Rigert Alekaj 6 | email: rigert.alekaj@neo4j.com -------------------------------------------------------------------------------- /internal/resources/testData/additionalVolumes.yaml: -------------------------------------------------------------------------------- 1 | additionalVolumes: 2 | - name: neo4j1-conf 3 | emptyDir: {} 4 | 5 | additionalVolumeMounts: 6 | - mountPath: "/config/neo4j1.conf" 7 | name: neo4j1-conf 8 | -------------------------------------------------------------------------------- /examples/persistent-volume-manual/persistent-volume-manual.yaml: -------------------------------------------------------------------------------- 1 | neo4j: 2 | name: volume-manual 3 | volumes: 4 | data: 5 | mode: volume 6 | 
volume: 7 | persistentVolumeClaim: 8 | claimName: volume-manual-disk-pvc 9 | -------------------------------------------------------------------------------- /internal/resources/testData/read_replica_upstream_selection_strategy.yaml: -------------------------------------------------------------------------------- 1 | config: 2 | causal_clustering.connect-randomly-to-server-group: "read-replicas" 3 | causal_clustering.upstream_selection_strategy: "connect-randomly-to-server-group" 4 | -------------------------------------------------------------------------------- /internal/resources/testData/csvMetrics.yaml: -------------------------------------------------------------------------------- 1 | config: 2 | metrics.enabled: "true" 3 | metrics.namespaces.enabled: "true" 4 | metrics.csv.interval: "10s" 5 | metrics.csv.rotation.keep_number: "2" 6 | metrics.csv.rotation.compression: "NONE" 7 | -------------------------------------------------------------------------------- /internal/helpers/random.go: -------------------------------------------------------------------------------- 1 | package helpers 2 | 3 | import ( 4 | "math/rand" 5 | "time" 6 | ) 7 | 8 | var random = rand.New(rand.NewSource(time.Now().UnixNano())) 9 | 10 | func RandomIntBetween(low, hi int) int { 11 | return low + random.Intn(hi-low) 12 | } 13 | -------------------------------------------------------------------------------- /neo4j-loadbalancer/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{- define "neo4j.name" -}} 2 | {{- required "neo4j.name is required" .Values.neo4j.name }} 3 | {{- end -}} 4 | 5 | {{- define "neo4j.appName" -}} 6 | {{- required "neo4j.name is required" .Values.neo4j.name }} 7 | {{- end -}} 8 | -------------------------------------------------------------------------------- /internal/resources/testData/chmodInitContainer.yaml: -------------------------------------------------------------------------------- 1 | volumes: 
2 | data: 3 | mode: "dynamic" 4 | 5 | logs: 6 | mode: volume 7 | volume: 8 | persistentVolumeClaim: 9 | claimName: logs-filestore 10 | setOwnerAndGroupWritableFilePermissions: true 11 | -------------------------------------------------------------------------------- /internal/resources/testData/tolerations.yaml: -------------------------------------------------------------------------------- 1 | podSpec: 2 | tolerations: 3 | - key: "key1" 4 | operator: "Equal" 5 | value: "value1" 6 | effect: "NoSchedule" 7 | - key: "key2" 8 | operator: "Equal" 9 | value: "value2" 10 | effect: "NoSchedule" 11 | -------------------------------------------------------------------------------- /examples/persistent-volume-selector-standalone/persistent-volume-selector.yaml: -------------------------------------------------------------------------------- 1 | neo4j: 2 | name: volume-selector 3 | volumes: 4 | data: 5 | mode: selector 6 | selector: 7 | storageClassName: "manual" 8 | accessModes: 9 | - ReadWriteOnce 10 | requests: 11 | storage: 10Gi 12 | -------------------------------------------------------------------------------- /internal/resources/testData/imagePullSecret/missingImageCreds.yaml: -------------------------------------------------------------------------------- 1 | image: 2 | imagePullSecrets: 3 | - "secret1" 4 | imageCredentials: 5 | - registry: "https://index.docker.io/v1/" 6 | username: "sampleuser" 7 | password: "samplepass" 8 | email: "sample@gmail.com" 9 | name: "sample" 10 | -------------------------------------------------------------------------------- /examples/dedicated-storage-class-cluster/aws-storage-class.yaml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: neo4j-data 5 | provisioner: ebs.csi.aws.com 6 | parameters: 7 | type: gp3 8 | reclaimPolicy: Retain 9 | allowVolumeExpansion: true 10 | volumeBindingMode: WaitForFirstConsumer 11 | 
-------------------------------------------------------------------------------- /examples/dedicated-storage-class-cluster/aks-storage-class.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: neo4j-data 5 | provisioner: disk.csi.azure.com 6 | parameters: 7 | skuName: Premium_LRS 8 | reclaimPolicy: Retain 9 | volumeBindingMode: WaitForFirstConsumer 10 | allowVolumeExpansion: true 11 | -------------------------------------------------------------------------------- /examples/dedicated-storage-class-cluster/gcp-storage-class.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: neo4j-data 5 | provisioner: pd.csi.storage.gke.io 6 | parameters: 7 | type: pd-ssd 8 | reclaimPolicy: Retain 9 | volumeBindingMode: WaitForFirstConsumer 10 | allowVolumeExpansion: true 11 | -------------------------------------------------------------------------------- /internal/helpers/errors.go: -------------------------------------------------------------------------------- 1 | package helpers 2 | 3 | import "github.com/hashicorp/go-multierror" 4 | 5 | func CombineErrors(firstOrNil error, second error) error { 6 | if firstOrNil == nil { 7 | firstOrNil = second 8 | } else { 9 | firstOrNil = multierror.Append(firstOrNil, second) 10 | } 11 | return firstOrNil 12 | } 13 | -------------------------------------------------------------------------------- /examples/bloom-gds-license/gds-no-license.yaml: -------------------------------------------------------------------------------- 1 | neo4j: 2 | name: gds-no-license 3 | acceptLicenseAgreement: "yes" 4 | edition: enterprise 5 | volumes: 6 | data: 7 | mode: defaultStorageClass 8 | env: 9 | NEO4J_PLUGINS: '["graph-data-science"]' 10 | config: 11 | dbms.security.procedures.unrestricted: "gds.*,apoc.*" 12 | 
-------------------------------------------------------------------------------- /internal/resources/testData/jvmAdditionalSettings.yaml: -------------------------------------------------------------------------------- 1 | jvm: 2 | useNeo4jDefaultJvmArguments: true 3 | additionalJvmArguments: 4 | - "-XX:+HeapDumpOnOutOfMemoryError" 5 | - "-XX:HeapDumpPath=./java_pid.hprof" 6 | - "-XX:+UseGCOverheadLimit" 7 | - "-XX:MaxMetaspaceSize=180m" 8 | - "-XX:ReservedCodeCacheSize=40m" 9 | -------------------------------------------------------------------------------- /examples/persistent-volume-claim-template.yaml: -------------------------------------------------------------------------------- 1 | neo4j: 2 | name: neo4j-volume-claim-template 3 | volumes: 4 | data: 5 | mode: volumeClaimTemplate 6 | volumeClaimTemplate: 7 | storageClassName: "premium-rwo" 8 | accessModes: 9 | - ReadWriteOnce 10 | resources: 11 | requests: 12 | storage: 10Gi 13 | -------------------------------------------------------------------------------- /internal/resources/testData/defaultStorageClass.yaml: -------------------------------------------------------------------------------- 1 | volumes: 2 | data: 3 | mode: "defaultStorageClass" 4 | logs: 5 | mode: "defaultStorageClass" 6 | defaultStorageClass: 7 | requests: 8 | storage: 1Gi 9 | backups: 10 | mode: "defaultStorageClass" 11 | defaultStorageClass: 12 | requests: 13 | storage: 100Gi 14 | -------------------------------------------------------------------------------- /examples/bloom-gds-license/install-gds-no-license.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | readonly PROJECT_ROOT="$(dirname "$(dirname "$(dirname "$0")")")" 3 | readonly RELEASE_NAME=gds-no-license 4 | 5 | helm_install() { 6 | pushd "${PROJECT_ROOT}" > /dev/null || exit 7 | helm install "${RELEASE_NAME}" neo4j -f examples/bloom-gds-license/gds-no-license.yaml 8 | } 9 | 10 | helm_install 11 | 
-------------------------------------------------------------------------------- /examples/persistent-volume-selector-cluster/persistent-volume-selector.yaml: -------------------------------------------------------------------------------- 1 | neo4j: 2 | name: volume-selector 3 | minimumClusterSize: 3 4 | acceptLicenseAgreement: "yes" 5 | edition: enterprise 6 | volumes: 7 | data: 8 | mode: selector 9 | selector: 10 | storageClassName: "manual" 11 | accessModes: 12 | - ReadWriteOnce 13 | requests: 14 | storage: 10Gi 15 | -------------------------------------------------------------------------------- /internal/resources/testData/pluginsInitContainer.yaml: -------------------------------------------------------------------------------- 1 | config: 2 | "server.security.procedures.unrestricted": "apoc.*" 3 | "server.directories.plugins": "/plugins" 4 | 5 | volumes: 6 | plugins: 7 | mode: volume 8 | volume: 9 | emptyDir: { } 10 | 11 | podSpec: 12 | initContainers: 13 | - name: init-plugins 14 | command: [ 'bash', '-c', "cp /var/lib/neo4j/labs/* /plugins" ] 15 | -------------------------------------------------------------------------------- /examples/persistent-volume-manual/cleanup-example-azure.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | readonly PROJECT_ROOT="$(dirname "$(dirname "$(dirname "$0")")")" 3 | readonly RELEASE_NAME=volume-manual 4 | 5 | cleanup() { 6 | pushd "${PROJECT_ROOT}" > /dev/null || exit 7 | helm uninstall ${RELEASE_NAME} ${RELEASE_NAME}-disk --wait --timeout 1m 8 | az disk delete --name ${RELEASE_NAME} -y 9 | } 10 | 11 | cleanup 12 | -------------------------------------------------------------------------------- /examples/persistent-volume-manual/cleanup-example-gcp.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | readonly PROJECT_ROOT="$(dirname "$(dirname "$(dirname "$0")")")" 3 | readonly 
RELEASE_NAME=volume-manual 4 | 5 | cleanup() { 6 | pushd "${PROJECT_ROOT}" > /dev/null || exit 7 | helm uninstall ${RELEASE_NAME} ${RELEASE_NAME}-disk --wait --timeout 1m 8 | gcloud compute disks delete ${RELEASE_NAME} --quiet 9 | } 10 | 11 | cleanup 12 | -------------------------------------------------------------------------------- /internal/resources/testData/apocClusterTest.yaml: -------------------------------------------------------------------------------- 1 | config: 2 | server.directories.plugins: "/var/lib/neo4j/labs" 3 | dbms.security.procedures.unrestricted: "apoc.*" 4 | server.config.strict_validation.enabled: "false" 5 | dbms.security.procedures.allowlist: "gds.*,apoc.*" 6 | 7 | apoc_config: 8 | apoc.trigger.enabled: "true" 9 | apoc.jdbc.apoctest.url: "jdbc:foo:bar" 10 | apoc.import.file.enabled: "true" 11 | -------------------------------------------------------------------------------- /internal/resources/testData/secretMounts.yaml: -------------------------------------------------------------------------------- 1 | secretMounts: 2 | s3-credentials: 3 | secretName: "my-s3-secret" 4 | mountPath: "/var/secrets/s3" 5 | items: 6 | - key: "access-key" 7 | path: "access-key" 8 | - key: "secret-key" 9 | path: "secret-key" 10 | defaultMode: 0600 11 | 12 | tls-certificates: 13 | secretName: "my-tls-certs" 14 | mountPath: "/var/secrets/tls" 15 | -------------------------------------------------------------------------------- /examples/dedicated-storage-class-cluster/dedicated-storage-class.yaml: -------------------------------------------------------------------------------- 1 | neo4j: 2 | name: dedicated-storage-class 3 | passwordFromSecret: neo4j-auth 4 | minimumClusterSize: 3 5 | acceptLicenseAgreement: "yes" 6 | edition: enterprise 7 | volumes: 8 | data: 9 | mode: dynamic 10 | dynamic: 11 | storageClassName: "neo4j-data" 12 | accessModes: 13 | - ReadWriteOnce 14 | requests: 15 | storage: 100Gi 16 | 
-------------------------------------------------------------------------------- /examples/dedicated-storage-class-cluster/cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | readonly RELEASE_NAME=dedicated-storage-class 3 | 4 | cleanup() { 5 | kubectl delete storageclass neo4j-data 6 | helm uninstall ${RELEASE_NAME}-1 ${RELEASE_NAME}-2 ${RELEASE_NAME}-3 7 | kubectl delete pvc data-${RELEASE_NAME}-1-0 8 | kubectl delete pvc data-${RELEASE_NAME}-2-0 9 | kubectl delete pvc data-${RELEASE_NAME}-3-0 10 | 11 | } 12 | 13 | cleanup 14 | -------------------------------------------------------------------------------- /internal/resources/testData/imagePullSecret/duplicateImageCreds.yaml: -------------------------------------------------------------------------------- 1 | image: 2 | imageCredentials: 3 | - registry: "https://index.docker.io/v1/" 4 | username: "sample1" 5 | password: "samplepass" 6 | email: "sample1@gmail.com" 7 | name: "sample1" 8 | - registry: "https://index.docker.io/v1/" 9 | username: "sample2" 10 | password: "samplepass" 11 | email: "sample2@gmail.com" 12 | name: "sample1" 13 | -------------------------------------------------------------------------------- /examples/persistent-volume-selector-cluster/cleanup-example-azure.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | readonly PROJECT_ROOT="$(dirname "$(dirname "$(dirname "$0")")")" 3 | readonly RELEASE_NAME=volume-selector 4 | 5 | cleanup() { 6 | pushd "${PROJECT_ROOT}" > /dev/null || exit 7 | helm uninstall ${RELEASE_NAME} ${RELEASE_NAME}-disk 8 | kubectl delete pvc data-${RELEASE_NAME}-0 9 | az disk delete --name ${RELEASE_NAME} -y 10 | } 11 | 12 | cleanup 13 | -------------------------------------------------------------------------------- /examples/persistent-volume-selector-standalone/cleanup-example-azure.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | readonly PROJECT_ROOT="$(dirname "$(dirname "$(dirname "$0")")")" 3 | readonly RELEASE_NAME=volume-selector 4 | 5 | cleanup() { 6 | pushd "${PROJECT_ROOT}" > /dev/null || exit 7 | helm uninstall ${RELEASE_NAME} ${RELEASE_NAME}-disk 8 | kubectl delete pvc data-${RELEASE_NAME}-0 9 | az disk delete --name ${RELEASE_NAME} -y 10 | } 11 | 12 | cleanup 13 | -------------------------------------------------------------------------------- /neo4j-loadbalancer/templates/_labels.tpl: -------------------------------------------------------------------------------- 1 | {{- define "neo4j.labels" -}} 2 | {{- with .labels -}} 3 | {{- range $name, $value := . }} 4 | {{ $name | quote}}: {{ $value | quote }} 5 | {{- end -}} 6 | {{- end -}} 7 | {{- end }} 8 | 9 | {{- define "neo4j.annotations" -}} 10 | {{- with . -}} 11 | {{- range $name, $value := . }} 12 | {{ $name | quote }}: {{ $value | quote }} 13 | {{- end -}} 14 | {{- end -}} 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /neo4j/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /examples/persistent-volume-selector-standalone/cleanup-example-gcp.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | readonly PROJECT_ROOT="$(dirname "$(dirname "$(dirname "$0")")")" 3 | readonly RELEASE_NAME=volume-selector 4 | 5 | cleanup() { 6 | pushd "${PROJECT_ROOT}" > /dev/null || exit 7 | helm uninstall ${RELEASE_NAME} ${RELEASE_NAME}-disk 8 | kubectl delete pvc data-${RELEASE_NAME}-0 9 | gcloud compute disks delete ${RELEASE_NAME} --quiet 10 | } 11 | 12 | cleanup 13 | -------------------------------------------------------------------------------- /devenv.local.template: -------------------------------------------------------------------------------- 1 | CLOUDSDK_CORE_PROJECT="your project" 2 | CLOUDSDK_COMPUTE_REGION="your region" # e.g. europe-north1 3 | CLOUDSDK_COMPUTE_ZONE="your zone" # e.g. europe-north1-b 4 | CLOUDSDK_CONTAINER_CLUSTER="your container cluster name" # this needs to be set, to create a cluster - set this and run `source devenv && gcloud-create-gke-cluster` 5 | PACKAGE_SIGNING_KEY="Name of gpg key for signing helm packages" 6 | PACKAGE_SIGNING_KEYRING="Path to gpg keyring containing the key" 7 | -------------------------------------------------------------------------------- /neo4j-loadbalancer/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /neo4j-reverse-proxy/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /neo4j-docker-desktop-pv/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /neo4j-headless-service/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /neo4j-persistent-volume/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /internal/integration_tests/cluster_connection.go: -------------------------------------------------------------------------------- 1 | package integration_tests 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | "time" 7 | ) 8 | 9 | func waitForClusterConnection(t *testing.T) error { 10 | maxRetries := 3 11 | for i := 0; i < maxRetries; i++ { 12 | err := run(t, "kubectl", "cluster-info") 13 | if err == nil { 14 | return nil 15 | } 16 | time.Sleep(10 * time.Second) 17 | } 18 | return fmt.Errorf("failed to connect to cluster after %d attempts", maxRetries) 19 | } 20 | -------------------------------------------------------------------------------- /neo4j-admin/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | name: neo4j-admin 3 | home: https://www.neo4j.com 4 | version: 2025.10.1-4 5 | appVersion: 2025.10.1 6 | description: Neo4j is the world's leading graph database 7 | keywords: 8 | - graph 9 | - 
#!/usr/bin/env bash
readonly PROJECT_ROOT="$(dirname "$(dirname "$(dirname "$0")")")"
readonly RELEASE_NAME=volume-selector

# Removes everything created by the persistent-volume-selector cluster
# example on GCP: three helm release pairs, their data PVCs, and disks.
cleanup() {
  pushd "${PROJECT_ROOT}" > /dev/null || exit
  for i in {1..3}; do
    # Quote all expansions to guard against word splitting / globbing (SC2086).
    helm uninstall "${RELEASE_NAME}-${i}" "${RELEASE_NAME}-disk-${i}"
    kubectl delete pvc "data-${RELEASE_NAME}-${i}-0"
    gcloud compute disks delete "${RELEASE_NAME}-${i}" --quiet
  done
}

cleanup
RELEASE_NAME=volume-manual 4 | 5 | cleanup() { 6 | pushd "${PROJECT_ROOT}" > /dev/null || exit 7 | helm uninstall ${RELEASE_NAME} ${RELEASE_NAME}-disk --wait --timeout 1m 8 | aws ec2 delete-volume --volume-id "$(aws ec2 describe-volumes --filters Name=tag:volume,Values="${RELEASE_NAME}" --no-cli-pager --query 'Volumes[0].VolumeId' --output text)" 9 | } 10 | 11 | cleanup 12 | -------------------------------------------------------------------------------- /internal/resources/testData/imagePullSecret/emptyImagePullSecrets.yaml: -------------------------------------------------------------------------------- 1 | image: 2 | imagePullSecrets: 3 | - "secret1" 4 | - " " 5 | - "secret2" 6 | - "" 7 | imageCredentials: 8 | - registry: "https://index.docker.io/v1/" 9 | username: "sampleuser" 10 | password: "samplepass" 11 | email: "sample@gmail.com" 12 | name: "secret1" 13 | - registry: "https://index.docker.io/v1/" 14 | username: "demo" 15 | password: "demo" 16 | email: "demo@gmail.com" 17 | name: "secret2" 18 | -------------------------------------------------------------------------------- /internal/resources/testData/chmodInitContainerAndCustomInitContainer.yaml: -------------------------------------------------------------------------------- 1 | volumes: 2 | data: 3 | mode: "dynamic" 4 | 5 | logs: 6 | mode: volume 7 | volume: 8 | persistentVolumeClaim: 9 | claimName: logs-filestore 10 | setOwnerAndGroupWritableFilePermissions: true 11 | 12 | podSpec: 13 | initContainers: 14 | - name: init-printenv 15 | command: [ 'sh', '-c', "printenv" ] 16 | 17 | containers: 18 | - name: maintenance-sidecar 19 | command: [ 'bash', '-c', "while true; do sleep 120; done" ] 20 | -------------------------------------------------------------------------------- /neo4j-admin/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 
# Builds the neo4j-operations helper binary on top of the Go toolchain image.
FROM golang:1.23.8-alpine
# curl is needed at runtime; the neo4j user/group (uid/gid 7474) matches the
# ids used by the official Neo4j images.
RUN apk --no-cache add curl \
    && addgroup --gid 7474 --system neo4j \
    && adduser --uid 7474 --system --no-create-home --home "/go" --ingroup neo4j neo4j
WORKDIR neo4j-operations
COPY go.mod go.mod
COPY go.sum go.sum
COPY *.go ./
# Download and verify modules, then build the operations binary into /go.
# NOTE(review): chmod -R 777 gives world-writable permissions to /go — kept
# as-is for compatibility, but worth tightening.
RUN go mod download && go mod verify \
    && go build -v -o /go/neo4j_operations_linux \
    && chown -R neo4j:neo4j /go && chmod -R 777 /go
CMD ["/go/neo4j_operations_linux"]
#!/usr/bin/env bash
readonly PROJECT_ROOT="$(dirname "$(dirname "$(dirname "$0")")")"
readonly RELEASE_NAME=volume-selector

# Removes everything created by the persistent-volume-selector standalone
# example on AWS: the two helm releases, the data PVC, and the EBS volume
# (looked up by its "volume" tag).
cleanup() {
  pushd "${PROJECT_ROOT}" > /dev/null || exit
  # Quote all expansions to guard against word splitting / globbing (SC2086).
  helm uninstall "${RELEASE_NAME}" "${RELEASE_NAME}-disk"
  kubectl delete pvc "data-${RELEASE_NAME}-0"
  aws ec2 delete-volume --volume-id "$(aws ec2 describe-volumes --filters Name=tag:volume,Values="${RELEASE_NAME}" --no-cli-pager --query 'Volumes[0].VolumeId' --output text)"
}

cleanup
// Neo4jLoadBalancerValues models the top-level keys of the
// neo4j-loadbalancer chart's values.yaml so installed values can be
// unmarshalled from YAML.
type Neo4jLoadBalancerValues struct {
	Neo4j         Neo4jLoadBalancer `yaml:"neo4j"`
	Annotations   map[string]string `yaml:"annotations"` // free-form annotations copied onto the service
	Ports         Ports             `yaml:"ports"`
	Selector      map[string]string `yaml:"selector"` // label selector used to pick the backing pods
	Spec          Spec              `yaml:"spec"`
	ClusterDomain string            `yaml:"clusterDomain"`
	MultiCluster  bool              `yaml:"multiCluster"`
}

// Neo4jLoadBalancer holds the neo4j-specific subsection of the values
// (release name and edition).
type Neo4jLoadBalancer struct {
	Name    string `yaml:"name"`
	Edition string `yaml:"edition"`
}
#!/usr/bin/env bash
readonly AKS_GROUP=multiClusterGroup

# Tears down the multi-cluster AKS example: the three AKS clusters, the
# application gateway, its vnet and public IP, and finally the resource group.
cleanup() {
  for cluster in clusterone clustertwo clusterthree; do
    az aks delete -y --name "${cluster}" -g "${AKS_GROUP}"
  done
  az network application-gateway delete --name multiClusterGateway -g "${AKS_GROUP}"
  az network vnet delete --name multiClusterVnet --resource-group "${AKS_GROUP}"
  az network public-ip delete -n appGatewayIp -g "${AKS_GROUP}"
  az group delete -g "${AKS_GROUP}" -y
}

cleanup
bledi.feshti@neotechnology.com 18 | - name: Rigert Alekaj 19 | email: rigert.alekaj@neo4j.com 20 | -------------------------------------------------------------------------------- /neo4j-admin/backup/main/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | "os" 6 | ) 7 | 8 | func main() { 9 | 10 | if aggregateEnabled := os.Getenv("AGGREGATE_BACKUP_ENABLED"); aggregateEnabled != "true" { 11 | startupOperations() 12 | } 13 | 14 | cloudProvider := os.Getenv("CLOUD_PROVIDER") 15 | switch cloudProvider { 16 | case "aws": 17 | awsOperations() 18 | break 19 | case "azure": 20 | azureOperations() 21 | break 22 | case "gcp": 23 | gcpOperations() 24 | break 25 | case "": 26 | onPrem() 27 | break 28 | default: 29 | log.Fatalf("Incorrect cloud provider %s", cloudProvider) 30 | } 31 | 32 | } 33 | -------------------------------------------------------------------------------- /examples/dedicated-storage-class-cluster/install-example-gcp.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | readonly PROJECT_ROOT="$(dirname "$(dirname "$(dirname "$0")")")" 3 | readonly RELEASE_NAME=dedicated-storage-class 4 | 5 | helm_install() { 6 | pushd "${PROJECT_ROOT}" > /dev/null || exit 7 | kubectl create secret generic neo4j-auth --from-literal=NEO4J_AUTH=neo4j/password123 8 | kubectl apply -f examples/dedicated-storage-class-cluster/gcp-storage-class.yaml 9 | for i in {1..3}; do 10 | helm install "${RELEASE_NAME}-${i}" neo4j -f examples/dedicated-storage-class-cluster/dedicated-storage-class.yaml 11 | done 12 | } 13 | 14 | helm_install 15 | -------------------------------------------------------------------------------- /examples/persistent-volume-selector-cluster/cleanup-example-aws.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | readonly PROJECT_ROOT="$(dirname "$(dirname 
#!/usr/bin/env bash
readonly PROJECT_ROOT="$(dirname "$(dirname "$(dirname "$0")")")"
readonly RELEASE_NAME=dedicated-storage-class

# Installs a three-member Neo4j cluster on AKS using a dedicated storage
# class, mirroring the GCP variant of this example.
helm_install() {
  pushd "${PROJECT_ROOT}" > /dev/null || exit
  kubectl create secret generic neo4j-auth --from-literal=NEO4J_AUTH=neo4j/password123
  kubectl apply -f examples/dedicated-storage-class-cluster/aks-storage-class.yaml
  for i in {1..3}; do
    # Separate "-f" from its argument for readability and consistency with
    # the GCP install script (the original had "-fexamples/...").
    helm install "${RELEASE_NAME}-${i}" neo4j -f examples/dedicated-storage-class-cluster/dedicated-storage-class.yaml
  done
}

helm_install
#!/usr/bin/env bash
readonly PROJECT_ROOT="$(dirname "$(dirname "$(dirname "$0")")")"
readonly RELEASE_NAME=gds-bloom-with-license
readonly GDS_LICENSE_FILE=${1?' GDS license file path must be 1st argument'}
# BUG FIX: the usage message for the second argument wrongly said "1st argument".
readonly BLOOM_LICENSE_FILE=${2?' Bloom license file path must be 2nd argument'}

# Creates a secret containing both license files and installs Neo4j with the
# bloom/gds example values.
helm_install() {
  pushd "${PROJECT_ROOT}" > /dev/null || exit
  # Quote the user-supplied file paths so paths with spaces survive (SC2086).
  kubectl create secret generic --from-file="${GDS_LICENSE_FILE}","${BLOOM_LICENSE_FILE}" gds-bloom-license
  helm install "${RELEASE_NAME}" neo4j -f examples/bloom-gds-license/gds-bloom-with-license.yaml
}

helm_install
#!/usr/bin/env bash

# This lists neo4j images uploaded into Google Container Repository by this project

# make bash play nicely
set -o pipefail -o errtrace -o errexit -o nounset
shopt -s inherit_errexit
[[ -n "${TRACE:-}" ]] && set -o xtrace

# Required env vars
CLOUDSDK_CORE_PROJECT="${CLOUDSDK_CORE_PROJECT:?CLOUDSDK_CORE_PROJECT is required}"

# Repository holding the images built by this project.
GCR_REPO="eu.gcr.io/${CLOUDSDK_CORE_PROJECT}/neo4j-helm-chart"

gcloud container images list-tags "${GCR_REPO}"

# The trailing ":" is intentional — the user appends a tag from the listing above.
echo "to pull and save use: docker pull ${GCR_REPO}:"
echo "to save use: docker save -o neo4j-enterprise-X.Y.Z.tar ${GCR_REPO}:"
// RunCommand runs the command and returns the bytes captured from its
// standard output and standard error, together with the error from Run.
// It refuses to run a command whose Stdout or Stderr is already wired up,
// because it must own both streams to capture them.
func RunCommand(c *exec.Cmd) ([]byte, []byte, error) {
	log.Printf("Command := %s", strings.Join(c.Args, " "))
	switch {
	case c.Stdout != nil:
		return nil, nil, errors.New("exec: Stdout already set")
	case c.Stderr != nil:
		return nil, nil, errors.New("exec: Stderr already set")
	}
	var stdout, stderr bytes.Buffer
	c.Stdout = &stdout
	c.Stderr = &stderr
	runErr := c.Run()
	return stdout.Bytes(), stderr.Bytes(), runErr
}
#!/usr/bin/env bash

# This authenticates with google cloud and sets up docker to use gcloud authentication for gcr.io repositories

# make bash play nicely
#
set -o pipefail -o errtrace -o errexit -o nounset
shopt -s inherit_errexit
[[ -n "${TRACE:-}" ]] && set -o xtrace

# Prints the currently active gcloud account, or nothing when not logged in.
function get_current_gcloud_account {
  gcloud auth list --filter="status=ACTIVE" --format="get(account)"
}

# Trigger an interactive login only when no account is active yet.
if [[ -z "$(get_current_gcloud_account)" ]]; then
  gcloud auth login --no-launch-browser
fi

echo "Using gcloud account: $(get_current_gcloud_account)"

# Register gcloud as docker's credential helper for the eu.gcr.io registry.
gcloud auth configure-docker --verbosity=error --quiet eu.gcr.io
#!/usr/bin/env bash

# This copies the neo4j.conf file from the ${NEO4J_DOCKER_IMG) docker image to the neo4j helm chart

# make bash play nicely
#
set -o pipefail -o errtrace -o errexit -o nounset
shopt -s inherit_errexit
[[ -n "${TRACE:-}" ]] && set -o xtrace

# Required env vars
NEO4J_DOCKER_IMG="${NEO4J_DOCKER_IMG:?NEO4J_DOCKER_IMG is required}"
NEO4J_EDITION="${NEO4J_EDITION:?NEO4J_EDITION is required}"

# Runs the docker image with `cp` as the entrypoint, mounting the local
# neo4j-standalone/ directory so the image's bundled neo4j.conf is copied
# out as neo4j-<edition>.conf.
function overwrite_conf_using_docker {
  docker run --entrypoint cp -v "$(pwd)/neo4j-standalone/":/neo4j/ "${NEO4J_DOCKER_IMG}" /var/lib/neo4j/conf/neo4j.conf "/neo4j/neo4j-${NEO4J_EDITION}.conf"
}

overwrite_conf_using_docker
-}} 22 | {{- range $name, $value := . }} 23 | {{ $name | quote }}: {{ $value | quote }} 24 | {{- end -}} 25 | {{- end -}} 26 | {{- end }} 27 | -------------------------------------------------------------------------------- /neo4j/neo4j-operations/verifications.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | ) 7 | 8 | func checkRequiredVariables() error { 9 | _, present := os.LookupEnv("RELEASE_NAME") 10 | if !present { 11 | return fmt.Errorf("Please provide the env variable RELEASE_NAME ") 12 | } 13 | _, present = os.LookupEnv("NAMESPACE") 14 | if !present { 15 | return fmt.Errorf("Please provide the env variable NAMESPACE") 16 | } 17 | _, present = os.LookupEnv("SECRETNAME") 18 | if !present { 19 | return fmt.Errorf("please provide the env variable SECRETNAME") 20 | } 21 | _, present = os.LookupEnv("PROTOCOL") 22 | if !present { 23 | return fmt.Errorf("please provide the env variable PROTOCOL") 24 | } 25 | 26 | return nil 27 | } 28 | -------------------------------------------------------------------------------- /neo4j/templates/_loadbalancer.tpl: -------------------------------------------------------------------------------- 1 | {{- define "neo4j.services.neo4j.defaultSpec" -}} 2 | ClusterIP: 3 | sessionAffinity: None 4 | NodePort: 5 | sessionAffinity: None 6 | externalTrafficPolicy: Local 7 | LoadBalancer: 8 | sessionAffinity: None 9 | externalTrafficPolicy: Local 10 | {{- end }} 11 | 12 | 13 | {{- define "neo4j.services.extraSpec" -}} 14 | {{- if hasKey . "type" }}{{ fail "field 'type' is not supported in Neo4j Helm Chart service.*.spec" }}{{ end }} 15 | {{- if hasKey . "selector" }}{{ fail "field 'selector' is not supported in Neo4j Helm Chart service.*.spec" }}{{ end }} 16 | {{- if hasKey . "ports" }}{{ fail "field 'ports' is not supported in Neo4j Helm Chart service.*.spec" }}{{ end }} 17 | {{ toYaml . 
}} 18 | {{- end }} 19 | -------------------------------------------------------------------------------- /bin/gcloud-delete-gke-cluster: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This removes a Kubernetes cluster from GKE 4 | 5 | # make bash play nicely 6 | # 7 | set -o pipefail -o errtrace -o errexit -o nounset 8 | shopt -s inherit_errexit 9 | [[ -n "${TRACE:-}" ]] && set -o xtrace 10 | 11 | # Required env vars 12 | CLOUDSDK_COMPUTE_ZONE="${CLOUDSDK_COMPUTE_ZONE:?CLOUDSDK_COMPUTE_ZONE is required}" 13 | CLOUDSDK_CONTAINER_CLUSTER="${CLOUDSDK_CONTAINER_CLUSTER:?CLOUDSDK_CONTAINER_CLUSTER is required}" 14 | 15 | # delete the current Kubernetes cluster 16 | # 17 | gcloud container clusters delete --zone="${CLOUDSDK_COMPUTE_ZONE}" "${CLOUDSDK_CONTAINER_CLUSTER}" 18 | 19 | echo "This does not clean up any persistent disks that might have been created by this GKE cluster - those have to be cleaned up separately" 20 | -------------------------------------------------------------------------------- /neo4j-loadbalancer/templates/_loadbalancer.tpl: -------------------------------------------------------------------------------- 1 | {{- define "neo4j.services.neo4j.defaultSpec" -}} 2 | ClusterIP: 3 | sessionAffinity: None 4 | NodePort: 5 | sessionAffinity: None 6 | externalTrafficPolicy: Local 7 | LoadBalancer: 8 | sessionAffinity: None 9 | externalTrafficPolicy: Local 10 | {{- end }} 11 | 12 | 13 | {{- define "neo4j.services.extraSpec" -}} 14 | {{- if hasKey . "type" }}{{ fail "field 'type' is not supported in Neo4j LoadBalancer Helm Chart service.*.spec" }}{{ end }} 15 | {{- if hasKey . "selector" }}{{ fail "field 'selector' is not supported in Neo4j LoadBalancer Helm Chart service.*.spec" }}{{ end }} 16 | {{- if hasKey . "ports" }}{{ fail "field 'ports' is not supported in Neo4j Helm LoadBalancerChart service .*.spec" }}{{ end }} 17 | {{ toYaml . 
{{- if hasKey . "ports" }}{{ fail "field 'ports' is not supported in Neo4j LoadBalancer Helm Chart service.*.spec" }}{{ end }}
-------------------------------------------------------------------------------- /examples/multi-cluster/cluster-three-values.yaml: -------------------------------------------------------------------------------- 1 | neo4j: 2 | name: multicluster 3 | minimumClusterSize: 3 4 | acceptLicenseAgreement: "yes" 5 | edition: enterprise 6 | volumes: 7 | data: 8 | mode: defaultStorageClass 9 | services: 10 | neo4j: 11 | annotations: 12 | service.beta.kubernetes.io/azure-load-balancer-internal: "true" 13 | spec: 14 | loadBalancerIP: 10.30.3.101 15 | multiCluster: true 16 | config: 17 | dbms.cluster.discovery.resolver_type: LIST 18 | dbms.cluster.discovery.endpoints: "10.30.1.101:5000, 10.30.2.101:5000, 10.30.3.101:5000" 19 | server.cluster.advertised_address: "10.30.3.101:6000" 20 | server.cluster.raft.advertised_address: "10.30.3.101:7000" 21 | server.bolt.advertised_address: "10.30.3.101:7687" 22 | server.routing.advertised_address: "10.30.3.101:7688" 23 | -------------------------------------------------------------------------------- /neo4j-reverse-proxy/templates/_validations.tpl: -------------------------------------------------------------------------------- 1 | {{- define "neo4j.reverseProxy.tlsValidation" -}} 2 | {{- if and $.Values.reverseProxy.ingress.enabled $.Values.reverseProxy.ingress.tls.enabled -}} 3 | {{- if empty $.Values.reverseProxy.ingress.tls.config -}} 4 | {{ fail (printf "Empty tls config !!") }} 5 | {{- end -}} 6 | {{- range $.Values.reverseProxy.ingress.tls.config -}} 7 | {{- $value := . 
-}} 8 | {{- if kindIs "invalid" $value.secretName -}} 9 | {{ fail (printf "Missing secretName for tls config") }} 10 | {{- end -}} 11 | {{- if empty ($value.secretName | trim) -}} 12 | {{ fail (printf "Empty secretName for tls config") }} 13 | {{- end -}} 14 | {{- end -}} 15 | {{- end -}} 16 | {{- end -}} 17 | -------------------------------------------------------------------------------- /internal/integration_tests/test_model.go: -------------------------------------------------------------------------------- 1 | package integration_tests 2 | 3 | import ( 4 | "fmt" 5 | "math/rand" 6 | "strings" 7 | "time" 8 | 9 | "github.com/neo4j/helm-charts/internal/model" 10 | ) 11 | 12 | var ( 13 | TestRunIdentifier string 14 | ) 15 | 16 | var Neo4jConfFile = fmt.Sprintf("neo4j/neo4j-%s.conf", model.Neo4jEdition) 17 | 18 | func init() { 19 | dt := time.Now() 20 | // Include microseconds and random component for uniqueness 21 | dateTag := dt.Format("15:04:05.00 Mon") 22 | // Add a small random component to ensure uniqueness even within microseconds 23 | randomSuffix := rand.Intn(1000) 24 | dateTag = fmt.Sprintf("%s-%d", dateTag, randomSuffix) 25 | dateTag = strings.ReplaceAll(dateTag, " ", "-") 26 | dateTag = strings.ReplaceAll(dateTag, ":", "-") 27 | dateTag = strings.ReplaceAll(dateTag, ".", "-") 28 | TestRunIdentifier = strings.ToLower(dateTag) 29 | } 30 | -------------------------------------------------------------------------------- /internal/resources/testData/imagePullSecret/emptyImageCreds.yaml: -------------------------------------------------------------------------------- 1 | image: 2 | imageCredentials: 3 | - registry: "https://index.docker.io/v1/" 4 | username: "" 5 | password: "samplepass" 6 | email: "sample@gmail.com" 7 | name: "sample1" 8 | - registry: "https://index.docker.io/v1/" 9 | username: "demo" 10 | password: "" 11 | email: "demo@gmail.com" 12 | name: "sample2" 13 | - registry: "https://index.docker.io/v1/" 14 | username: "demo1" 15 | password: "demo2 
" 16 | email: "" 17 | name: "sample3" 18 | - registry: "https://index.docker.io/v1/" 19 | username: "sample4" 20 | password: "samplepass" 21 | email: "sample@gmail.com" 22 | name: "" 23 | - registry: "https://index.docker.io/v1/" 24 | username: "" 25 | password: "samplepass" 26 | email: "" 27 | name: "sample5" 28 | -------------------------------------------------------------------------------- /bin/README.md: -------------------------------------------------------------------------------- 1 | # bin/ 2 | 3 | This directory contains scripts that are useful for the development and testing of neo4j-helm charts. 4 | These scripts ARE NOT intended to be shipped with the helm chart or depended on at runtime in any way. 5 | These scripts ARE NOT permitted to assume that they are running in TeamCity (such scripts should go in the `build/` directory) 6 | 7 | ## Assumptions 8 | 9 | These scripts may assume: 10 | 11 | - `source devenv` has been run 12 | - All scripts in this directory are available on the path 13 | 14 | ## Dependencies 15 | 16 | - `kubectl` 17 | - `go` SDK 18 | - For gcloud development the `gcloud` cli tool 19 | 20 | ## Rules 21 | 22 | Scripts that are specific to a particular Cloud / Kubernetes provider should be prefixed with the provider name. 23 | 24 | - `gcloud-` for Google cloud 25 | - `kind-` for Kubernetes IN Docker 26 | - `aws-` for AWS 27 | 28 | etc. 
-------------------------------------------------------------------------------- /bin/gcloud-clean-gke-cluster: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This cleans resources related to a running GKE k8s cluster but it doesn't remove the cluster itself 4 | # it is useful to run this after tests that panic or are otherwise terminated unexpectedly 5 | 6 | # make bash play nicely 7 | # 8 | set -o pipefail -o errtrace -o errexit -o nounset 9 | shopt -s inherit_errexit 10 | [[ -n "${TRACE:-}" ]] && set -o xtrace 11 | 12 | # Delete expected resources 13 | kubectl delete namespace neo4j --ignore-not-found 14 | helm uninstall neo4j-pv || echo "not found ?" 15 | gcloud compute disks delete neo4j-data-disk || true 16 | gcloud compute disks delete neo4j-disk || true 17 | 18 | # Required env var! 19 | CLOUDSDK_COMPUTE_ZONE="${CLOUDSDK_COMPUTE_ZONE:?CLOUDSDK_COMPUTE_ZONE is required}" 20 | 21 | gcloud filestore instances delete "neo4j-filestore" --zone="${CLOUDSDK_COMPUTE_ZONE}" || true 22 | 23 | echo "cleanup complete" 24 | -------------------------------------------------------------------------------- /examples/bloom-gds-license/gds-bloom-with-license.yaml: -------------------------------------------------------------------------------- 1 | neo4j: 2 | name: licenses 3 | acceptLicenseAgreement: "yes" 4 | edition: enterprise 5 | volumes: 6 | data: 7 | mode: defaultStorageClass 8 | licenses: 9 | mode: volume 10 | disableSubPathExpr: true 11 | volume: 12 | secret: 13 | secretName: gds-bloom-license 14 | items: 15 | - key: gds.license 16 | path: gds.license 17 | - key: bloom.license 18 | path: bloom.license 19 | env: 20 | NEO4J_PLUGINS: '["graph-data-science", "bloom"]' 21 | config: 22 | gds.enterprise.license_file: "/licenses/gds.license" 23 | dbms.security.procedures.unrestricted: "gds.*,apoc.*,bloom.*" 24 | server.unmanaged_extension_classes: "com.neo4j.bloom.server=/bloom,semantics.extension=/rdf" 25 | 
dbms.security.http_auth_allowlist: "/,/browser.*,/bloom.*" 26 | dbms.bloom.license_file: "/licenses/bloom.license" 27 | -------------------------------------------------------------------------------- /internal/integration_tests/gcloud/model.go: -------------------------------------------------------------------------------- 1 | package gcloud 2 | 3 | import ( 4 | "log" 5 | "os" 6 | ) 7 | 8 | type Project string 9 | type Zone string 10 | type Region string 11 | type ContainerCluster string 12 | 13 | func CurrentProject() Project { 14 | return Project(getRequired("CLOUDSDK_CORE_PROJECT")) 15 | } 16 | func CurrentZone() Zone { 17 | return Zone(getRequired("CLOUDSDK_COMPUTE_ZONE")) 18 | } 19 | func CurrentRegion() Region { 20 | return Region(getRequired("CLOUDSDK_COMPUTE_REGION")) 21 | } 22 | func CurrentCluster() ContainerCluster { 23 | return ContainerCluster(getRequired("CLOUDSDK_CONTAINER_CLUSTER")) 24 | } 25 | 26 | func getRequired(envKey string) string { 27 | value, found := os.LookupEnv(envKey) 28 | if !found { 29 | log.Panicf("Environment variable %s is required but was not set", envKey) 30 | } 31 | return value 32 | } 33 | 34 | func init() { 35 | os.Setenv("CLOUDSDK_CORE_DISABLE_PROMPTS", "True") 36 | } 37 | -------------------------------------------------------------------------------- /examples/dedicated-storage-class-cluster/install-example-aws.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | readonly PROJECT_ROOT="$(dirname "$(dirname "$(dirname "$0")")")" 3 | readonly RELEASE_NAME=dedicated-storage-class 4 | 5 | helm_install() { 6 | if ! kubectl get daemonset ebs-csi-node -n kube-system &> /dev/null; then 7 | echo "WARNING: EBS CSI Driver not found, this example will not work." 
// main starts the reverse proxy: it validates the environment, builds the
// proxy handler, and serves HTTP on 0.0.0.0:$PORT until a fatal error.
func main() {

	// Fail fast before serving traffic if the environment or the backing
	// Neo4j service is not usable.
	startup()

	h, err := proxy.NewHandle()
	if err != nil {
		log.Fatal(err)
	}
	http.Handle("/", h)

	// PORT is validated by operations.CheckEnvVariables inside startup().
	domain := fmt.Sprintf("0.0.0.0:%s", os.Getenv("PORT"))
	log.Printf("Listening on %s", domain)

	log.Fatal(http.ListenAndServe(domain, nil))
}

// startup validates the required environment variables and checks
// connectivity to the backing service, terminating the process on failure.
func startup() {
	errors := operations.CheckEnvVariables()
	if len(errors) != 0 {
		log.Fatalf("%v", errors)
	}

	// Default target is the in-cluster DNS name <service>.<namespace>.svc.<domain>;
	// an explicit IP env var overrides it — NOTE(review): confirm this
	// precedence (IP wins over service DNS) is the intended behavior.
	hostname := fmt.Sprintf("%s.%s.svc.%s", os.Getenv("SERVICE_NAME"), os.Getenv("NAMESPACE"), os.Getenv("DOMAIN"))
	if ip, present := os.LookupEnv("IP"); present {
		hostname = ip
	}

	err := operations.CheckConnectivity(hostname)
	if err != nil {
		log.Fatal(err)
	}
}
-}} 2 | {{- if $.Values.reverseProxy.ingress.enabled -}} 3 | apiVersion: networking.k8s.io/v1 4 | kind: Ingress 5 | metadata: 6 | name: {{ include "neo4j.fullname" . }}-reverseproxy-ingress 7 | namespace: "{{ .Release.Namespace }}" 8 | {{- include "neo4j.annotations" $.Values.reverseProxy.ingress.annotations | indent 2 }} 9 | spec: 10 | ingressClassName: "{{ .Values.reverseProxy.ingress.className | default "nginx" }}" 11 | {{- include "neo4j.ingress.tls" . | indent 2 }} 12 | rules: 13 | - http: 14 | paths: 15 | - pathType: Prefix 16 | backend: 17 | service: 18 | name: {{ include "neo4j.fullname" . }}-reverseproxy-service 19 | port: 20 | number: {{ include "neo4j.reverseProxy.port" . }} 21 | path: / 22 | {{- include ".neo4j.ingress.host" . | indent 6 -}} 23 | {{- end -}} 24 | -------------------------------------------------------------------------------- /bin/gcloud-create-persistent-disk: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This creates the expected persistent disk 4 | 5 | # make bash play nicely 6 | # 7 | set -o pipefail -o errtrace -o errexit -o nounset 8 | shopt -s inherit_errexit 9 | [[ -n "${TRACE:-}" ]] && set -o xtrace 10 | 11 | 12 | # Required env vars 13 | DISK_NAME="${1:?Missing argument. 
Usage: gcloud-create-persistent-disk }" 14 | CLOUDSDK_COMPUTE_ZONE="${CLOUDSDK_COMPUTE_ZONE:?CLOUDSDK_COMPUTE_ZONE is required}" 15 | 16 | 17 | # Local vars 18 | DISK_SIZE="${DISK_SIZE:-100Gi}" 19 | NAMESPACE="${NAMESPACE:-default}" 20 | 21 | 22 | # Create disk if it doesn't already exist 23 | gcloud compute disks describe "${DISK_NAME}" 1>&2 || gcloud compute disks create --size "${DISK_SIZE}" --type pd-ssd --zone="${CLOUDSDK_COMPUTE_ZONE}" "${DISK_NAME}" 1>&2 24 | 25 | # Print the necessary yaml for use with our pv helm charts 26 | cat << EOF 27 | data: 28 | capacity: 29 | storage: "${DISK_SIZE}" 30 | gcePersistentDisk: "${DISK_NAME}" 31 | EOF 32 | -------------------------------------------------------------------------------- /internal/backup/operations.go: -------------------------------------------------------------------------------- 1 | package backup 2 | 3 | import ( 4 | "log" 5 | "os" 6 | "path/filepath" 7 | ) 8 | 9 | func DeleteBackupFiles(backupFileNames, consistencyCheckReports []string) error { 10 | if value, present := os.LookupEnv("KEEP_BACKUP_FILES"); present && value == "false" { 11 | backupDir := "/backups" // default 12 | if dir, exists := os.LookupEnv("BACKUP_DIR"); exists { 13 | backupDir = dir 14 | } 15 | 16 | for _, backupFileName := range backupFileNames { 17 | filePath := filepath.Join(backupDir, backupFileName) 18 | log.Printf("Deleting file %s", filePath) 19 | err := os.Remove(filePath) 20 | if err != nil { 21 | return err 22 | } 23 | } 24 | for _, consistencyCheckReportName := range consistencyCheckReports { 25 | filePath := filepath.Join(backupDir, consistencyCheckReportName) 26 | log.Printf("Deleting file %s", filePath) 27 | err := os.Remove(filePath) 28 | if err != nil { 29 | return err 30 | } 31 | } 32 | } 33 | return nil 34 | } 35 | -------------------------------------------------------------------------------- /internal/integration_tests/volumes_test.go: 
// TestVolumesInGCloudK8s installs the combined community/enterprise Neo4j
// Helm chart into the test GKE cluster and then runs the volume-related
// sub-tests against that release.
func TestVolumesInGCloudK8s(t *testing.T) {
	chart := model.Neo4jHelmChartCommunityAndEnterprise
	// Unique release name per test run so parallel runs do not collide.
	releaseName := model.NewReleaseName("volumes-" + TestRunIdentifier)
	t.Parallel()

	t.Logf("Starting setup of '%s'", t.Name())
	defaultHelmArgs := []string{}
	defaultHelmArgs = append(defaultHelmArgs, model.DefaultNeo4jNameArg...)
	defaultHelmArgs = append(defaultHelmArgs, resources.TestAntiAffinityRule.HelmArgs()...)
	_, err := installNeo4j(t, releaseName, chart, defaultHelmArgs...)
	// Cleanup is registered before the error check so that a partially
	// created release is still uninstalled on failure.
	t.Cleanup(standaloneCleanup(t, releaseName))

	if !assert.NoError(t, err) {
		return
	}

	t.Logf("Succeeded with setup of '%s'", t.Name())

	runSubTests(t, volumesTests(releaseName, chart))
}
This is because the loadbalancer exposes internal clustering ports 16 | * create an azure application gateway to access the cluster 17 | 18 | # Deploy multi zone cluster 19 | ```shell 20 | ./multi-cluster-example-aks.sh 21 | ``` 22 | 23 | # Cleanup example 24 | ```shell 25 | ./multi-cluster-cleanup-aks.sh 26 | ``` 27 | -------------------------------------------------------------------------------- /neo4j/templates/neo4j-servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.serviceMonitor.enabled -}} 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ include "neo4j.fullname" . }}-servicemonitor 6 | namespace: "{{ .Release.Namespace }}" 7 | {{- if .Values.serviceMonitor.labels }} 8 | labels: {{ toYaml $.Values.serviceMonitor.labels | nindent 4 }} 9 | {{- end }} 10 | spec: 11 | endpoints: 12 | - port: {{ .Values.serviceMonitor.port | quote }} 13 | interval: {{ .Values.serviceMonitor.interval }} 14 | path: {{ .Values.serviceMonitor.path }} 15 | {{- if .Values.serviceMonitor.jobLabel }} 16 | jobLabel: {{ .Values.serviceMonitor.jobLabel | quote }} 17 | {{- end }} 18 | namespaceSelector: {{ toYaml .Values.serviceMonitor.namespaceSelector | nindent 4 }} 19 | {{- if .Values.serviceMonitor.targetLabels }} 20 | targetLabels: {{ toYaml $.Values.serviceMonitor.targetLabels | nindent 4 }} 21 | {{- end }} 22 | selector: {{ toYaml $.Values.serviceMonitor.selector | nindent 4 }} 23 | {{- end }} 24 | -------------------------------------------------------------------------------- /.github/workflows/buildDockerfile.yml: -------------------------------------------------------------------------------- 1 | name: Build Docker File (Dev) 2 | 3 | on: 4 | workflow_dispatch: 5 | pull_request: 6 | paths: 7 | - .github/workflows/buildDockerfile.yml 8 | - build/Dockerfile 9 | branches: 10 | - dev 11 | 12 | jobs: 13 | build-docker-file: 14 | name: Build Docker file 15 | runs-on: 
      - name: Login to Google Artifact Registry
        uses: docker/login-action@v3
        with:
          registry: europe-west2-docker.pkg.dev
          username: _json_key
          password: ${{ secrets.GCLOUD_SERVICE_KEY }}
5 | 6 | The example will use the following Helm values 7 | ```yaml 8 | neo4j: 9 | name: dedicated-storage-class 10 | minimumClusterSize: 3 11 | acceptLicenseAgreement: "yes" 12 | edition: enterprise 13 | volumes: 14 | data: 15 | mode: dynamic 16 | dynamic: 17 | storageClassName: "neo4j-data" 18 | accessModes: 19 | - ReadWriteOnce 20 | requests: 21 | storage: 100Gi 22 | ``` 23 | 24 | ## Install in AWS 25 | ```shell 26 | ./install-example-aws.sh 27 | ``` 28 | 29 | ## Install in GCP 30 | ```shell 31 | ./install-example-gcp.sh 32 | ``` 33 | 34 | ## Install in Azure 35 | ```shell 36 | 37 | ./install-example-azure.sh 38 | ``` 39 | 40 | ## Cleanup the example 41 | ```shell 42 | ./cleanup.sh 43 | ``` 44 | -------------------------------------------------------------------------------- /bin/gcloud/gpg_signing: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # make bash play nicely 3 | # 4 | set -o pipefail -o errtrace -o errexit -o nounset 5 | shopt -s inherit_errexit 6 | [[ -n "${TRACE:-}" ]] && set -o xtrace 7 | 8 | ROLE_ARN_GPG="${ROLE_ARN_GPG:?ROLE_ARN_GPG is required}" 9 | SECRET_ID_GPG="${SECRET_ID_GPG:?SECRET_ID_GPG is required}" 10 | 11 | #mkdir -p /root/.aws 12 | mkdir -p $HOME/.aws 13 | 14 | echo "[profile secrets] 15 | region = eu-west-1 16 | output = json 17 | role_arn = ${ROLE_ARN_GPG} 18 | credential_source = Environment" | tee $HOME/.aws/config 19 | #credential_source = Environment" | tee /root/.aws/config 20 | 21 | # fetch the key 22 | aws --profile secrets secretsmanager get-secret-value --secret-id ${SECRET_ID_GPG} | jq --raw-output '.SecretString' | jq --raw-output '.privateKey' | base64 --decode > signing-key.asc 23 | 24 | # stick it in a sandbox 25 | mkdir signingkeysandbox 26 | chmod 700 signingkeysandbox 27 | gpg --homedir signingkeysandbox --batch --import signing-key.asc || true 28 | #gpg --homedir signingkeysandbox --list-secret-keys 29 | # cleanup 30 | rm signing-key.asc 31 | 
-------------------------------------------------------------------------------- /bin/gcloud/auth: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This authenticates with google cloud using a service account key (for TeamCity) 4 | 5 | # make bash play nicely 6 | # 7 | set -o pipefail -o errtrace -o errexit -o nounset 8 | shopt -s inherit_errexit 9 | [[ -n "${TRACE:-}" ]] && set -o xtrace 10 | 11 | # Required env vars 12 | GCLOUD_SERVICE_KEY="${GCLOUD_SERVICE_KEY:?GCLOUD_SERVICE_KEY is required}" 13 | 14 | # Params 15 | SERVICE_KEY_FILE="gcloud.key" 16 | 17 | # Auth with gcloud 18 | #echo "${GCLOUD_SERVICE_KEY}" | base64 -d > "${SERVICE_KEY_FILE}" 19 | echo "${GCLOUD_SERVICE_KEY}" > "${SERVICE_KEY_FILE}" 20 | gcloud auth activate-service-account --key-file="${SERVICE_KEY_FILE}" 21 | rm "${SERVICE_KEY_FILE}" 22 | 23 | function get_current_gcloud_account { 24 | gcloud auth list --filter="status=ACTIVE" --format="get(account)" 25 | } 26 | 27 | if [[ -z "$(get_current_gcloud_account)" ]]; then 28 | gcloud auth login --no-launch-browser 29 | fi 30 | 31 | echo "Using gcloud account: $(get_current_gcloud_account)" 32 | 33 | gcloud auth configure-docker --verbosity=error --quiet eu.gcr.io 34 | 35 | -------------------------------------------------------------------------------- /examples/persistent-volume-manual/install-example-gcp.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | readonly PROJECT_ROOT="$(dirname "$(dirname "$(dirname "$0")")")" 3 | readonly RELEASE_NAME=volume-manual 4 | readonly GCP_ZONE="$(gcloud config get compute/zone)" 5 | readonly GCP_PROJECT="$(gcloud config get project)" 6 | 7 | helm_install() { 8 | pushd "${PROJECT_ROOT}" > /dev/null || exit 9 | gcloud compute disks create --size 10Gi --type pd-ssd "${RELEASE_NAME}" 10 | helm install "${RELEASE_NAME}"-disk neo4j-persistent-volume \ 11 | --set 
neo4j.name="${RELEASE_NAME}" \ 12 | --set data.driver=pd.csi.storage.gke.io \ 13 | --set data.storageClassName="manual" \ 14 | --set data.reclaimPolicy="Delete" \ 15 | --set data.createPvc=true \ 16 | --set data.createStorageClass=false \ 17 | --set data.volumeHandle="projects/${GCP_PROJECT}/zones/${GCP_ZONE}/disks/${RELEASE_NAME}" \ 18 | --set data.capacity.storage=10Gi 19 | helm install "${RELEASE_NAME}" neo4j -f examples/persistent-volume-manual/persistent-volume-manual.yaml 20 | } 21 | 22 | helm_install 23 | -------------------------------------------------------------------------------- /neo4j-headless-service/templates/_image.tpl: -------------------------------------------------------------------------------- 1 | {{- define "neo4j.defaultChartImage" -}} 2 | {{- $isEnterprise := required "neo4j.edition must be specified" .Values.neo4j.edition | regexMatch "(?i)enterprise" -}} 3 | {{- $imageName := "neo4j:" -}} 4 | {{/* .Chart.AppVersion is set to "-" for headless and loadbalancer service*/}} 5 | {{- if eq $.Chart.AppVersion "-" -}} 6 | {{- $imageName = printf "%s%s" $imageName $.Chart.Version -}} 7 | {{- else -}} 8 | {{- $imageName = printf "%s%s" $imageName $.Chart.AppVersion -}} 9 | {{- end -}} 10 | {{- if $isEnterprise -}} 11 | {{- $imageName = printf "%s%s" $imageName "-enterprise" -}} 12 | {{- end -}} 13 | {{- $imageName -}} 14 | {{- end -}} 15 | 16 | 17 | {{- define "neo4j.image" -}} 18 | {{- template "neo4j.checkLicenseAgreement" . -}} 19 | {{- $image := include "neo4j.defaultChartImage" . 
-}} 20 | {{/* Allow override if a custom image has been specified */}} 21 | {{- if .Values.image -}} 22 | {{- if .Values.image.customImage -}} 23 | {{- $image = .Values.image.customImage -}} 24 | {{- end -}} 25 | {{- end -}} 26 | {{ $image }} 27 | {{- end -}} 28 | -------------------------------------------------------------------------------- /bin/gcloud-create-persistence: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This creates the expected persistent disk and filestore in Google Cloud and attaches them to K8s Persistent Volumes 4 | 5 | # make bash play nicely 6 | # 7 | set -o pipefail -o errtrace -o errexit -o nounset 8 | shopt -s inherit_errexit 9 | [[ -n "${TRACE:-}" ]] && set -o xtrace 10 | 11 | # Required env vars 12 | RELEASE_NAME="${1:?Missing argument. Usage: gcloud-create-persistence }" 13 | CLOUDSDK_COMPUTE_ZONE="${CLOUDSDK_COMPUTE_ZONE:?CLOUDSDK_COMPUTE_ZONE is required}" 14 | 15 | # Optional env vars 16 | NAMESPACE="${NAMESPACE:-default}" 17 | 18 | # Fixed / derived env vars 19 | FILESTORE_NAME="${RELEASE_NAME}-filestore" 20 | PERSISTENT_DISK_NAME="${RELEASE_NAME}-disk" 21 | PV_HELM_NAME="${RELEASE_NAME}-pv" 22 | 23 | # configure kubectl 24 | gcloud-configure-kubectl 1>&2 25 | 26 | ( gcloud-create-persistent-disk "${PERSISTENT_DISK_NAME}" & gcloud-create-filestore "${FILESTORE_NAME}" ) | tee >(cat 1>&2) | helm install -f - \ 27 | --namespace "${NAMESPACE}" "${PV_HELM_NAME}" ./neo4j-gcloud-pv \ 28 | --set neo4j.name="${RELEASE_NAME}" \ 29 | 1>&2 30 | -------------------------------------------------------------------------------- /examples/persistent-volume-selector-standalone/install-example-gcp.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | readonly PROJECT_ROOT="$(dirname "$(dirname "$(dirname "$0")")")" 3 | readonly RELEASE_NAME=volume-selector 4 | readonly GCP_ZONE="$(gcloud config get compute/zone)" 5 | 
readonly GCP_PROJECT="$(gcloud config get project)" 6 | 7 | helm_install() { 8 | pushd "${PROJECT_ROOT}" > /dev/null || exit 9 | gcloud compute disks create --size 10Gi --type pd-ssd "${RELEASE_NAME}" 10 | helm install "${RELEASE_NAME}"-disk neo4j-persistent-volume \ 11 | --set neo4j.name="${RELEASE_NAME}" \ 12 | --set data.driver=pd.csi.storage.gke.io \ 13 | --set data.storageClassName="manual" \ 14 | --set data.reclaimPolicy="Delete" \ 15 | --set data.createPvc=false \ 16 | --set data.createStorageClass=true \ 17 | --set data.volumeHandle="projects/${GCP_PROJECT}/zones/${GCP_ZONE}/disks/${RELEASE_NAME}" \ 18 | --set data.capacity.storage=10Gi 19 | helm install "${RELEASE_NAME}" neo4j -f examples/persistent-volume-selector-standalone/persistent-volume-selector.yaml 20 | } 21 | 22 | helm_install 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Helm-Charts 2 | 3 | This repository contains Helm charts that supports both Neo4j standalone and Neo4j clusters 4 | 5 | Helm charts for Neo4j clusters are supported from version >= 4.4.0 6 | 7 | Helm charts can be downloaded from [here](https://neo4j.com/deployment-center/#tools-tab) 8 | 9 | [Full Documentation can be found here](https://neo4j.com/docs/operations-manual/current/kubernetes/) 10 | 11 | ## Examples 12 | See the `examples` directory for common usage patterns of this Helm Chart 13 | 14 | * [Dynamic volumes with dedicated storage class](../dev/examples/dedicated-storage-class-cluster/README.md) 15 | * [Using Bloom and GDS Plugins](../dev/examples/bloom-gds-license/README.md) 16 | * [Manually created disks with a volume selector (Standalone)](../dev/examples/persistent-volume-selector-standalone/README.md) 17 | * [Manually created disks with a volume selector (Cluster)](../dev/examples/persistent-volume-selector-cluster/README.md) 18 | * [Manually created disks with a pre 
provisioned PVC](../dev/examples/persistent-volume-manual/README.md) 19 | * [Multi AKS cluster](../dev/examples/multi-cluster/README.md) 20 | 21 | 22 | -------------------------------------------------------------------------------- /neo4j-docker-desktop-pv/templates/persistentvolume.yaml: -------------------------------------------------------------------------------- 1 | kind: "PersistentVolume" 2 | apiVersion: "v1" 3 | metadata: 4 | # n.b. persistent volumes don't seem to belong to namespaces 5 | name: "{{ .Release.Name }}" 6 | labels: 7 | app: "{{ template "neo4j.appName" . }}" 8 | helm.neo4j.com/volume-role: "data" 9 | spec: 10 | accessModes: 11 | - ReadWriteOnce 12 | capacity: 13 | storage: "{{ required "capacity.storage is required (e.g. 10Gi)" .Values.capacity.storage }}" 14 | hostPath: 15 | path: "{{ required "hostPath is required (e.g. /tmp/neo4j)" .Values.hostPath }}" 16 | type: Directory 17 | persistentVolumeReclaimPolicy: Delete 18 | --- 19 | apiVersion: v1 20 | kind: PersistentVolumeClaim 21 | metadata: 22 | name: "{{ .Release.Name }}" 23 | labels: 24 | app: "{{ template "neo4j.appName" . }}" 25 | helm.neo4j.com/volume-role: "data" 26 | spec: 27 | storageClassName: "" 28 | volumeName: "{{ .Release.Name }}" 29 | accessModes: 30 | - ReadWriteOnce 31 | resources: 32 | requests: 33 | storage: "{{ required "capacity.storage is required (e.g. 
10Gi)" .Values.capacity.storage }}" 34 | -------------------------------------------------------------------------------- /neo4j/templates/_licensing.tpl: -------------------------------------------------------------------------------- 1 | {{- define "neo4j.checkLicenseAgreement" }} 2 | {{- $isEnterprise := required "neo4j.edition must be specified" .Values.neo4j.edition | regexMatch "(?i)enterprise" -}} 3 | {{- if $isEnterprise }} 4 | {{- if not (kindIs "string" .Values.neo4j.acceptLicenseAgreement) | or (not .Values.neo4j.acceptLicenseAgreement) }} 5 | {{- include "neo4j.licenseAgreementMessage" .Values.neo4j.acceptLicenseAgreement | fail }} 6 | {{- else }} 7 | {{- if and (ne .Values.neo4j.acceptLicenseAgreement "yes") (ne .Values.neo4j.acceptLicenseAgreement "eval") }} 8 | {{- include "neo4j.licenseAgreementMessage" .Values.neo4j.acceptLicenseAgreement | fail }} 9 | {{- end }} 10 | {{- end }} 11 | {{- end }} 12 | {{- end }} 13 | 14 | {{- define "neo4j.licenseAgreementMessage" }} 15 | 16 | In order to use Neo4j Enterprise Edition you must have a Neo4j license agreement. 17 | More information is available at: https://neo4j.com/licensing/ 18 | Email inquiries can be directed to: licensing@neo4j.com 19 | Set neo4j.acceptLicenseAgreement: "yes" or neo4j.acceptLicenseAgreement: "eval" to confirm that you have a Neo4j license agreement. 
20 | 21 | {{ end -}} 22 | -------------------------------------------------------------------------------- /neo4j-reverse-proxy/reverse-proxy/proxy/handle.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "net/http" 7 | "net/http/httputil" 8 | "os" 9 | ) 10 | 11 | type Handle struct { 12 | HostName string 13 | BoltProxy *httputil.ReverseProxy 14 | Neo4jProxy *httputil.ReverseProxy 15 | } 16 | 17 | func NewHandle() (*Handle, error) { 18 | hostname := fmt.Sprintf("%s.%s.svc.%s", os.Getenv("SERVICE_NAME"), os.Getenv("NAMESPACE"), os.Getenv("DOMAIN")) 19 | log.Printf("Hostname := %s", hostname) 20 | if ip, present := os.LookupEnv("IP"); present { 21 | hostname = ip 22 | } 23 | neo4jProxy, err := httpProxy(hostname) 24 | if err != nil { 25 | return nil, err 26 | } 27 | bProxy, err := boltProxy(hostname) 28 | if err != nil { 29 | return nil, err 30 | } 31 | return &Handle{ 32 | HostName: hostname, 33 | BoltProxy: bProxy, 34 | Neo4jProxy: neo4jProxy, 35 | }, nil 36 | } 37 | 38 | func (h Handle) ServeHTTP(responseWriter http.ResponseWriter, request *http.Request) { 39 | 40 | proxy := h.Neo4jProxy 41 | if request.Header.Get("Upgrade") == "websocket" { 42 | proxy = h.BoltProxy 43 | } 44 | proxy.ServeHTTP(responseWriter, request) 45 | } 46 | -------------------------------------------------------------------------------- /neo4j/templates/neo4j-pdb.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.podDisruptionBudget.enabled }} 2 | --- 3 | apiVersion: "policy/v1" 4 | kind: PodDisruptionBudget 5 | metadata: 6 | name: {{ include "neo4j.fullname" . 
}} 7 | namespace: {{ .Release.Namespace }} 8 | {{- if .Values.podDisruptionBudget.labels }} 9 | labels: {{ toYaml $.Values.podDisruptionBudget.labels | nindent 4 }} 10 | {{- end }} 11 | spec: 12 | {{- if .Values.podDisruptionBudget.minAvailable }} 13 | minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} 14 | {{- end }} 15 | {{- if .Values.podDisruptionBudget.maxUnavailable }} 16 | maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} 17 | {{- end }} 18 | selector: 19 | {{- if .Values.podDisruptionBudget.matchExpressions }} 20 | matchExpressions: {{ toYaml .Values.podDisruptionBudget.matchExpressions | nindent 6 }} 21 | {{- end }} 22 | matchLabels: 23 | app: "{{ template "neo4j.name" . }}" 24 | helm.neo4j.com/instance: "{{ include "neo4j.fullname" . }}" 25 | {{- if .Values.podDisruptionBudget.matchLabels }} 26 | {{ toYaml .Values.podDisruptionBudget.matchLabels }} 27 | {{- end }} 28 | {{- end }} 29 | -------------------------------------------------------------------------------- /neo4j-headless-service/templates/_licensing.tpl: -------------------------------------------------------------------------------- 1 | {{- define "neo4j.checkLicenseAgreement" }} 2 | {{- $isEnterprise := required "neo4j.edition must be specified" .Values.neo4j.edition | regexMatch "(?i)enterprise" -}} 3 | {{- if $isEnterprise }} 4 | {{- if not (kindIs "string" .Values.neo4j.acceptLicenseAgreement) | or (not .Values.neo4j.acceptLicenseAgreement) }} 5 | {{- include "neo4j.licenseAgreementMessage" .Values.neo4j.acceptLicenseAgreement | fail }} 6 | {{- else }} 7 | {{- if and (ne .Values.neo4j.acceptLicenseAgreement "yes") (ne .Values.neo4j.acceptLicenseAgreement "eval") }} 8 | {{- include "neo4j.licenseAgreementMessage" .Values.neo4j.acceptLicenseAgreement | fail }} 9 | {{- end }} 10 | {{- end }} 11 | {{- end }} 12 | {{- end }} 13 | 14 | {{- define "neo4j.licenseAgreementMessage" }} 15 | 16 | In order to use Neo4j Enterprise Edition you must have a Neo4j license agreement. 
17 | More information is available at: https://neo4j.com/licensing/ 18 | Email inquiries can be directed to: licensing@neo4j.com 19 | Set neo4j.acceptLicenseAgreement: "yes" or neo4j.acceptLicenseAgreement: "eval" to confirm that you have a Neo4j license agreement. 20 | 21 | {{ end -}} 22 | -------------------------------------------------------------------------------- /neo4j/templates/neo4j-env.yaml: -------------------------------------------------------------------------------- 1 | {{- $authDisabled := index .Values.config "dbms.security.auth_enabled" | default "" | regexMatch "(?i)no|false" -}} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: "{{ include "neo4j.fullname" . }}-env" 6 | namespace: "{{ .Release.Namespace }}" 7 | labels: 8 | app: "{{ template "neo4j.name" $ }}" 9 | {{- include "neo4j.labels" $.Values.neo4j | indent 4 }} 10 | data: 11 | # It should not be necessary for neo4j users/administrators to modify this configMap 12 | # Neo4j configuration is set in the {{ include "neo4j.fullname" . }}-user-config ConfigMap 13 | {{- if or (eq .Values.neo4j.acceptLicenseAgreement "yes") (eq .Values.neo4j.acceptLicenseAgreement "eval") }} 14 | NEO4J_ACCEPT_LICENSE_AGREEMENT: {{ .Values.neo4j.acceptLicenseAgreement | quote }} 15 | {{- end }} 16 | {{- if not $authDisabled }} 17 | NEO4J_AUTH_PATH: "/config/neo4j-auth/NEO4J_AUTH" 18 | {{- end }} 19 | NEO4J_EDITION: "{{ .Values.neo4j.edition | upper }}_K8S" 20 | NEO4J_CONF: "/config/" 21 | K8S_NEO4J_NAME: "{{ template "neo4j.name" . 
}}" 22 | EXTENDED_CONF: "yes" 23 | {{- if .Values.env }} 24 | {{- .Values.env | toYaml | nindent 2 }} 25 | {{- end }} 26 | -------------------------------------------------------------------------------- /examples/secret-mounts/seeduri-s3-values.yaml: -------------------------------------------------------------------------------- 1 | # Example values.yaml for mounting S3 credentials for seedURI operations 2 | 3 | neo4j: 4 | name: "my-neo4j" 5 | edition: "enterprise" 6 | acceptLicenseAgreement: "eval" 7 | 8 | # Standard volume configuration 9 | volumes: 10 | data: 11 | mode: "defaultStorageClass" 12 | defaultStorageClass: 13 | accessModes: 14 | - ReadWriteOnce 15 | requests: 16 | storage: 50Gi 17 | 18 | # Mount S3 credentials securely 19 | secretMounts: 20 | s3-credentials: 21 | secretName: "cloud-s3-credentials" 22 | mountPath: "/var/secrets/s3" 23 | items: 24 | - key: "access-key-id" 25 | path: "access-key" 26 | - key: "secret-access-key" 27 | path: "secret-key" 28 | - key: "endpoint" 29 | path: "endpoint" 30 | - key: "region" 31 | path: "region" 32 | # Set restrictive permissions for security 33 | defaultMode: 0600 34 | 35 | # Example Neo4j configuration 36 | config: 37 | # Enable authentication 38 | dbms.security.auth_enabled: "true" 39 | 40 | # Optional: Configure services as needed 41 | services: 42 | neo4j: 43 | enabled: true 44 | admin: 45 | enabled: true 46 | -------------------------------------------------------------------------------- /examples/persistent-volume-selector-cluster/install-example-gcp.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | readonly PROJECT_ROOT="$(dirname "$(dirname "$(dirname "$0")")")" 3 | readonly RELEASE_NAME=volume-selector 4 | readonly GCP_ZONE="$(gcloud config get compute/zone)" 5 | readonly GCP_PROJECT="$(gcloud config get project)" 6 | 7 | helm_install() { 8 | pushd "${PROJECT_ROOT}" > /dev/null || exit 9 | for i in {1..3}; do 10 | gcloud compute disks 
create --size 10Gi --type pd-ssd "${RELEASE_NAME}-${i}" 11 | helm install "${RELEASE_NAME}-disk-${i}" neo4j-persistent-volume \ 12 | --set neo4j.name="${RELEASE_NAME}" \ 13 | --set data.driver=pd.csi.storage.gke.io \ 14 | --set data.storageClassName="manual" \ 15 | --set data.reclaimPolicy="Delete" \ 16 | --set data.createPvc=false \ 17 | --set data.createStorageClass=true \ 18 | --set data.volumeHandle="projects/${GCP_PROJECT}/zones/${GCP_ZONE}/disks/${RELEASE_NAME}" \ 19 | --set data.capacity.storage=10Gi 20 | helm install "${RELEASE_NAME}-${i}" neo4j -f examples/persistent-volume-selector/persistent-volume-selector-cluster.yaml 21 | done 22 | } 23 | 24 | helm_install 25 | -------------------------------------------------------------------------------- /internal/integration_tests/persistent_data.go: -------------------------------------------------------------------------------- 1 | package integration_tests 2 | 3 | import ( 4 | "github.com/neo4j/helm-charts/internal/model" 5 | "testing" 6 | ) 7 | 8 | func ResourcesCleanup(t *testing.T, releaseName model.ReleaseName) error { 9 | return run(t, "helm", "uninstall", releaseName.String(), "--namespace", string(releaseName.Namespace()), "--wait", "--timeout=3m") 10 | } 11 | 12 | func ResourcesReinstall(t *testing.T, releaseName model.ReleaseName, chart model.Neo4jHelmChartBuilder) error { 13 | 14 | defaultHelmArgs := []string{} 15 | defaultHelmArgs = append(defaultHelmArgs, model.DefaultNeo4jNameArg...) 16 | defaultHelmArgs = append(defaultHelmArgs, "--wait", "--timeout", "300s") 17 | err := run(t, "helm", model.BaseHelmCommand("install", releaseName, chart, model.Neo4jEdition, defaultHelmArgs...)...) 
18 | if err != nil { 19 | t.Log("Helm Install failed:", err) 20 | _ = run(t, "kubectl", "get", "events") 21 | return err 22 | } 23 | err = run(t, "kubectl", "--namespace", string(releaseName.Namespace()), "rollout", "status", "--watch", "--timeout=120s", "statefulset/"+releaseName.String()) 24 | if err != nil { 25 | t.Log("Helm Install failed:", err) 26 | return err 27 | } 28 | return err 29 | } 30 | -------------------------------------------------------------------------------- /bin/gcloud/delete_cluster: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This removes a Kubernetes cluster from GKE 4 | 5 | # make bash play nicely 6 | # 7 | set -o pipefail -o errtrace -o errexit -o nounset 8 | shopt -s inherit_errexit 9 | [[ -n "${TRACE:-}" ]] && set -o xtrace 10 | 11 | # Required env vars 12 | CLOUDSDK_COMPUTE_ZONE="${CLOUDSDK_COMPUTE_ZONE:?CLOUDSDK_COMPUTE_ZONE is required}" 13 | RESOURCE_PREFIX="${RESOURCE_PREFIX:-ghactions}" 14 | 15 | # If a specific cluster name is provided, use it; otherwise use the prefix pattern 16 | if [ -n "${CLOUDSDK_CONTAINER_CLUSTER:-}" ]; then 17 | echo "Deleting specific cluster: ${CLOUDSDK_CONTAINER_CLUSTER}" 18 | gcloud container clusters delete --quiet --zone="${CLOUDSDK_COMPUTE_ZONE}" "${CLOUDSDK_CONTAINER_CLUSTER}" || true 19 | else 20 | echo "No specific cluster name provided. 
Looking for clusters with prefix: ${RESOURCE_PREFIX}" 21 | gcloud container clusters list --format="get(name)" --filter="name ~ ^${RESOURCE_PREFIX}-.*" | while read -r cluster; do 22 | if [ -n "$cluster" ]; then 23 | echo "Deleting cluster: $cluster" 24 | gcloud container clusters delete "$cluster" --zone="${CLOUDSDK_COMPUTE_ZONE}" --quiet || true 25 | fi 26 | done 27 | fi 28 | 29 | -------------------------------------------------------------------------------- /neo4j-reverse-proxy/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | Thank you for installing neo4j-reverse-proxy helm chart 2 | 3 | This chart installs the following resources in "{{ .Release.Namespace }}" namespace: 4 | * Pod = "{{ include "neo4j.fullname" . }}-reverseproxy" 5 | * ClusterIP service = "{{ include "neo4j.fullname" . }}-reverseproxy-service" 6 | 7 | {{- if $.Values.reverseProxy.ingress.enabled }} 8 | {{- $ingressName := printf "%s-reverseproxy-ingress" (include "neo4j.fullname" .) -}} 9 | {{- $hostname := printf "$(kubectl get ingress/%s -n %s -o jsonpath='{.status.loadBalancer.ingress[0].ip}')" $ingressName .Release.Namespace -}} 10 | {{- $port := include "neo4j.reverseProxy.port" . }} 11 | * Ingress = "{{ include "neo4j.fullname" . }}-reverseproxy-ingress" 12 | 13 | You can get the ingress address by executing the below command. 
(It can take a few seconds for the address to appear) 14 | {{ printf "kubectl get ingress/%s -n %s -o jsonpath='{.status.loadBalancer.ingress[0].ip}'" $ingressName .Release.Namespace }} 15 | 16 | You can execute the following URL in your browser to access Neo4j 17 | {{- if $.Values.reverseProxy.ingress.tls.enabled }} 18 | https://[INGRESS_ADDRESS]:{{ $port }} 19 | {{- else }} 20 | http://[INGRESS_ADDRESS]:{{ $port }} 21 | {{- end }} 22 | {{- end }} 23 | -------------------------------------------------------------------------------- /neo4j-persistent-volume/templates/ops-pv.yaml: -------------------------------------------------------------------------------- 1 | {{- with .Values.ops }} 2 | {{- if or .ipAddress .fileShare | or .capacity.storage | or .storageClassName -}} 3 | kind: PersistentVolume 4 | apiVersion: v1 5 | metadata: 6 | # n.b. persistent volumes don't seem to belong to namespaces 7 | name: "{{ $.Release.Name }}-ops" 8 | labels: 9 | helm.neo4j.com/volume-role: "ops" 10 | app: "{{ template "neo4j.appName" $ }}" 11 | spec: 12 | accessModes: 13 | - ReadWriteMany 14 | capacity: 15 | storage: "{{ required "ops.capacity.storage is required (e.g. 1Ti)" .capacity.storage }}" 16 | nfs: 17 | path: "/{{ required "ops.fileShare is required" .fileShare }}" 18 | server: "{{ required "ops.ipAddress is required" .ipAddress }}" 19 | persistentVolumeReclaimPolicy: Retain 20 | --- 21 | apiVersion: v1 22 | kind: PersistentVolumeClaim 23 | metadata: 24 | name: "{{ $.Release.Name }}-ops" 25 | labels: 26 | helm.neo4j.com/volume-role: "ops" 27 | app: "{{ template "neo4j.appName" $ }}" 28 | spec: 29 | storageClassName: "{{ .storageClassName }}" 30 | volumeName: "{{ $.Release.Name }}-ops" 31 | accessModes: 32 | - ReadWriteMany 33 | resources: 34 | requests: 35 | storage: "{{ required "ops.capacity.storage is required (e.g. 
1Ti)" .capacity.storage }}" 36 | {{- end }} 37 | {{- end }} 38 | -------------------------------------------------------------------------------- /neo4j-admin/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG IMAGE 2 | FROM golang:1.23.8-bullseye as build 3 | RUN apt-get update && apt-get install -y curl && apt-get remove -y python* && apt-get autoremove -y && apt-get clean 4 | WORKDIR backup 5 | COPY backup/main main/ 6 | COPY backup/neo4j-admin neo4j-admin/ 7 | COPY backup/go.mod go.mod 8 | RUN go mod tidy && go mod download && go mod verify 9 | RUN env GOOS=linux GOARCH=amd64 go build -v -o backup_linux main/* 10 | 11 | ARG IMAGE 12 | FROM ${IMAGE} as final 13 | ARG DISTRIBUTION 14 | RUN \ 15 | if [ "${DISTRIBUTION}" = "debian" ]; then \ 16 | apt-get update && apt-get install -y bash netcat-openbsd curl wget gnupg apt-transport-https apt-utils lsb-release unzip less && \ 17 | apt-get remove -y python* && apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/* ; \ 18 | else \ 19 | #for redhat 20 | microdnf update -y && microdnf install -y bash nc wget gnupg yum-utils unzip less && \ 21 | # Safely remove python packages without affecting protected packages 22 | microdnf remove -y python3 python3-pip python2 python2-pip --exclude=dnf,libdnf --nobest || true && \ 23 | microdnf clean all ; \ 24 | fi 25 | COPY --from=build /go/backup/backup_linux bin/backup 26 | ENV NEO4J_server_config_strict__validation_enabled=false 27 | RUN chown neo4j:neo4j bin/backup 28 | CMD ["bin/backup"] 29 | -------------------------------------------------------------------------------- /devenv: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [ -f ./devenv.local ]; then 4 | . 
./devenv.local 5 | # Cloud SDK variables 6 | export CLOUDSDK_CORE_PROJECT 7 | export CLOUDSDK_COMPUTE_ZONE 8 | export CLOUDSDK_COMPUTE_REGION 9 | export CLOUDSDK_CONTAINER_CLUSTER 10 | 11 | # AWS credentials 12 | export AWS_ACCESS_KEY_ID 13 | export AWS_SECRET_ACCESS_KEY 14 | 15 | # Azure storage 16 | export AZURE_STORAGE_KEY 17 | export AZURE_STORAGE_ACCOUNT 18 | export AZURE_CLIENT_ID 19 | export AZURE_CLIENT_SECRET 20 | export AZURE_TENANT_ID 21 | 22 | # Bloom license 23 | export BLOOM_LICENSE 24 | 25 | # GCP service account 26 | export GCP_SERVICE_ACCOUNT_CRED 27 | 28 | # IPS credentials 29 | export IPS_USERNAME 30 | export IPS_PASS 31 | export IPS_EMAIL 32 | 33 | # Docker images 34 | export NEO4J_DOCKER_IMG 35 | export NEO4J_DOCKER_BACKUP_IMG 36 | export NEO4J_REVERSE_PROXY_IMG 37 | export NEO4J_OPERATIONS_IMG 38 | 39 | # Repository and registry settings 40 | export ARTIFACT_REGISTRY_REPO_NAME 41 | export HELM_REPO_NAME 42 | 43 | else 44 | echo "Couldn't find ./devenv.local." 45 | fi 46 | 47 | # Add scripts to path 48 | export PATH=${PATH}:./bin 49 | 50 | # Use a local kubeconfig 51 | KUBECONFIG=".kube/config" 52 | if [ -f ~/.kube/config ]; then 53 | cp ~/.kube/config "${KUBECONFIG}" 54 | fi 55 | export KUBECONFIG 56 | -------------------------------------------------------------------------------- /examples/persistent-volume-manual/README.md: -------------------------------------------------------------------------------- 1 | # Example - Install Neo4j using manually created disks and a Persistent Volume Claim 2 | 3 | This example uses manually provisioned cloud disks for the Neo4j storage volumes. 4 | The `neo4j-persistent-volume` chart is used to configure a PV and PVC for the disk. 
5 | The `neo4j` chart then configures the statefulset to mount the PVC 6 | 7 | The example will use the following Helm values 8 | ```yaml 9 | neo4j: 10 | name: volume-manual 11 | volumes: 12 | data: 13 | mode: volume 14 | volume: 15 | persistentVolumeClaim: 16 | claimName: volume-manual-disk-pvc 17 | ``` 18 | 19 | ## Install in AWS 20 | ```shell 21 | export AWS_ZONE=us-east-1a 22 | ./install-example-aws.sh $AWS_ZONE 23 | ``` 24 | 25 | ## Cleanup AWS 26 | ```shell 27 | ./cleanup-example-aws.sh 28 | ``` 29 | 30 | ## Install in GCP 31 | ```shell 32 | export CLOUDSDK_CORE_PROJECT=my-gcp-project 33 | export CLOUDSDK_COMPUTE_ZONE=my-zone 34 | ./install-example-gcp.sh 35 | ``` 36 | 37 | ## Cleanup GCP 38 | ```shell 39 | ./cleanup-example-gcp.sh 40 | ``` 41 | 42 | ## Install in Azure 43 | ```shell 44 | export AKS_CLUSTER_NAME=my-neo4j-cluster 45 | export AZURE_RESOURCE_GROUP=myResourceGroup 46 | export AZURE_LOCATION=mylocation 47 | ./install-example-azure.sh $AKS_CLUSTER_NAME $AZURE_RESOURCE_GROUP $AZURE_LOCATION 48 | ``` 49 | 50 | ## Cleanup Azure 51 | ```shell 52 | ./cleanup-example-azure.sh 53 | ``` 54 | -------------------------------------------------------------------------------- /examples/persistent-volume-manual/install-example-azure.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | readonly PROJECT_ROOT="$(dirname "$(dirname "$(dirname "$0")")")" 3 | readonly RELEASE_NAME=volume-manual 4 | readonly AKS_CLUSTER_NAME=${1?' Azure AKS cluster name must be 1st argument'} 5 | readonly AZ_RESOURCE_GROUP=${2?' Azure resource group must be 1st argument'} 6 | readonly AZ_LOCATION=${3?' 
Azure location must be 2nd argument'} 7 | 8 | helm_install() { 9 | pushd "${PROJECT_ROOT}" > /dev/null || exit 10 | local -r node_resource_group=$(az aks show --resource-group "${AZ_RESOURCE_GROUP}" --name "${AKS_CLUSTER_NAME}" --query nodeResourceGroup -o tsv) 11 | local -r disk_id=$(az disk create --name "${RELEASE_NAME}" --size-gb "10" --max-shares 1 --resource-group "${node_resource_group}" --location ${AZ_LOCATION} --output tsv --query id) 12 | helm install "${RELEASE_NAME}"-disk neo4j-persistent-volume \ 13 | --set neo4j.name="${RELEASE_NAME}" \ 14 | --set data.driver=disk.csi.azure.com \ 15 | --set data.storageClassName="manual" \ 16 | --set data.reclaimPolicy="Delete" \ 17 | --set data.createPvc=true \ 18 | --set data.createStorageClass=false \ 19 | --set data.volumeHandle="${disk_id}" \ 20 | --set data.capacity.storage=10Gi 21 | helm install "${RELEASE_NAME}" neo4j -f examples/persistent-volume-manual/persistent-volume-manual.yaml 22 | } 23 | 24 | helm_install 25 | -------------------------------------------------------------------------------- /internal/model/reverse_proxy_values.go: -------------------------------------------------------------------------------- 1 | package model 2 | 3 | type Neo4jReverseProxyValues struct { 4 | NameOverride string `yaml:"nameOverride,omitempty"` 5 | FullnameOverride string `yaml:"fullnameOverride,omitempty"` 6 | ReverseProxy ReverseProxy `yaml:"reverseProxy,omitempty"` 7 | } 8 | 9 | type ReverseProxy struct { 10 | Image string `yaml:"image,omitempty"` 11 | ImagePullSecrets []string `yaml:"imagePullSecrets,omitempty"` 12 | ServiceName string `yaml:"serviceName,omitempty"` 13 | Namespace string `yaml:"namespace,omitempty"` 14 | Domain string `yaml:"domain,omitempty"` 15 | Ingress Ingress `yaml:"ingress,omitempty"` 16 | PodLabels map[string]string `yaml:"podLabels,omitempty"` 17 | NodeSelector map[string]string `yaml:"nodeSelector,omitempty"` 18 | } 19 | 20 | type Ingress struct { 21 | Enabled bool `yaml:"enabled"` 22 | 
Annotations map[string]string `yaml:"annotations,omitempty"` 23 | TLS TLS `yaml:"tls,omitempty"` 24 | Host string `yaml:"host,omitempty"` 25 | } 26 | 27 | type TLS struct { 28 | Enabled bool `yaml:"enabled"` 29 | Config []Config `yaml:"config,omitempty"` 30 | } 31 | 32 | type Config struct { 33 | Hosts []string `yaml:"hosts,omitempty"` 34 | SecretName string `yaml:"secretName,omitempty"` 35 | } 36 | -------------------------------------------------------------------------------- /bin/gcloud-create-gke-cluster: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This creates a new Kubernetes cluster on GKE 4 | 5 | # make bash play nicely 6 | # 7 | set -o pipefail -o errtrace -o errexit -o nounset 8 | shopt -s inherit_errexit 9 | [[ -n "${TRACE:-}" ]] && set -o xtrace 10 | 11 | # Required env vars 12 | CLOUDSDK_CORE_PROJECT="${CLOUDSDK_CORE_PROJECT:?CLOUDSDK_CORE_PROJECT is required}" 13 | CLOUDSDK_CONTAINER_CLUSTER="${CLOUDSDK_CONTAINER_CLUSTER:?CLOUDSDK_CONTAINER_CLUSTER is required}" 14 | CLOUDSDK_COMPUTE_ZONE="${CLOUDSDK_COMPUTE_ZONE:?CLOUDSDK_COMPUTE_ZONE is required}" 15 | 16 | # Parameters 17 | NODE_MACHINE="e2-standard-4" 18 | NUM_NODES="${NUM_NODES:-11}" 19 | 20 | # For more info on release channels see https://cloud.google.com/kubernetes-engine/docs/concepts/release-channels 21 | RELEASE_CHANNEL="stable" 22 | # make a new Kubernetes cluster 23 | # 24 | gcloud container clusters create "${CLOUDSDK_CONTAINER_CLUSTER}" \ 25 | --release-channel=${RELEASE_CHANNEL} \ 26 | --zone="${CLOUDSDK_COMPUTE_ZONE}" \ 27 | --num-nodes="${NUM_NODES}" \ 28 | --workload-pool="${CLOUDSDK_CORE_PROJECT}.svc.id.goog" \ 29 | --preemptible --machine-type="${NODE_MACHINE}" --image-type="COS_CONTAINERD" \ 30 | --disk-type="pd-ssd" --disk-size="20" \ 31 | --max-pods-per-node=30 --enable-ip-alias \ 32 | --enable-shielded-nodes --metadata=disable-legacy-endpoints=true --no-enable-basic-auth 33 | 34 | gcloud-configure-kubectl 
35 | -------------------------------------------------------------------------------- /examples/persistent-volume-selector-cluster/install-example-azure.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | readonly PROJECT_ROOT="$(dirname "$(dirname "$(dirname "$0")")")" 3 | readonly RELEASE_NAME=volume-selector 4 | readonly AKS_CLUSTER_NAME=${1?' Azure AKS cluster name must be 1st argument'} 5 | readonly AZ_RESOURCE_GROUP=${2?' Azure resource group must be 1st argument'} 6 | readonly AZ_LOCATION=${3?' Azure location must be 2nd argument'} 7 | 8 | helm_install() { 9 | pushd "${PROJECT_ROOT}" > /dev/null || exit 10 | local -r node_resource_group=$(az aks show --resource-group "${AZ_RESOURCE_GROUP}" --name "${AKS_CLUSTER_NAME}" --query nodeResourceGroup -o tsv) 11 | local -r disk_id=$(az disk create --name "${RELEASE_NAME}" --size-gb "10" --max-shares 1 --resource-group "${node_resource_group}" --location ${AZ_LOCATION} --output tsv --query id) 12 | helm install "${RELEASE_NAME}"-disk neo4j-persistent-volume \ 13 | --set neo4j.name="${RELEASE_NAME}" \ 14 | --set data.driver=disk.csi.azure.com \ 15 | --set data.storageClassName="manual" \ 16 | --set data.reclaimPolicy="Delete" \ 17 | --set data.createPvc=false \ 18 | --set data.createStorageClass=true \ 19 | --set data.volumeHandle="${disk_id}" \ 20 | --set data.capacity.storage=10Gi 21 | helm install "${RELEASE_NAME}" neo4j -f examples/persistent-volume-selector-cluster/persistent-volume-selector.yaml 22 | } 23 | 24 | helm_install 25 | -------------------------------------------------------------------------------- /examples/persistent-volume-selector-standalone/install-example-azure.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | readonly PROJECT_ROOT="$(dirname "$(dirname "$(dirname "$0")")")" 3 | readonly RELEASE_NAME=volume-selector 4 | readonly AKS_CLUSTER_NAME=${1?' 
Azure AKS cluster name must be 1st argument'} 5 | readonly AZ_RESOURCE_GROUP=${2?' Azure resource group must be 1st argument'} 6 | readonly AZ_LOCATION=${3?' Azure location must be 2nd argument'} 7 | 8 | helm_install() { 9 | pushd "${PROJECT_ROOT}" > /dev/null || exit 10 | local -r node_resource_group=$(az aks show --resource-group "${AZ_RESOURCE_GROUP}" --name "${AKS_CLUSTER_NAME}" --query nodeResourceGroup -o tsv) 11 | local -r disk_id=$(az disk create --name "${RELEASE_NAME}" --size-gb "10" --max-shares 1 --resource-group "${node_resource_group}" --location ${AZ_LOCATION} --output tsv --query id) 12 | helm install "${RELEASE_NAME}"-disk neo4j-persistent-volume \ 13 | --set neo4j.name="${RELEASE_NAME}" \ 14 | --set data.driver=disk.csi.azure.com \ 15 | --set data.storageClassName="manual" \ 16 | --set data.reclaimPolicy="Delete" \ 17 | --set data.createPvc=false \ 18 | --set data.createStorageClass=true \ 19 | --set data.volumeHandle="${disk_id}" \ 20 | --set data.capacity.storage=10Gi 21 | helm install "${RELEASE_NAME}" neo4j -f examples/persistent-volume-selector-standalone/persistent-volume-selector.yaml 22 | } 23 | 24 | helm_install 25 | -------------------------------------------------------------------------------- /neo4j/neo4j-operations/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "k8s.io/client-go/kubernetes" 5 | "log" 6 | "os" 7 | "strings" 8 | "time" 9 | ) 10 | 11 | var ( 12 | Namespace string 13 | ReleaseName string 14 | ClientSet *kubernetes.Clientset 15 | ) 16 | 17 | func main() { 18 | 19 | err := checkRequiredVariables() 20 | if err != nil { 21 | log.Printf("missing required env variables \n %v \n Exiting... 
:-( ", err) 22 | os.Exit(1) 23 | } 24 | Namespace = os.Getenv("NAMESPACE") 25 | ReleaseName = os.Getenv("RELEASE_NAME") 26 | 27 | // load kubernetes config 28 | clientSet, err := getK8sClient() 29 | if err != nil { 30 | log.Printf("error while setting k8s client \n %v \n Exiting... :-( ", err) 31 | os.Exit(1) 32 | } 33 | ClientSet = clientSet 34 | 35 | // get password from kubernetes secret either specified in values.yaml or the generic k8s secret 36 | pass, err := getCredsFromSecret(os.Getenv("SECRETNAME")) 37 | if err != nil { 38 | log.Printf("error while fetching secret pass \n %v", err) 39 | os.Exit(1) 40 | } 41 | 42 | log.Printf("Waiting for Neo4j to startup. Sleeping for 60 seconds.") 43 | time.Sleep(60 * time.Second) 44 | // connect using the above creds 45 | username := strings.Split(pass, "/")[0] 46 | password := strings.Split(pass, "/")[1] 47 | err = ExecuteEnablement(username, password) 48 | if err != nil { 49 | log.Printf("error while connecting to neo4j \n %v", err) 50 | os.Exit(1) 51 | } 52 | 53 | } 54 | -------------------------------------------------------------------------------- /examples/persistent-volume-manual/install-example-aws.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | readonly PROJECT_ROOT="$(dirname "$(dirname "$(dirname "$0")")")" 3 | readonly RELEASE_NAME=volume-manual 4 | readonly AWS_ZONE=${1?' AWS zone must be 1st argument'} 5 | 6 | helm_install() { 7 | pushd "${PROJECT_ROOT}" > /dev/null || exit 8 | if ! kubectl get daemonset ebs-csi-node -n kube-system &> /dev/null; then 9 | echo "WARNING: EBS CSI Driver not found, this example will not work." 
10 | echo "See https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html for instructions to install driver" 11 | fi 12 | local -r volumeId=$(aws ec2 create-volume \ 13 | --availability-zone="${AWS_ZONE}" \ 14 | --size=10 \ 15 | --volume-type=gp3 \ 16 | --tag-specifications 'ResourceType=volume,Tags=[{Key=volume,Value='"${RELEASE_NAME}"'}]' \ 17 | --no-cli-pager \ 18 | --output text \ 19 | --query VolumeId) 20 | 21 | helm install "${RELEASE_NAME}"-disk neo4j-persistent-volume \ 22 | --set neo4j.name="${RELEASE_NAME}" \ 23 | --set data.driver=ebs.csi.aws.com \ 24 | --set data.reclaimPolicy="Delete" \ 25 | --set data.createPvc=true \ 26 | --set data.createStorageClass=false \ 27 | --set data.volumeHandle="${volumeId}" \ 28 | --set data.capacity.storage=10Gi 29 | 30 | helm install "${RELEASE_NAME}" neo4j -f examples/persistent-volume-manual/persistent-volume-manual.yaml 31 | } 32 | 33 | helm_install 34 | -------------------------------------------------------------------------------- /neo4j/neo4j-operations/k8s.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1" 7 | "k8s.io/client-go/kubernetes" 8 | "k8s.io/client-go/rest" 9 | "log" 10 | "strings" 11 | ) 12 | 13 | // get k8s client set 14 | func getK8sClient() (*kubernetes.Clientset, error) { 15 | 16 | config, err := rest.InClusterConfig() 17 | if err != nil { 18 | return nil, fmt.Errorf("error seen while getting cluster config \n %v", err) 19 | } 20 | 21 | clientSet, err := kubernetes.NewForConfig(config) 22 | if err != nil { 23 | return nil, fmt.Errorf("error seen while getting kuberenetes config \n %v", err) 24 | } 25 | 26 | return clientSet, nil 27 | } 28 | 29 | // getCredsFromSecret get the neo4j authentication details from the k8s secret stored under the key NEO4J_AUTH 30 | func getCredsFromSecret(secretName string) (string, error) { 31 | 32 | log.Println("Fetching creds 
from secret", secretName) 33 | secret, err := ClientSet.CoreV1().Secrets(Namespace).Get(context.TODO(), secretName, v1.GetOptions{}) 34 | if err != nil { 35 | return "", fmt.Errorf("unable to fetch details about secret %s \n %v", secretName, err) 36 | } 37 | pass, present := secret.Data["NEO4J_AUTH"] 38 | if !present { 39 | return "", fmt.Errorf("secret does not contain key NEO4J_AUTH") 40 | } 41 | if len(strings.Split(string(pass), "/")) != 2 { 42 | return "", fmt.Errorf("kubernetes secret pass should be of the format /") 43 | } 44 | return string(pass), nil 45 | } 46 | -------------------------------------------------------------------------------- /examples/persistent-volume-selector-standalone/install-example-aws.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | readonly PROJECT_ROOT="$(dirname "$(dirname "$(dirname "$0")")")" 3 | readonly RELEASE_NAME=volume-selector 4 | readonly AWS_ZONE=${1?' AWS zone must be 1st argument'} 5 | 6 | helm_install() { 7 | if ! kubectl get daemonset ebs-csi-node -n kube-system &> /dev/null; then 8 | echo "WARNING: EBS CSI Driver not found, this example will not work." 
9 | echo "See https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html for instructions to install driver" 10 | fi 11 | pushd "${PROJECT_ROOT}" > /dev/null || exit 12 | local -r volumeId=$(aws ec2 create-volume \ 13 | --availability-zone="${AWS_ZONE}" \ 14 | --size=10 \ 15 | --volume-type=gp3 \ 16 | --tag-specifications 'ResourceType=volume,Tags=[{Key=volume,Value='"${RELEASE_NAME}"'}]' \ 17 | --no-cli-pager \ 18 | --output text \ 19 | --query VolumeId) 20 | 21 | helm install "${RELEASE_NAME}"-disk neo4j-persistent-volume \ 22 | --set neo4j.name="${RELEASE_NAME}" \ 23 | --set data.driver=ebs.csi.aws.com \ 24 | --set data.reclaimPolicy="Delete" \ 25 | --set data.createPvc=false \ 26 | --set data.createStorageClass=true \ 27 | --set data.volumeHandle="${volumeId}" \ 28 | --set data.capacity.storage=10Gi 29 | helm install "${RELEASE_NAME}" neo4j -f examples/persistent-volume-selector-standalone/persistent-volume-selector.yaml 30 | } 31 | 32 | helm_install 33 | -------------------------------------------------------------------------------- /neo4j-headless-service/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | {{- define "neo4j.namespaceParameter" -}} 2 | {{ if ne "default" .Release.Namespace }} --namespace "{{.Release.Namespace}}"{{ end }} 3 | {{- end -}} 4 | 5 | {{- define "neo4j.logPassword" -}} 6 | {{ if .Values.logInitialPassword }}{{ .Values.neo4j.password }}{{ else }}**********{{ end }} 7 | {{- end -}} 8 | 9 | {{- $authDisabled := index .Values.config "dbms.security.auth_enabled" | default "" | regexMatch "(?i)no|false" -}} 10 | {{- $boltPort := .Values.ports.bolt.port | default "7687" }} 11 | {{- $ignored := set .Values.neo4j "acceptLicenseAgreement" "yes" }} 12 | 13 | {{- if .Release.IsInstall -}} 14 | 15 | Thank you for installing {{ .Chart.Name }}. 16 | 17 | Your release "{{ .Release.Name }}" has been installed in namespace "{{ .Release.Namespace }}". 
18 | 19 | {{ else -}} 20 | 21 | You have updated {{ .Chart.Name }} in namespace "{{ .Release.Namespace }}". 22 | 23 | {{ end -}} 24 | 25 | Once rollout is complete you can connect to your Neo4j cluster using "neo4j://{{ .Release.Name }}-neo4j.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ $boltPort }}". Try: 26 | 27 | $ kubectl run --rm -it{{ template "neo4j.namespaceParameter" . }} --image "{{ template "neo4j.image" . }}" cypher-shell \ 28 | -- cypher-shell -a "neo4j://{{ template "neo4j.name" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ $boltPort }}"{{ if not $authDisabled | and .Values.neo4j.password |and .Values.logInitialPassword}} -u neo4j -p "{{ template "neo4j.logPassword" . }}"{{ end }} 29 | 30 | Graphs are everywhere! 31 | -------------------------------------------------------------------------------- /internal/resources/model.go: -------------------------------------------------------------------------------- 1 | package resources 2 | 3 | import ( 4 | "fmt" 5 | "gopkg.in/yaml.v3" 6 | "io/ioutil" 7 | "os" 8 | "path" 9 | "path/filepath" 10 | "runtime" 11 | ) 12 | 13 | var _, thisFile, _, _ = runtime.Caller(0) 14 | var resourcesDir = path.Dir(thisFile) 15 | 16 | type YamlFile interface { 17 | Path() string 18 | HelmArgs() []string 19 | Data() (map[interface{}]interface{}, error) 20 | } 21 | 22 | type yamlFile struct { 23 | path string 24 | } 25 | 26 | func (y *yamlFile) Path() string { 27 | return y.path 28 | } 29 | 30 | func (y *yamlFile) HelmArgs() []string { 31 | return []string{"-f", y.path} 32 | } 33 | 34 | func (y *yamlFile) Data() (map[interface{}]interface{}, error) { 35 | file, err := ioutil.ReadFile(y.Path()) 36 | if err != nil { 37 | return nil, err 38 | } 39 | data := make(map[interface{}]interface{}) 40 | err = yaml.Unmarshal(file, &data) 41 | if err != nil { 42 | return nil, err 43 | } 44 | return data, nil 45 | } 46 | 47 | func resourceExistsAt(path string) (bool, error) { 48 | if fileInfo, err := 
os.Stat(path); err == nil { 49 | if filepath.Ext(path) == ".yaml" && !fileInfo.IsDir() { 50 | return true, nil 51 | } 52 | return false, fmt.Errorf("unexpected error occured. File %s returned fileInfo: %v", path, fileInfo) 53 | } else { 54 | return false, err 55 | } 56 | } 57 | 58 | func newYamlFile(filename string) YamlFile { 59 | fullPath := path.Join(resourcesDir, filename) 60 | if exists, err := resourceExistsAt(fullPath); err != nil || !exists { 61 | panic(err) 62 | } 63 | return &yamlFile{fullPath} 64 | } 65 | -------------------------------------------------------------------------------- /neo4j-headless-service/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{- define "neo4j.name" -}} 2 | {{- if eq (len (trim $.Values.neo4j.name)) 0 -}} 3 | {{- fail (printf "neo4j.name is required") -}} 4 | {{- else -}} 5 | {{ .Values.neo4j.name }} 6 | {{- end -}} 7 | {{- end -}} 8 | 9 | {{- define "neo4j.checkPortMapping" -}} 10 | {{- $httpPort := .Values.ports.http.port | int | default 7474 -}} 11 | {{- $httpsPort := .Values.ports.https.port | int | default 7473 -}} 12 | {{- $boltPort := .Values.ports.bolt.port | int | default 7687 -}} 13 | {{- $backupPort := .Values.ports.backup.port | int | default 6362 -}} 14 | 15 | {{- if and (eq .Values.ports.http.enabled true) (ne $httpPort 7474) -}} 16 | {{- include "neo4j.portRemappingFailureMessage" $httpPort -}} 17 | {{- end -}} 18 | {{- if and (eq .Values.ports.https.enabled true) (ne $httpsPort 7473) -}} 19 | {{- include "neo4j.portRemappingFailureMessage" $httpsPort -}} 20 | {{- end -}} 21 | {{- if and (eq .Values.ports.bolt.enabled true) (ne $boltPort 7687) -}} 22 | {{- include "neo4j.portRemappingFailureMessage" $boltPort -}} 23 | {{- end -}} 24 | {{- if and (eq .Values.ports.backup.enabled true) (ne $backupPort 6362) -}} 25 | {{- include "neo4j.portRemappingFailureMessage" $backupPort -}} 26 | {{- end -}} 27 | {{- end -}} 28 | 29 | {{- define 
"neo4j.portRemappingFailureMessage" -}} 30 | {{- $message := . | printf "port re-mapping is not allowed in headless service. Please remove custom port %d from values.yaml" -}} 31 | {{- fail $message -}} 32 | {{- end -}} 33 | -------------------------------------------------------------------------------- /neo4j-reverse-proxy/reverse-proxy/proxy/proxy.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "io" 7 | "net/http" 8 | "net/http/httputil" 9 | "net/url" 10 | "os" 11 | "strconv" 12 | ) 13 | 14 | func httpProxy(hostname string) (*httputil.ReverseProxy, error) { 15 | url, err := url.Parse(fmt.Sprintf("http://%s:7474", hostname)) 16 | if err != nil { 17 | return nil, err 18 | } 19 | proxy := httputil.NewSingleHostReverseProxy(url) 20 | 21 | // Modify response 22 | proxy.ModifyResponse = func(response *http.Response) error { 23 | 24 | if response.Header.Get("Content-Type") == "application/json" { 25 | bodyBytes, err := io.ReadAll(response.Body) 26 | if err != nil { 27 | return fmt.Errorf("error while reading json response \n %v", err) 28 | } 29 | portInt, err := strconv.Atoi(os.Getenv("PORT")) 30 | if err != nil { 31 | return err 32 | } 33 | //subtracting 8000 from the port number since we are adding 8000 in the helm chart template so as to not use port range < 1024 34 | portInt -= 8000 35 | port := fmt.Sprintf(":%d", portInt) 36 | b := bytes.Replace(bodyBytes, []byte(":7687"), []byte(port), -1) 37 | response.Header.Set("Content-Length", strconv.Itoa(len(b))) 38 | response.Body = io.NopCloser(bytes.NewReader(b)) 39 | } 40 | return nil 41 | } 42 | 43 | return proxy, nil 44 | } 45 | 46 | func boltProxy(hostname string) (*httputil.ReverseProxy, error) { 47 | url, err := url.Parse(fmt.Sprintf("http://%s:7687", hostname)) 48 | if err != nil { 49 | return nil, err 50 | } 51 | return httputil.NewSingleHostReverseProxy(url), nil 52 | } 53 | 
-------------------------------------------------------------------------------- /examples/persistent-volume-selector-standalone/README.md: -------------------------------------------------------------------------------- 1 | # Example - Install Neo4j using manually created disks and a Persistent Volume selector 2 | 3 | This example uses manually provisioned cloud disks for the Neo4j storage volumes. 4 | The `neo4j-persistent-volume` chart is used to configure a PV and storage class for the disk. 5 | The `neo4j` chart then configures the statefulset to use a selector based volume claim template. 6 | A PVC will be dynamically provisioned using the created PV. 7 | 8 | The example will use the following Helm values 9 | ```yaml 10 | neo4j: 11 | name: volume-selector 12 | volumes: 13 | data: 14 | mode: selector 15 | selector: 16 | storageClassName: "manual" 17 | accessModes: 18 | - ReadWriteOnce 19 | requests: 20 | storage: 10Gi 21 | 22 | ``` 23 | 24 | ## Install in AWS 25 | ```shell 26 | export AWS_ZONE=us-east-1a 27 | ./install-example-aws.sh $AWS_ZONE 28 | ``` 29 | 30 | ## Cleanup AWS 31 | ```shell 32 | ./cleanup-example-aws.sh 33 | ``` 34 | 35 | ## Install in GCP 36 | ```shell 37 | export CLOUDSDK_CORE_PROJECT=my-gcp-project 38 | export CLOUDSDK_COMPUTE_ZONE=my-zone 39 | ./install-example-gcp.sh 40 | ``` 41 | 42 | ## Cleanup GCP 43 | ```shell 44 | ./cleanup-example-gcp.sh 45 | ``` 46 | 47 | ## Install in Azure 48 | ```shell 49 | export AKS_CLUSTER_NAME=my-neo4j-cluster 50 | export AZURE_RESOURCE_GROUP=myResourceGroup 51 | export AZURE_LOCATION=mylocation 52 | ./install-example-azure.sh $AKS_CLUSTER_NAME $AZURE_RESOURCE_GROUP $AZURE_LOCATION 53 | ``` 54 | 55 | ## Cleanup Azure 56 | ```shell 57 | ./cleanup-example-azure.sh 58 | ``` 59 | -------------------------------------------------------------------------------- /neo4j-headless-service/templates/neo4j-svc.yaml: -------------------------------------------------------------------------------- 1 | {{- /* In almost all 
cases the selector should be unspecified and the default selector should be used. */ -}} 2 | {{- template "neo4j.checkPortMapping" . -}} 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: "{{ include "neo4j.name" $ }}-headless" 7 | namespace: "{{ .Release.Namespace }}" 8 | labels: 9 | helm.neo4j.com/neo4j.name: "{{ template "neo4j.name" $ }}" 10 | app: "{{ template "neo4j.name" . }}" 11 | {{- with .Values.annotations }} 12 | annotations: {{ toYaml . | nindent 4 }} 13 | {{- end }} 14 | spec: 15 | type: ClusterIP 16 | clusterIP: None 17 | sessionAffinity: None 18 | ports: 19 | {{- with .Values.ports }} 20 | {{- if .http.enabled }} 21 | - protocol: TCP 22 | port: {{ .http.port | default 7474 }} 23 | targetPort: 7474 24 | name: http 25 | {{- end }} 26 | {{- if .https.enabled }} 27 | - protocol: TCP 28 | port: {{ .https.port | default 7473 }} 29 | targetPort: 7473 30 | name: https 31 | {{- end }} 32 | {{- if .bolt.enabled }} 33 | - protocol: TCP 34 | port: {{ .bolt.port | default 7687 }} 35 | targetPort: 7687 36 | name: tcp-bolt 37 | {{- end }} 38 | {{ with .backup }} 39 | {{- if .enabled }} 40 | - protocol: TCP 41 | port: {{ .port | default 6362 }} 42 | targetPort: 6362 43 | name: tcp-backup 44 | {{- end }} 45 | {{- end }} 46 | {{- end }} 47 | 48 | selector: 49 | app: "{{ template "neo4j.name" . }}" 50 | {{- with .Values.selector }} 51 | {{- . | toYaml | nindent 4 }} 52 | {{- end }} 53 | -------------------------------------------------------------------------------- /examples/persistent-volume-selector-cluster/install-example-aws.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | readonly PROJECT_ROOT="$(dirname "$(dirname "$(dirname "$0")")")" 3 | readonly RELEASE_NAME=volume-selector 4 | readonly AWS_ZONE=${1?' AWS zone must be 1st argument'} 5 | 6 | helm_install() { 7 | if ! 
kubectl get daemonset ebs-csi-node -n kube-system &> /dev/null; then 8 | echo "WARNING: EBS CSI Driver not found, this example will not work." 9 | echo "See https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html for instructions to install driver" 10 | fi 11 | pushd "${PROJECT_ROOT}" > /dev/null || exit 12 | for i in {1..3}; do 13 | local volumeId=$(aws ec2 create-volume \ 14 | --availability-zone="${AWS_ZONE}" \ 15 | --size=10 \ 16 | --volume-type=gp3 \ 17 | --tag-specifications 'ResourceType=volume,Tags=[{Key=volume,Value='"${RELEASE_NAME}-${i}"'}]' \ 18 | --no-cli-pager \ 19 | --output text \ 20 | --query VolumeId) 21 | 22 | helm install "${RELEASE_NAME}-disk-${i}" neo4j-persistent-volume \ 23 | --set neo4j.name="${RELEASE_NAME}" \ 24 | --set data.driver=ebs.csi.aws.com \ 25 | --set data.reclaimPolicy="Delete" \ 26 | --set data.createPvc=false \ 27 | --set data.createStorageClass=true \ 28 | --set data.volumeHandle="${volumeId}" \ 29 | --set data.capacity.storage=10Gi 30 | helm install "${RELEASE_NAME}-${i}" neo4j -f examples/persistent-volume-selector-cluster/persistent-volume-selector.yaml 31 | done 32 | } 33 | 34 | helm_install 35 | -------------------------------------------------------------------------------- /internal/unit_tests/neo4j_resources.go: -------------------------------------------------------------------------------- 1 | package unit_tests 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | ) 7 | 8 | var funcMap = map[string]func(cpuValue string) string{ 9 | "cpuRequests": func(cpuValue string) string { 10 | return fmt.Sprintf("neo4j.resources.requests.cpu=%s", cpuValue) 11 | }, 12 | "memoryRequests": func(memValue string) string { 13 | return fmt.Sprintf("neo4j.resources.requests.memory=%s", memValue) 14 | }, 15 | "memoryResources": func(memValue string) string { 16 | return fmt.Sprintf("neo4j.resources.memory=%s", memValue) 17 | }, 18 | "cpuResources": func(cpuValue string) string { 19 | return fmt.Sprintf("neo4j.resources.cpu=%s", cpuValue) 
20 | }, 21 | } 22 | 23 | type Neo4jResourceTestCase struct { 24 | arguments []string 25 | cpu string 26 | memory string 27 | } 28 | 29 | func GenerateNeo4jResourcesTestCase(funcs []string, cpu string, memory string) Neo4jResourceTestCase { 30 | if cpu == "" { 31 | cpu = "1" 32 | } 33 | if memory == "" { 34 | memory = "2Gi" 35 | } 36 | return Neo4jResourceTestCase{ 37 | arguments: getArgs(funcs, cpu, memory), 38 | cpu: cpu, 39 | memory: memory, 40 | } 41 | } 42 | 43 | func getArgs(funcs []string, cpu string, memory string) []string { 44 | 45 | args := []string{"--set", "volumes.data.mode=selector", "--set", "neo4j.acceptLicenseAgreement=yes"} 46 | for _, funcName := range funcs { 47 | f := funcMap[funcName] 48 | if strings.Contains(funcName, "cpu") { 49 | args = append(args, "--set", f(cpu)) 50 | } 51 | if strings.Contains(funcName, "memory") { 52 | args = append(args, "--set", f(memory)) 53 | } 54 | } 55 | return args 56 | } 57 | -------------------------------------------------------------------------------- /examples/secret-mounts/create-s3-secret.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Script to create Kubernetes secret for S3 credentials 4 | # Usage: ./create-s3-secret.sh [secret-name] [namespace] 5 | 6 | set -e 7 | 8 | SECRET_NAME="${1:-s3-credentials}" 9 | NAMESPACE="${2:-default}" 10 | 11 | echo "Creating Kubernetes secret for S3 credentials..." 12 | echo "Secret name: $SECRET_NAME" 13 | echo "Namespace: $NAMESPACE" 14 | echo 15 | 16 | # Prompt for credentials 17 | read -p "Enter S3 Access Key ID: " ACCESS_KEY_ID 18 | read -s -p "Enter S3 Secret Access Key: " SECRET_ACCESS_KEY 19 | echo 20 | read -p "Enter S3 Endpoint (e.g., https://s3.example.com): " ENDPOINT 21 | read -p "Enter S3 Region (e.g., us-east-1): " REGION 22 | 23 | echo 24 | echo "Creating secret..." 
25 | 26 | kubectl create secret generic "$SECRET_NAME" \ 27 | --namespace="$NAMESPACE" \ 28 | --from-literal=access-key-id="$ACCESS_KEY_ID" \ 29 | --from-literal=secret-access-key="$SECRET_ACCESS_KEY" \ 30 | --from-literal=endpoint="$ENDPOINT" \ 31 | --from-literal=region="$REGION" 32 | 33 | echo "Secret '$SECRET_NAME' created successfully in namespace '$NAMESPACE'" 34 | echo 35 | echo "You can now use this secret in your values.yaml:" 36 | echo "secretMounts:" 37 | echo " s3-credentials:" 38 | echo " secretName: \"$SECRET_NAME\"" 39 | echo " mountPath: \"/var/secrets/s3\"" 40 | echo " items:" 41 | echo " - key: \"access-key-id\"" 42 | echo " path: \"access-key\"" 43 | echo " - key: \"secret-access-key\"" 44 | echo " path: \"secret-key\"" 45 | echo " - key: \"endpoint\"" 46 | echo " path: \"endpoint\"" 47 | echo " - key: \"region\"" 48 | echo " path: \"region\"" 49 | echo " defaultMode: 0600" 50 | -------------------------------------------------------------------------------- /examples/persistent-volume-selector-cluster/README.md: -------------------------------------------------------------------------------- 1 | # Example - Install Neo4j Cluster using manually created disks and a Persistent Volume selector 2 | 3 | This example uses manually provisioned cloud disks for the Neo4j storage volumes and installs a Neo4j 3 server cluster. 4 | The `neo4j-persistent-volume` chart is used to configure 3 persistent volumes for each disk and manual storage class. 5 | The `neo4j` chart then configures the StatefulSet to use a selector based volume claim template. 6 | A PVC will be dynamically provisioned for each PV. 
7 | 8 | The example will use the following Helm values 9 | ```yaml 10 | neo4j: 11 | name: volume-selector 12 | minimumClusterSize: 3 13 | acceptLicenseAgreement: "yes" 14 | edition: enterprise 15 | volumes: 16 | data: 17 | mode: selector 18 | selector: 19 | storageClassName: "manual" 20 | accessModes: 21 | - ReadWriteOnce 22 | requests: 23 | storage: 10Gi 24 | ``` 25 | 26 | ## Install in AWS 27 | ```shell 28 | export AWS_ZONE=us-east-1a 29 | ./install-example-aws.sh $AWS_ZONE 30 | ``` 31 | 32 | ## Cleanup AWS 33 | ```shell 34 | ./cleanup-example-aws.sh 35 | ``` 36 | 37 | ## Install in GCP 38 | ```shell 39 | export CLOUDSDK_CORE_PROJECT=my-gcp-project 40 | export CLOUDSDK_COMPUTE_ZONE=my-zone 41 | ./install-example-gcp.sh 42 | ``` 43 | 44 | ## Cleanup GCP 45 | ```shell 46 | ./cleanup-example-gcp.sh 47 | ``` 48 | 49 | ## Install in Azure 50 | ```shell 51 | export AKS_CLUSTER_NAME=my-neo4j-cluster 52 | export AZURE_RESOURCE_GROUP=myResourceGroup 53 | export AZURE_LOCATION=mylocation 54 | ./install-example-azure.sh $AKS_CLUSTER_NAME $AZURE_RESOURCE_GROUP $AZURE_LOCATION 55 | ``` 56 | 57 | ## Cleanup Azure 58 | ```shell 59 | ./cleanup-example-azure.sh 60 | ``` 61 | -------------------------------------------------------------------------------- /bin/gcloud/create_cluster: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This creates a new Kubernetes cluster on GKE 4 | 5 | # make bash play nicely 6 | # 7 | set -o pipefail -o errtrace -o errexit -o nounset 8 | shopt -s inherit_errexit 9 | [[ -n "${TRACE:-}" ]] && set -o xtrace 10 | 11 | # Required env vars 12 | CLOUDSDK_CORE_PROJECT="${CLOUDSDK_CORE_PROJECT:?CLOUDSDK_CORE_PROJECT is required}" 13 | CLOUDSDK_CONTAINER_CLUSTER="${CLOUDSDK_CONTAINER_CLUSTER:?CLOUDSDK_CONTAINER_CLUSTER is required}" 14 | CLOUDSDK_COMPUTE_ZONE="${CLOUDSDK_COMPUTE_ZONE:?CLOUDSDK_COMPUTE_ZONE is required}" 15 | #KUBECONFIG="${KUBECONFIG:?KUBECONFIG is required}" 16 | 17 | 
NODE_MACHINE="${NODE_MACHINE:-e2-standard-4}" 18 | NUM_NODES="${NUM_NODES:-11}" 19 | RELEASE_CHANNEL="${RELEASE_CHANNEL:-stable}" 20 | DISK_TYPE="${DISK_TYPE:-pd-ssd}" 21 | DISK_SIZE="${DISK_SIZE:-40}" 22 | MAX_PODS_PER_NODE="${MAX_PODS_PER_NODE:-30}" 23 | 24 | # For more info on release channels see https://cloud.google.com/kubernetes-engine/docs/concepts/release-channels 25 | # create a new Kubernetes cluster 26 | # 27 | gcloud container clusters create "${CLOUDSDK_CONTAINER_CLUSTER}" \ 28 | --release-channel="${RELEASE_CHANNEL}" \ 29 | --zone="${CLOUDSDK_COMPUTE_ZONE}" \ 30 | --num-nodes="${NUM_NODES}" \ 31 | --workload-pool="${CLOUDSDK_CORE_PROJECT}.svc.id.goog" \ 32 | --preemptible --machine-type="${NODE_MACHINE}" --image-type="COS_CONTAINERD" \ 33 | --disk-type="${DISK_TYPE}" --disk-size="${DISK_SIZE}" \ 34 | --max-pods-per-node="${MAX_PODS_PER_NODE}" --enable-ip-alias \ 35 | --enable-shielded-nodes --metadata=disable-legacy-endpoints=true --no-enable-basic-auth 36 | 37 | # Configure kubectl 38 | gcloud container clusters get-credentials "${CLOUDSDK_CONTAINER_CLUSTER}" 39 | 40 | echo "kubectl configured for $(kubectl config current-context)" 41 | 42 | 43 | -------------------------------------------------------------------------------- /bin/docker-desktop-create-persistent-disk: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This creates the expected persistent disk for docker desktop 4 | # 5 | # e.g. 6 | # docker-desktop-create-persistent-disk neo4j-dev /tmp/neo4j-helm/disk1 7 | 8 | # make bash play nicely 9 | # 10 | set -o pipefail -o errtrace -o errexit -o nounset 11 | shopt -s inherit_errexit 12 | [[ -n "${TRACE:-}" ]] && set -o xtrace 13 | 14 | # Required env vars 15 | RELEASE_NAME="${1:?Missing argument. Usage: docker-desktop-create-persistent-disk /host/path/for/persistent/volume}" 16 | PERSISTENT_VOLUME_HOST_PATH="${2:?Missing argument. 
Usage: docker-desktop-create-persistent-disk /host/path/for/persistent/volume}" 17 | 18 | if [ -d "${PERSISTENT_VOLUME_HOST_PATH}" ]; then 19 | echo "INFO: Using ${PERSISTENT_VOLUME_HOST_PATH} for neo4j data" 20 | else 21 | echo "INFO: Creating directory ${PERSISTENT_VOLUME_HOST_PATH}" 22 | mkdir -p "${PERSISTENT_VOLUME_HOST_PATH}" 23 | echo "INFO: Using ${PERSISTENT_VOLUME_HOST_PATH} for neo4j data" 24 | fi 25 | 26 | 27 | # Local vars 28 | PERSISTENT_VOLUME_NAME="${RELEASE_NAME}-pv" 29 | NAMESPACE="${NAMESPACE:-default}" 30 | 31 | docker-desktop-configure-kubectl 32 | 33 | if helm get all "${PERSISTENT_VOLUME_NAME}" >/dev/null; then 34 | echo "WARNING: helm release '${PERSISTENT_VOLUME_NAME}' already exists in namespace '${NAMESPACE}'. Exiting..." 35 | exit 1 36 | else 37 | echo "INFO: This will create a helm release called '${PERSISTENT_VOLUME_NAME}' in namespace '${NAMESPACE}'" 38 | fi 39 | 40 | helm install --namespace "${NAMESPACE}" "${PERSISTENT_VOLUME_NAME}" ./neo4j-docker-desktop-pv --set neo4j.name="${RELEASE_NAME}" --set hostPath="${PERSISTENT_VOLUME_HOST_PATH}" 41 | 42 | echo "Persistent volume created. To remove the persistent volume run 'helm uninstall --namespace "${NAMESPACE}" "${PERSISTENT_VOLUME_NAME}"'" 43 | -------------------------------------------------------------------------------- /bin/gcloud-create-filestore: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This creates the expected persistent disk 4 | 5 | # make bash play nicely 6 | # 7 | set -o pipefail -o errtrace -o errexit -o nounset 8 | shopt -s inherit_errexit 9 | [[ -n "${TRACE:-}" ]] && set -o xtrace 10 | 11 | 12 | # Required env vars 13 | FILESTORE_NAME="${1:?Missing argument. 
Usage: gcloud-create-filestore }" 14 | CLOUDSDK_COMPUTE_ZONE="${CLOUDSDK_COMPUTE_ZONE:?CLOUDSDK_COMPUTE_ZONE is required}" 15 | 16 | 17 | # Local vars 18 | FILESTORE_SHARE_CAPACITY="${FILESTORE_SHARE_CAPACITY:-1T}" 19 | FILESTORE_TIER="${FILESTORE_TIER:-standard}" 20 | FILESTORE_DESCRIPTION="${FILESTORE_DESCRIPTION:-Neo4j Filestore for backups and logs}" 21 | FILESTORE_SHARE_NAME="${FILESTORE_SHARE_NAME:-neo4j}" 22 | FILESTORE_NETWORK_NAME="${FILESTORE_NETWORK_NAME:-default}" 23 | NAMESPACE="${NAMESPACE:-default}" 24 | 25 | 26 | # Create the filestore 27 | # TODO: handle existing filestore cases ( re use / delete and recreate / other? ) 28 | gcloud filestore instances describe "${FILESTORE_NAME}" --zone="${CLOUDSDK_COMPUTE_ZONE}" 1>&2 || gcloud filestore instances create "${FILESTORE_NAME}" \ 29 | --description="${FILESTORE_DESCRIPTION}" \ 30 | --tier="${FILESTORE_TIER}" \ 31 | --file-share="name=${FILESTORE_SHARE_NAME},capacity=${FILESTORE_SHARE_CAPACITY}" \ 32 | --network="name=${FILESTORE_NETWORK_NAME}" --zone="${CLOUDSDK_COMPUTE_ZONE}" \ 33 | 1>&2 34 | 35 | # lookup the filestore's assigned IP address 36 | FILESTORE_IP="$(gcloud filestore instances describe "${FILESTORE_NAME}" --zone="${CLOUDSDK_COMPUTE_ZONE}" --format='get(networks[0].ipAddresses[0])')" 37 | 38 | 39 | # Print the necessary yaml for use with our pv helm charts 40 | cat << EOF 41 | logs: 42 | capacity: 43 | storage: "${FILESTORE_SHARE_CAPACITY}" 44 | fileShare: "${FILESTORE_SHARE_NAME}" 45 | ipAddress: "${FILESTORE_IP}" 46 | EOF 47 | -------------------------------------------------------------------------------- /bin/run-go-tests: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This run `go test` for all tests defined in the internal module 4 | # The go test output is written to a .log file in the output directory as well as being printed to stdout 5 | 6 | # make bash play nicely 7 | # 8 | set -o pipefail -o errtrace -o errexit 
-o nounset 9 | shopt -s inherit_errexit 10 | [[ -n "${TRACE:-}" ]] && set -o xtrace 11 | 12 | # TODO: assert that kubernetes and gcloud are correctly configured? 13 | 14 | echo "Checking Go code formatting !!!" 15 | #perform gofmt and store the output in an array 16 | status_code=($(gofmt -l ./internal/)) 17 | 18 | #if any elements found in array it means go code is not formatted and we should exit 19 | # we could have also used a single line test -z $(gofmt -l ./internal) but it would just exit because of errexit 20 | if [ "${#status_code[@]}" != 0 ]; then 21 | echo "*********** PLEASE FORMAT YOUR GO CODE !! ***********" 22 | exit 1 23 | fi 24 | 25 | # run go test and format the output for junit consumption 26 | # 27 | mkdir -p output 28 | 29 | # unit tests 30 | go test -c ./internal/unit_tests/ 31 | go test -c ./internal/integration_tests/ 32 | 33 | if [[ "${GO_TEST_FORMAT:-}" == "json" ]]; then 34 | go tool test2json -t ./unit_tests.test -test.v --test.count ${GO_TEST_COUNT:-1} --test.timeout "${GO_TEST_TIMEOUT:-10m}" "$@" | tee output/go-unit-test.json | gotestfmt 35 | go tool test2json -t ./integration_tests.test -test.v --test.count ${GO_TEST_COUNT:-1} --test.timeout "${GO_TEST_TIMEOUT:-10m}" "$@" | tee output/go-integration-test.json | gotestfmt 36 | else 37 | ./unit_tests.test -test.v --test.count ${GO_TEST_COUNT:-1} --test.timeout "${GO_TEST_TIMEOUT:-10m}" "$@" | tee output/go-unit-test.log | gotestfmt 38 | ./integration_tests.test -test.v --test.count ${GO_TEST_COUNT:-1} --test.timeout "${GO_TEST_TIMEOUT:-10m}" "$@" | tee output/go-integration-test.log | gotestfmt 39 | fi 40 | -------------------------------------------------------------------------------- /neo4j/user-logs.xml: -------------------------------------------------------------------------------- 1 | 2 | 9 | 18 | 19 | 20 | 21 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 
-------------------------------------------------------------------------------- /neo4j/templates/neo4j-operations.yaml: -------------------------------------------------------------------------------- 1 | {{- $clusterEnabled := eq (include "neo4j.isClusterEnabled" .) "true" }} 2 | 3 | {{- if and (not (kindIs "invalid" $.Values.neo4j.operations)) $.Values.neo4j.operations.enableServer $clusterEnabled -}} 4 | apiVersion: v1 5 | kind: Pod 6 | metadata: 7 | name: {{ include "neo4j.fullname" . }}-operations 8 | labels: 9 | app: "neo4j-operations" 10 | {{- include "neo4j.labels" $.Values.neo4j.operations | indent 4 }} 11 | spec: 12 | restartPolicy: Never 13 | serviceAccountName: {{ include "neo4j.serviceAccountName" . }} 14 | {{- include "neo4j.imagePullSecrets" .Values.image.imagePullSecrets | indent 2 }} 15 | containers: 16 | - name: {{ include "neo4j.fullname" . }}-operations 17 | image: {{ $.Values.neo4j.operations.image }} 18 | imagePullPolicy: "Always" 19 | env: 20 | - name: RELEASE_NAME 21 | value: {{ include "neo4j.fullname" . | quote }} 22 | {{- if and (not (kindIs "invalid" $.Values.neo4j.passwordFromSecret)) (not (empty $.Values.neo4j.passwordFromSecret)) }} 23 | - name: SECRETNAME 24 | value: {{ $.Values.neo4j.passwordFromSecret | quote }} 25 | {{- else }} 26 | - name: SECRETNAME 27 | value: {{ include "neo4j.name" . 
| printf "%s-auth" | quote }} 28 | {{- end }} 29 | - name: NAMESPACE 30 | value: {{ .Release.Namespace | quote }} 31 | - name: PROTOCOL 32 | value: {{ $.Values.neo4j.operations.protocol | default "neo4j" | quote }} 33 | {{- if $.Values.neo4j.operations.ssl }} 34 | - name: SSL_DISABLE_HOSTNAME_VERIFICATION 35 | value: {{ $.Values.neo4j.operations.ssl.disableHostnameVerification | default false | toString | quote }} 36 | - name: SSL_INSECURE_SKIP_VERIFY 37 | value: {{ $.Values.neo4j.operations.ssl.insecureSkipVerify | default false | toString | quote }} 38 | {{- end }} 39 | {{- end -}} 40 | -------------------------------------------------------------------------------- /internal/unit_tests/helm_template_headless_service_test.go: -------------------------------------------------------------------------------- 1 | package unit_tests 2 | 3 | import ( 4 | "fmt" 5 | "github.com/neo4j/helm-charts/internal/model" 6 | "github.com/stretchr/testify/assert" 7 | v1 "k8s.io/api/core/v1" 8 | "testing" 9 | ) 10 | 11 | func TestHeadlessServiceDefaults(t *testing.T) { 12 | t.Parallel() 13 | 14 | k8s, err := model.HelmTemplate(t, model.HeadlessServiceHelmChart, useNeo4jClusterName) 15 | if !assert.NoError(t, err) { 16 | return 17 | } 18 | 19 | services := k8s.OfType(&v1.Service{}) 20 | assert.Len(t, k8s.All(), 1, "the headless service chart should only create a single K8s object (a Service)") 21 | assert.Len(t, services, 1) 22 | 23 | service := services[0].(*v1.Service) 24 | assert.Equal(t, v1.ServiceType("ClusterIP"), service.Spec.Type) 25 | assert.Equal(t, service.Spec.ClusterIP, "None") 26 | } 27 | 28 | // TestHeadlessServiceForPortRemapping checks whether headless service issues an error during port remapping or not 29 | func TestHeadlessServiceForPortRemapping(t *testing.T) { 30 | 31 | t.Parallel() 32 | 33 | portRemappingMessage := fmt.Sprintf("port re-mapping is not allowed in headless service. 
Please remove custom port 9000 from values.yaml") 34 | portRemappingArgs := []string{ 35 | "--set", "ports.http.enabled=true", 36 | "--set", "ports.http.port=9000", // this should fail since only 7474 port is allowed 37 | } 38 | 39 | _, err := model.HelmTemplate(t, model.HeadlessServiceHelmChart, append(useNeo4jClusterName, portRemappingArgs...)) 40 | if !assert.Error(t, err) { 41 | return 42 | } 43 | if !assert.Contains(t, err.Error(), portRemappingMessage) { 44 | return 45 | } 46 | 47 | portRemappingArgs = []string{ 48 | "--set", "ports.http.enabled=true", 49 | "--set", "ports.http.port=7474", 50 | } 51 | 52 | _, err = model.HelmTemplate(t, model.HeadlessServiceHelmChart, append(useNeo4jClusterName, portRemappingArgs...)) 53 | if !assert.NoError(t, err) { 54 | return 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /neo4j-headless-service/values.yaml: -------------------------------------------------------------------------------- 1 | # Shared neo4j object 2 | neo4j: 3 | # neo4j.name is required and must match the name of the neo4j instances this service should use 4 | name: "" 5 | edition: "enterprise" 6 | 7 | # Annotations for the external service 8 | annotations: { } 9 | 10 | # Neo4j ports to include in external service 11 | ports: 12 | http: 13 | enabled: true #Set this to false to remove HTTP from this service (this does not affect whether http is enabled for the neo4j process) 14 | # uncomment to publish http on port 80 (neo4j default is 7474) 15 | # port: 80 16 | https: 17 | enabled: true #Set this to false to remove HTTPS from this service (this does not affect whether https is enabled for the neo4j process) 18 | # uncomment to publish http on port 443 (neo4j default is 7474) 19 | # port: 443 20 | bolt: 21 | enabled: true #Set this to false to remove BOLT from this service (this does not affect whether https is enabled for the neo4j process) 22 | # Uncomment to explicitly specify the port to publish Neo4j 
Bolt (7687 is the default) 23 | # port: 7687 24 | backup: 25 | enabled: false #Set this to true to expose backup port externally (n.b. this could have security implications. Backup is not authenticated by default) 26 | # Uncomment to explicitly specify the port to publish Neo4j Backup (6362 is the default) 27 | # port: 6362 28 | 29 | # A "helm.neo4j.com/neo4j.name" will be applied automatically from `neo4j.name`. 30 | # Specify *additional* selectors to apply here (generally not required). 31 | # If you do not want the automatic selector rename to "selectorOverride" and no "helm.neo4j.com/neo4j.name" will be applied. 32 | selector: 33 | "helm.neo4j.com/neo4j.loadbalancer": "include" 34 | 35 | # Add additional Service.spec here if needed 36 | spec: 37 | type: ClusterIP 38 | clusterIP: None 39 | 40 | # Kubernetes cluster domain suffix 41 | clusterDomain: "cluster.local" 42 | 43 | # Neo4j.conf 44 | config: {} 45 | -------------------------------------------------------------------------------- /neo4j-reverse-proxy/reverse-proxy/operations/operations.go: -------------------------------------------------------------------------------- 1 | package operations 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os" 7 | "os/exec" 8 | "strings" 9 | ) 10 | 11 | // CheckConnectivity checks if there is connectivity with the provided kubernetes service or not 12 | func CheckConnectivity(hostname string) error { 13 | ports := []string{"7474", "7687"} 14 | for _, port := range ports { 15 | hostPort := fmt.Sprintf("%s:%s", hostname, port) 16 | output, err := exec.Command("nc", "-vz", "-w", "3", hostname, port).CombinedOutput() 17 | if err != nil { 18 | return fmt.Errorf("connectivity cannot be established with %s \n output = %s \n err = %v", 19 | hostPort, 20 | string(output), 21 | err) 22 | } 23 | outputString := strings.ToLower(string(output)) 24 | if !strings.Contains(outputString, "open") && !strings.Contains(outputString, "succeeded") { 25 | return fmt.Errorf("connectivity cannot be 
established with %s. Missing 'open' in output \n output = %s", 26 | hostPort, 27 | string(output)) 28 | } 29 | log.Printf("Connectivity established with Service %s!!", hostPort) 30 | } 31 | 32 | return nil 33 | } 34 | 35 | // CheckEnvVariables checks if the environment variables required are present or not 36 | func CheckEnvVariables() []error { 37 | envVarNames := []string{"SERVICE_NAME", "NAMESPACE", "DOMAIN", "PORT"} 38 | _, isIPPresent := os.LookupEnv("IP") 39 | var errs []error 40 | for _, name := range envVarNames { 41 | _, present := os.LookupEnv(name) 42 | if !present { 43 | switch name { 44 | case "DOMAIN": 45 | os.Setenv("DOMAIN", "cluster.local") 46 | continue 47 | case "NAMESPACE": 48 | os.Setenv("NAMESPACE", "default") 49 | continue 50 | default: 51 | if isIPPresent && name == "SERVICE_NAME" { 52 | continue 53 | } 54 | errs = append(errs, fmt.Errorf(" Missing %s environment variable !! ", name)) 55 | } 56 | } 57 | } 58 | if len(errs) != 0 { 59 | return errs 60 | } 61 | return nil 62 | } 63 | -------------------------------------------------------------------------------- /examples/bloom-gds-license/README.md: -------------------------------------------------------------------------------- 1 | # Deploy Neo4j with enterprise Bloom and GDS plugins 2 | 3 | This example demonstrates deploying a standalone Neo4j server with Bloom and GDS plugins, both with and without license keys. 
4 | 5 | # Deploy Neo4j and GDS without license 6 | The script will use the following Helm values 7 | ```yaml 8 | neo4j: 9 | name: gds-no-license 10 | acceptLicenseAgreement: "yes" 11 | edition: enterprise 12 | volumes: 13 | data: 14 | mode: defaultStorageClass 15 | env: 16 | NEO4J_PLUGINS: '["graph-data-science"]' 17 | config: 18 | dbms.security.procedures.unrestricted: "gds.*,apoc.*" 19 | ``` 20 | 21 | To install using these values, run the script: 22 | ```shell 23 | ./install-gds-no-license.sh 24 | ``` 25 | 26 | # Deploy Neo4j, GDS and Bloom with license files 27 | The script will use the following Helm values 28 | ```yaml 29 | neo4j: 30 | name: licenses 31 | acceptLicenseAgreement: "yes" 32 | edition: enterprise 33 | volumes: 34 | data: 35 | mode: defaultStorageClass 36 | licenses: 37 | mode: volume 38 | volume: 39 | secret: 40 | secretName: gds-bloom-license 41 | items: 42 | - key: gds.license 43 | path: gds.license 44 | - key: bloom.license 45 | path: bloom.license 46 | env: 47 | NEO4J_PLUGINS: '["graph-data-science", "bloom"]' 48 | config: 49 | gds.enterprise.license_file: "/licenses/gds.license" 50 | dbms.security.procedures.unrestricted: "gds.*,apoc.*,bloom.*" 51 | server.unmanaged_extension_classes: "com.neo4j.bloom.server=/bloom,semantics.extension=/rdf" 52 | dbms.security.http_auth_allowlist: "/,/browser.*,/bloom.*" 53 | dbms.bloom.license_file: "/licenses/bloom.license" 54 | ``` 55 | To install using these values, run the script: 56 | 57 | **N.B The example requires the license files for Bloom and GDS and assumes the files are named gds.license and bloom.license** 58 | ```shell 59 | ./install-gds-bloom-with-license.sh /path/to/gds.license /path/to/bloom.license 60 | ``` 61 | -------------------------------------------------------------------------------- /neo4j-persistent-volume/templates/data-pv.yaml: -------------------------------------------------------------------------------- 1 | {{- with .Values.data }} 2 | kind: "PersistentVolume" 3 | 
apiVersion: "v1" 4 | metadata: 5 | # n.b. persistent volumes don't seem to belong to namespaces 6 | name: "{{ $.Release.Name }}-pv" 7 | labels: 8 | # the app name is used to link this persistent volume to the Neo4j StatefulSet 9 | app: "{{ template "neo4j.appName" $ }}" 10 | helm.neo4j.com/volume-role: "data" 11 | spec: 12 | accessModes: 13 | - ReadWriteOnce 14 | capacity: 15 | storage: "{{ required "data.capacity.storage must be set explicitly" .capacity.storage }}" 16 | csi: 17 | driver: "{{ required "data.driver is required (e.g. my-disk)" .driver }}" 18 | volumeHandle: "{{ required "data.volumeHandle is required (e.g. my-disk)" .volumeHandle }}" 19 | fsType: "{{ .fsType }}" 20 | persistentVolumeReclaimPolicy: "{{ .reclaimPolicy }}" 21 | storageClassName: "{{ .storageClassName }}" 22 | {{- if .createPvc}} 23 | claimRef: 24 | name: "{{ $.Release.Name }}-pvc" 25 | namespace: {{ $.Release.Namespace }} 26 | --- 27 | apiVersion: v1 28 | kind: PersistentVolumeClaim 29 | metadata: 30 | name: "{{ $.Release.Name }}-pvc" 31 | labels: 32 | helm.neo4j.com/volume-role: "data" 33 | app: "{{ template "neo4j.appName" $ }}" 34 | spec: 35 | storageClassName: "{{ .storageClassName }}" 36 | volumeName: "{{ $.Release.Name }}-pv" 37 | accessModes: 38 | - ReadWriteOnce 39 | resources: 40 | requests: 41 | storage: "{{ required "data.capacity.storage must be set explicitly" .capacity.storage }}" 42 | {{- end }} 43 | {{- if .createStorageClass}} 44 | {{- $storageClass := (lookup "storage.k8s.io/v1" "StorageClass" $.Release.Namespace $.Values.data.storageClassName) }} 45 | {{- if not $storageClass }} 46 | --- 47 | kind: StorageClass 48 | apiVersion: storage.k8s.io/v1 49 | metadata: 50 | name: manual 51 | provisioner: kubernetes.io/no-provisioner 52 | volumeBindingMode: WaitForFirstConsumer 53 | 54 | {{- end }} 55 | {{- end }} 56 | {{- end }} 57 | -------------------------------------------------------------------------------- /neo4j/neo4j-operations/go.mod: 
-------------------------------------------------------------------------------- 1 | module github.com/neo4j/neo4j-operations 2 | 3 | go 1.23.8 4 | 5 | require ( 6 | github.com/neo4j/neo4j-go-driver/v5 v5.20.0 7 | k8s.io/apimachinery v0.30.0 8 | k8s.io/client-go v0.30.0 9 | ) 10 | 11 | require ( 12 | github.com/davecgh/go-spew v1.1.1 // indirect 13 | github.com/emicklei/go-restful/v3 v3.11.0 // indirect 14 | github.com/go-logr/logr v1.4.1 // indirect 15 | github.com/go-openapi/jsonpointer v0.19.6 // indirect 16 | github.com/go-openapi/jsonreference v0.20.2 // indirect 17 | github.com/go-openapi/swag v0.22.3 // indirect 18 | github.com/gogo/protobuf v1.3.2 // indirect 19 | github.com/golang/protobuf v1.5.4 // indirect 20 | github.com/google/gnostic-models v0.6.8 // indirect 21 | github.com/google/gofuzz v1.2.0 // indirect 22 | github.com/google/uuid v1.3.0 // indirect 23 | github.com/josharian/intern v1.0.0 // indirect 24 | github.com/json-iterator/go v1.1.12 // indirect 25 | github.com/mailru/easyjson v0.7.7 // indirect 26 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 27 | github.com/modern-go/reflect2 v1.0.2 // indirect 28 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 29 | golang.org/x/net v0.38.0 // indirect 30 | golang.org/x/oauth2 v0.27.0 // indirect 31 | golang.org/x/sys v0.31.0 // indirect 32 | golang.org/x/term v0.30.0 // indirect 33 | golang.org/x/text v0.23.0 // indirect 34 | golang.org/x/time v0.3.0 // indirect 35 | google.golang.org/protobuf v1.33.0 // indirect 36 | gopkg.in/inf.v0 v0.9.1 // indirect 37 | gopkg.in/yaml.v2 v2.4.0 // indirect 38 | gopkg.in/yaml.v3 v3.0.1 // indirect 39 | k8s.io/api v0.30.0 // indirect 40 | k8s.io/klog/v2 v2.120.1 // indirect 41 | k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect 42 | k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect 43 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect 44 | 
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect 45 | sigs.k8s.io/yaml v1.3.0 // indirect 46 | ) 47 | -------------------------------------------------------------------------------- /internal/integration_tests/maintenance.go: -------------------------------------------------------------------------------- 1 | package integration_tests 2 | 3 | import ( 4 | "github.com/neo4j/helm-charts/internal/model" 5 | "github.com/stretchr/testify/assert" 6 | "time" 7 | ) 8 | import "testing" 9 | 10 | func exitMaintenanceMode(t *testing.T, releaseName model.ReleaseName, chart model.Neo4jHelmChartBuilder, extraArgs ...string) error { 11 | err := run( 12 | t, "helm", model.BaseHelmCommand("upgrade", releaseName, chart, model.Neo4jEdition, append(extraArgs, "--set", "neo4j.offlineMaintenanceModeEnabled=false", "--set", "neo4j.name="+model.DefaultNeo4jName, "--wait", "--timeout", "300s")...)..., 13 | ) 14 | if !assert.NoError(t, err) { 15 | return err 16 | } 17 | 18 | err = run(t, "kubectl", "--namespace", string(releaseName.Namespace()), "rollout", "status", "--watch", "--timeout=120s", "statefulset/"+releaseName.String()) 19 | if !assert.NoError(t, err) { 20 | return err 21 | } 22 | return err 23 | } 24 | 25 | func enterMaintenanceMode(t *testing.T, releaseName model.ReleaseName, chart model.Neo4jHelmChartBuilder) error { 26 | err := run(t, "helm", model.BaseHelmCommand("upgrade", releaseName, chart, model.Neo4jEdition, "--set", "neo4j.offlineMaintenanceModeEnabled=true", "--set", "neo4j.name="+model.DefaultNeo4jName)...) 
27 | 28 | if !assert.NoError(t, err) { 29 | return err 30 | } 31 | 32 | time.Sleep(30 * time.Second) 33 | err = run(t, "kubectl", "--namespace", string(releaseName.Namespace()), "wait", "--for=condition=Initialized", "--timeout=300s", "pod/"+releaseName.PodName()) 34 | 35 | if !assert.NoError(t, err) { 36 | return err 37 | } 38 | time.Sleep(30 * time.Second) 39 | 40 | return err 41 | } 42 | 43 | func checkNeo4jNotRunning(t *testing.T, releaseName model.ReleaseName) error { 44 | cmd := []string{ 45 | "ps", 46 | "ax", 47 | } 48 | stdout, stderr, err := ExecInPod(releaseName, cmd, "") 49 | assert.Contains(t, stdout, "Neo4j is not running. Pod is in offline maintenance mode") 50 | assert.Empty(t, stderr) 51 | assert.NoError(t, err) 52 | return err 53 | } 54 | -------------------------------------------------------------------------------- /neo4j/templates/_ssl.tpl: -------------------------------------------------------------------------------- 1 | {{- define "neo4j.ssl.volumesFromSecrets" -}} 2 | {{- range $name, $sslSpec := . -}} 3 | {{- if ( or $sslSpec.privateKey.secretName $sslSpec.publicCertificate.secretName ) }} 4 | - name: "{{ $name }}-cert" 5 | secret: 6 | secretName: "{{ required "When ssl.{{ $name }}.publicCertificate is set then ssl.{{ $name }}.privateKey.secretName must also be provided" $sslSpec.publicCertificate.secretName }}" 7 | - name: "{{ $name }}-key" 8 | secret: 9 | secretName: "{{ required "When ssl.bolt.privateKey is set then ssl.bolt.publicCertificate.secretName must also be provided" $sslSpec.privateKey.secretName }}" 10 | {{- if $sslSpec.trustedCerts.sources }} 11 | - name: "{{ $name }}-trusted" 12 | projected: 13 | defaultMode: 0440 14 | {{ $sslSpec.trustedCerts | toYaml | nindent 4 }} 15 | {{- end }} 16 | {{- if $sslSpec.revokedCerts.sources -}} 17 | - name: "{{ $name }}-revoked" 18 | projected: 19 | defaultMode: 0440 20 | {{ $sslSpec.revokedCerts | toYaml | nindent 4 }} 21 | {{/* blank line, important! 
*/}}{{ end -}} 22 | {{- end -}} 23 | {{- end -}} 24 | {{- end -}} 25 | 26 | {{- define "neo4j.ssl.volumeMountsFromSecrets" -}} 27 | {{- range $name, $sslSpec := . -}} 28 | {{- if ( or $sslSpec.privateKey.secretName $sslSpec.publicCertificate.secretName ) }} 29 | - name: "{{ $name }}-cert" 30 | mountPath: /var/lib/neo4j/certificates/{{ $name }}/public.crt 31 | subPath: "{{ $sslSpec.publicCertificate.subPath | default "public.crt" }}" 32 | readOnly: true 33 | - name: "{{ $name }}-key" 34 | mountPath: "/var/lib/neo4j/certificates/{{ $name }}/private.key" 35 | subPath: "{{ $sslSpec.privateKey.subPath | default "private.key" }}" 36 | readOnly: true 37 | {{- if $sslSpec.trustedCerts.sources }} 38 | - name: "{{ $name }}-trusted" 39 | mountPath: "/var/lib/neo4j/certificates/{{ $name }}/trusted" 40 | readOnly: true 41 | {{- end -}} 42 | {{- if $sslSpec.revokedCerts.sources }} 43 | - name: "{{ $name }}-revoked" 44 | mountPath: "/var/lib/neo4j/certificates/{{ $name }}/revoked" 45 | readOnly: true 46 | {{- end -}} 47 | {{- end -}} 48 | {{- end -}} 49 | {{- end -}} 50 | -------------------------------------------------------------------------------- /internal/integration_tests/gcloud/commands.go: -------------------------------------------------------------------------------- 1 | package gcloud 2 | 3 | import ( 4 | "os/exec" 5 | "strings" 6 | "testing" 7 | "time" 8 | 9 | . 
"github.com/neo4j/helm-charts/internal/helpers" 10 | "github.com/neo4j/helm-charts/internal/model" 11 | ) 12 | 13 | func InstallGcloud(t *testing.T, zone Zone, project Project, releaseName model.ReleaseName) (Closeable, *model.PersistentDiskName, error) { 14 | 15 | err := run(t, "gcloud", "container", "clusters", "get-credentials", string(CurrentCluster())) 16 | if err != nil { 17 | return nil, nil, err 18 | } 19 | 20 | diskName, cleanupDisk, err := createDisk(t, zone, project, releaseName) 21 | if err != nil { 22 | return cleanupDisk, nil, err 23 | } 24 | 25 | return cleanupDisk, &diskName, err 26 | } 27 | 28 | func run(t *testing.T, command string, args ...string) error { 29 | t.Logf("running: %s %s\n", command, args) 30 | out, err := exec.Command(command, args...).CombinedOutput() 31 | if out != nil { 32 | t.Logf("output: %s\n", out) 33 | } 34 | return err 35 | } 36 | 37 | func createDisk(t *testing.T, zone Zone, project Project, releaseName model.ReleaseName) (model.PersistentDiskName, Closeable, error) { 38 | diskName := releaseName.DiskName() 39 | err := run(t, "gcloud", "compute", "disks", "create", "--size", model.StorageSize, "--type", "pd-ssd", string(diskName), "--zone="+string(zone), "--project="+string(project)) 40 | return diskName, func() error { return deleteDisk(t, zone, project, string(diskName)) }, err 41 | } 42 | 43 | func deleteDisk(t *testing.T, zone Zone, project Project, diskName string) error { 44 | delete := func() error { 45 | return run(t, "gcloud", "compute", "disks", "delete", diskName, "--quiet", "--zone="+string(zone), "--project="+string(project)) 46 | } 47 | err := delete() 48 | if err != nil { 49 | timeout := time.After(1 * time.Minute) 50 | for { 51 | select { 52 | case <-timeout: 53 | return err 54 | default: 55 | if err = delete(); err == nil { 56 | return err 57 | } else if strings.Contains(err.Error(), "was not found") { 58 | return err 59 | } 60 | } 61 | } 62 | } 63 | return err 64 | } 65 | 
-------------------------------------------------------------------------------- /internal/unit_tests/backup_operations_test.go: -------------------------------------------------------------------------------- 1 | package unit_tests 2 | 3 | import ( 4 | "os" 5 | "path/filepath" 6 | "testing" 7 | 8 | "github.com/neo4j/helm-charts/internal/backup" 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestDeleteBackupFiles(t *testing.T) { 13 | // Create a temporary directory and files 14 | tmpDir := t.TempDir() 15 | 16 | // Create test files 17 | testFiles := []string{"backup1.backup", "backup2.backup"} 18 | for _, file := range testFiles { 19 | filePath := filepath.Join(tmpDir, file) 20 | f, err := os.Create(filePath) 21 | assert.NoError(t, err) 22 | f.Close() 23 | } 24 | 25 | // Set environment variables 26 | os.Setenv("KEEP_BACKUP_FILES", "false") 27 | os.Setenv("BACKUP_DIR", tmpDir) 28 | 29 | // Test deletion 30 | err := backup.DeleteBackupFiles(testFiles, []string{}) 31 | assert.NoError(t, err) 32 | 33 | // Verify files are deleted 34 | for _, file := range testFiles { 35 | filePath := filepath.Join(tmpDir, file) 36 | _, err := os.Stat(filePath) 37 | assert.True(t, os.IsNotExist(err), "File should be deleted") 38 | } 39 | 40 | // Clean up environment 41 | os.Unsetenv("KEEP_BACKUP_FILES") 42 | os.Unsetenv("BACKUP_DIR") 43 | } 44 | 45 | func TestKeepBackupFiles(t *testing.T) { 46 | // Create a temporary directory and files 47 | tmpDir := t.TempDir() 48 | 49 | // Create test files 50 | testFiles := []string{"backup1.backup", "backup2.backup"} 51 | for _, file := range testFiles { 52 | filePath := filepath.Join(tmpDir, file) 53 | f, err := os.Create(filePath) 54 | assert.NoError(t, err) 55 | f.Close() 56 | } 57 | 58 | // Set environment variables to keep files 59 | os.Setenv("KEEP_BACKUP_FILES", "true") 60 | os.Setenv("BACKUP_DIR", tmpDir) 61 | 62 | // Test keep files 63 | err := backup.DeleteBackupFiles(testFiles, []string{}) 64 | assert.NoError(t, err) 65 
| 66 | // Verify files are NOT deleted 67 | files, err := os.ReadDir(tmpDir) 68 | assert.NoError(t, err) 69 | assert.Equal(t, len(testFiles), len(files), "Files should not be deleted when KEEP_BACKUP_FILES=true") 70 | 71 | // Clean up environment 72 | os.Unsetenv("KEEP_BACKUP_FILES") 73 | os.Unsetenv("BACKUP_DIR") 74 | } 75 | -------------------------------------------------------------------------------- /internal/model/neo4j.go: -------------------------------------------------------------------------------- 1 | package model 2 | 3 | import ( 4 | "gopkg.in/ini.v1" 5 | "strings" 6 | ) 7 | 8 | const neo4jConfJvmAdditionalKey = "server.jvm.additional" 9 | 10 | type Neo4jConfiguration struct { 11 | conf map[string]string 12 | jvmArgs []string 13 | } 14 | 15 | func (c *Neo4jConfiguration) JvmArgs() []string { 16 | return c.jvmArgs 17 | } 18 | 19 | func (c *Neo4jConfiguration) Conf() map[string]string { 20 | return c.conf 21 | } 22 | 23 | func (c *Neo4jConfiguration) PopulateFromFile(filename string) (*Neo4jConfiguration, error) { 24 | yamlFile, err := ini.ShadowLoad(filename) 25 | if err != nil { 26 | return nil, err 27 | } 28 | defaultSection := yamlFile.Section("") 29 | 30 | jvmAdditional, err := defaultSection.GetKey(neo4jConfJvmAdditionalKey) 31 | if err != nil { 32 | return nil, err 33 | } 34 | c.jvmArgs = jvmAdditional.StringsWithShadows("\n") 35 | c.conf = defaultSection.KeysHash() 36 | delete(c.conf, neo4jConfJvmAdditionalKey) 37 | 38 | return c, err 39 | } 40 | 41 | func (c *Neo4jConfiguration) Update(other Neo4jConfiguration, appendJvmArgs bool) Neo4jConfiguration { 42 | var jvmArgs = c.jvmArgs 43 | if len(other.jvmArgs) > 0 { 44 | if appendJvmArgs { 45 | jvmArgs = append(jvmArgs, other.jvmArgs...) 
46 | } else { 47 | jvmArgs = other.jvmArgs 48 | } 49 | } 50 | 51 | for k, v := range other.conf { 52 | c.conf[k] = v 53 | } 54 | c.jvmArgs = jvmArgs 55 | 56 | return Neo4jConfiguration{ 57 | jvmArgs: c.jvmArgs, 58 | conf: c.conf, 59 | } 60 | } 61 | 62 | func (c *Neo4jConfiguration) UpdateFromMap(other map[string]string, appendJvmArgs bool) Neo4jConfiguration { 63 | var jvmArgs = c.jvmArgs 64 | if otherArgsString, found := other["jvmArgs"]; found { 65 | otherJvmArgs := []string{} 66 | for _, arg := range strings.Split(otherArgsString, "\n") { 67 | otherJvmArgs = append(otherJvmArgs, strings.TrimSpace(arg)) 68 | } 69 | if appendJvmArgs { 70 | jvmArgs = append(jvmArgs, otherJvmArgs...) 71 | } else { 72 | jvmArgs = otherJvmArgs 73 | } 74 | delete(other, "jvmArgs") 75 | } 76 | for k, v := range other { 77 | c.conf[k] = v 78 | } 79 | c.jvmArgs = jvmArgs 80 | 81 | return Neo4jConfiguration{ 82 | jvmArgs: c.jvmArgs, 83 | conf: c.conf, 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /bin/gcloud/index_yaml_update: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # make bash play nicely 3 | # 4 | set -o pipefail -o errtrace -o errexit -o nounset 5 | shopt -s inherit_errexit 6 | [[ -n "${TRACE:-}" ]] && set -o xtrace 7 | 8 | HELM_CHART_VERSION="${HELM_CHART_VERSION:?HELM_CHART_VERSION is required}" 9 | DOCKER_IMAGE_VERSION="${DOCKER_IMAGE_VERSION:?DOCKER_IMAGE_VERSION is required}" 10 | # neo4j or neo4j-experimental 11 | HELM_REPO_NAME="${HELM_REPO_NAME:?HELM_REPO_NAME is required}" 12 | 13 | # Use the provided chart version directly 14 | CHART_VERSION="${HELM_CHART_VERSION}" 15 | echo "Using chart version ${CHART_VERSION} and docker image version ${DOCKER_IMAGE_VERSION}" 16 | 17 | echo "sleeping for 300 seconds" 18 | sleep 300 19 | helm repo add neo4j "https://helm.neo4j.com/${HELM_REPO_NAME}" 20 | helm repo update 21 | 22 | echo "Pulling helm charts for version 
${CHART_VERSION}" 23 | helm pull neo4j/neo4j --version "${CHART_VERSION}" || echo "Failed to pull neo4j chart" 24 | helm pull neo4j/neo4j-admin --version "${CHART_VERSION}" || echo "Failed to pull neo4j-admin chart" 25 | helm pull neo4j/neo4j-headless-service --version "${CHART_VERSION}" || echo "Failed to pull neo4j-headless-service chart" 26 | helm pull neo4j/neo4j-persistent-volume --version "${CHART_VERSION}" || echo "Failed to pull neo4j-persistent-volume chart" 27 | helm pull neo4j/neo4j-reverse-proxy --version "${CHART_VERSION}" || echo "Failed to pull neo4j-reverse-proxy chart" 28 | helm pull neo4j/neo4j-loadbalancer --version "${CHART_VERSION}" || echo "Failed to pull neo4j-loadbalancer chart" 29 | 30 | helm repo index . --merge index.yaml --url https://github.com/neo4j/helm-charts/releases/download/"${CHART_VERSION}" 31 | 32 | # First, commit any pending Chart.yaml and values.yaml version updates 33 | if git diff --cached --quiet; then 34 | echo "No staged version changes to commit" 35 | else 36 | echo "Committing staged version changes" 37 | git commit -m "Update chart versions to ${CHART_VERSION} and appVersion to ${DOCKER_IMAGE_VERSION}" 38 | git push 39 | fi 40 | 41 | # Then commit index.yaml 42 | git add index.yaml 43 | git commit -m "Updating index.yaml with chart version ${CHART_VERSION} (docker image version ${DOCKER_IMAGE_VERSION})" 44 | git push 45 | git tag -m "Tagging ${CHART_VERSION}" -a "${CHART_VERSION}" 46 | git push --tags -------------------------------------------------------------------------------- /neo4j-reverse-proxy/templates/reverseProxyServer.yaml: -------------------------------------------------------------------------------- 1 | {{- $port := include "neo4j.reverseProxy.port" . -}} 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: {{ include "neo4j.fullname" . }}-reverseproxy-dep 6 | labels: 7 | name: {{ include "neo4j.fullname" . 
}}-reverseproxy-dep 8 | namespace: "{{ .Release.Namespace }}" 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | name: {{ include "neo4j.fullname" . }}-reverseproxy 14 | template: 15 | metadata: 16 | name: {{ include "neo4j.fullname" . }}-reverseproxy 17 | labels: 18 | name: {{ include "neo4j.fullname" . }}-reverseproxy 19 | {{- include "neo4j.labels" $.Values.reverseProxy.podLabels | indent 8 }} 20 | spec: 21 | securityContext: {{ toYaml .Values.reverseProxy.podSecurityContext | nindent 8 }} 22 | {{- include "neo4j.nodeSelector" . | nindent 6 }} 23 | {{- if .Values.reverseProxy.imagePullSecrets }} 24 | imagePullSecrets: 25 | {{- range .Values.reverseProxy.imagePullSecrets }} 26 | - name: {{ . }} 27 | {{- end }} 28 | {{- end }} 29 | containers: 30 | - name: {{ include "neo4j.fullname" . }}-reverseproxy 31 | image: {{ $.Values.reverseProxy.image }} 32 | imagePullPolicy: Always 33 | securityContext: {{ toYaml .Values.reverseProxy.containerSecurityContext | nindent 12 }} 34 | ports: 35 | - containerPort: {{ $port }} 36 | env: 37 | - name: SERVICE_NAME 38 | value: {{ $.Values.reverseProxy.serviceName }} 39 | - name: PORT 40 | value: {{ add $port 8000 | quote }} 41 | - name: DOMAIN 42 | value: {{ $.Values.reverseProxy.domain | default "cluster.local" }} 43 | - name: NAMESPACE 44 | value: {{ $.Values.reverseProxy.namespace | default .Release.Namespace }} 45 | --- 46 | apiVersion: v1 47 | kind: Service 48 | metadata: 49 | name: {{ include "neo4j.fullname" . }}-reverseproxy-service 50 | namespace: "{{ .Release.Namespace }}" 51 | spec: 52 | type: ClusterIP 53 | selector: 54 | name: {{ include "neo4j.fullname" . 
}}-reverseproxy 55 | ports: 56 | - protocol: TCP 57 | port: {{ $port }} 58 | targetPort: {{ add $port 8000 }} 59 | --- 60 | 61 | -------------------------------------------------------------------------------- /neo4j/templates/neo4j-imagePullSecret.yaml: -------------------------------------------------------------------------------- 1 | 2 | {{- template "neo4j.imageCredentials.checkForEmptyFieldsAndDuplicates" . -}} 3 | 4 | {{- range $index, $imagePullSecret := $.Values.image.imagePullSecrets -}} 5 | 6 | {{/* This is to avoid invalid imagePullSecret value*/}} 7 | {{- if kindIs "invalid" $imagePullSecret -}} 8 | {{ fail (printf "Missing imagePullSecret name") }} 9 | {{- end -}} 10 | 11 | {{/* This is to avoid empty or spaces in imagePullSecrets*/}} 12 | {{- if ne (len ($imagePullSecret | trim)) 0 -}} 13 | {{- $imagePullSecret = $imagePullSecret | trim -}} 14 | {{/* assuming secret exists by default, this will lead to no secret creation when disableLookups is enabled */}} 15 | {{- $secretExists := true }} 16 | {{- if not $.Values.disableLookups -}} 17 | {{- $secret := (lookup "v1" "Secret" $.Release.Namespace $imagePullSecret) -}} 18 | {{- $secretExists = $secret | all -}} 19 | {{- end -}} 20 | 21 | {{- $imageCredentialElement := include "neo4j.imageCredentials.getImageCredential" (dict "imagePullSecret" $imagePullSecret "imageCredentials" $.Values.image.imageCredentials) | fromYaml -}} 22 | 23 | {{/* create secret only and only if it does not exist in the cluster */}} 24 | {{- if not $secretExists -}} 25 | 26 | {{/* throw error if there is no imageCredential entry for the respective imagePullSecret and disableLookups is not set */}} 27 | {{/* We should not be checking for a respective imageCredential entry when lookups are disabled */}} 28 | {{- if and (empty $imageCredentialElement) (not $.Values.disableLookups) -}} 29 | {{ fail (printf "No docker-registry secret exists for imagePullSecret \"%s\" in the cluster. 
\n Missing imageCredential entry for \"%s\"" $imagePullSecret $imagePullSecret) }} 30 | {{- else }} 31 | apiVersion: v1 32 | kind: Secret 33 | metadata: 34 | name: "{{ $imagePullSecret }}" 35 | namespace: "{{ $.Release.Namespace }}" 36 | labels: 37 | app: "{{ $.Values.neo4j.name }}" 38 | {{- include "neo4j.labels" $.Values.neo4j | indent 4 }} 39 | type: kubernetes.io/dockerconfigjson 40 | data: 41 | .dockerconfigjson: {{ include "neo4j.imagePullSecret.dockerConfigJson" $imageCredentialElement }} 42 | --- 43 | {{- end -}} 44 | {{- end -}} 45 | {{- end }} 46 | {{- end -}} 47 | -------------------------------------------------------------------------------- /neo4j-persistent-volume/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for Neo4j Disk for Google Cloud Platform. 2 | 3 | neo4j: 4 | # name of your neo4j deployment 5 | name: "" 6 | 7 | # Neo4j data volume (a Google Cloud Compute Persistent disk) 8 | data: 9 | # gcePersistentDisk should be the name of a Google Cloud Compute Persistent disk that is in a Region/Zone accessible by your GKE cluster. 10 | # This persistent disk must already exist and not be in use 11 | # 12 | # A disk can be created using the gcloud cli: 13 | # gcloud compute disks create --size 100Gi --type pd-ssd --zone="${CLOUDSDK_COMPUTE_ZONE}" "${GCE_PERSISTENT_DISK_NAME}" 14 | # n.b. in Google Cloud disk IOPS are proportional to disk size. For good performance of Neo4j you may need to specify a disk size significantly larger than the 15 | # amount of storage that Neo4j requires. 16 | # We recommend a disk size of at least 1Ti for performance or load testing and at least 100Gi for development work. 17 | 18 | # CSI driver for storage provisioning. For AWS use ebs.csi.aws.com, GCP use pd.csi.storage.gke.io and Azure use disk.csi.azure.com 19 | driver: "" 20 | # The reference to pre-created disk. 
21 | # For AWS use Volume ID, 22 | # GCP use projects/{project_id}/zones/{zone_name}/disks/{disk_name} 23 | # Azure use /subscriptions/{sub-id}/resourcegroups/{group-name}/providers/microsoft.compute/disks/{disk-id} 24 | volumeHandle: "" 25 | # size of disk used by K8s when scheduling and binding, this has to be passed explicitly to K8s, it's not capable of figuring this out for itself. 26 | capacity: 27 | storage: "" 28 | 29 | # storageClassName to attach to the PersistentVolume in K8s. Can be set to empty string. 30 | storageClassName: "manual" 31 | fsType: "ext4" 32 | createPvc: true 33 | createStorageClass: false 34 | reclaimPolicy: Retain 35 | 36 | 37 | # Neo4j ops volume (a Google Cloud Filestore File Share) 38 | # not suitable for Neo4j database storage but useful for storing metrics / logs / backups and other operational files. 39 | ops: 40 | # name of the Filestore file share to use (do NOT include a leading /) 41 | fileShare: "" 42 | # IP address of Filestore instance 43 | ipAddress: "" 44 | # size of disk used by K8s when scheduling and binding, this has to be passed explicitly to K8s, it's not capable of figuring this out for itself. 45 | capacity: 46 | storage: "" 47 | 48 | storageClassName: "" 49 | -------------------------------------------------------------------------------- /neo4j/templates/neo4j-auth.yaml: -------------------------------------------------------------------------------- 1 | {{- $authDisabled := index .Values.config "dbms.security.auth_enabled" | default "true" | regexMatch "(?i)no|false" -}} 2 | 3 | {{- if $authDisabled -}} 4 | {{- if (or (.Values.neo4j.passwordFromSecret) (.Values.neo4j.password) ) -}} 5 | {{ fail "unsupported State: Cannot set neo4j.password or neo4j.passwordFromSecret when Neo4j authis disabled (dbms.security.auth_enabled=false). 
Either remove neo4j.password setting or enable Neo4j auth" }} 6 | {{- end -}} 7 | 8 | {{- else -}} 9 | {{- if not (.Values.neo4j.passwordFromSecret ) -}} 10 | {{- $secretName := include "neo4j.name" . | printf "%s-auth" }} 11 | {{- $secretExists := false }} 12 | {{- $secret := list -}} 13 | {{- if not .Values.disableLookups -}} 14 | {{- $secret = (lookup "v1" "Secret" .Release.Namespace $secretName) }} 15 | {{- $secretExists = $secret | all }} 16 | {{- end -}} 17 | 18 | {{- $secretBelongsToSomeoneElse := false }} 19 | {{- if $secretExists }} 20 | {{- $secretBelongsToSomeoneElse = index $secret.metadata.annotations "meta.helm.sh/release-name" | eq .Release.Name | not }} 21 | {{- end }} 22 | 23 | {{- if $secretBelongsToSomeoneElse -}} 24 | 25 | {{- if eq (len .Values.neo4j.password) 0 -}} 26 | 27 | {{- $password := index $secret.data "NEO4J_AUTH" | b64dec | trimPrefix "neo4j/" -}} 28 | {{- $ignored := set .Values.neo4j "password" $password -}} 29 | 30 | {{- else -}} 31 | 32 | {{- $expectedPassword := .Values.neo4j.password | printf "neo4j/%v" | b64enc -}} 33 | {{- $existingPassword := index $secret.data "NEO4J_AUTH" -}} 34 | 35 | {{- if ne $existingPassword $expectedPassword -}} 36 | {{ fail "The desired password does not match the password stored in the Kubernetes Secret. For details on how to change the neo4j password via helm check the Neo4j Operations Manual." }} 37 | {{- else -}} 38 | # Using the existing Kubernetes Secret {{ $secretName }} for the neo4j password. 39 | {{- end -}} 40 | {{- end -}} 41 | 42 | {{- else -}} 43 | 44 | {{- $password := include "neo4j.password" . | printf "neo4j/%v" | b64enc -}} 45 | apiVersion: v1 46 | kind: Secret 47 | metadata: 48 | name: "{{ $secretName }}" 49 | namespace: "{{ .Release.Namespace }}" 50 | labels: 51 | app: "{{ template "neo4j.name" . 
}}" 52 | {{- include "neo4j.labels" $.Values.neo4j | indent 4 }} 53 | type: Opaque 54 | data: 55 | NEO4J_AUTH: "{{ $password }}" 56 | {{- end -}} 57 | {{- end -}} 58 | {{- end -}} 59 | 60 | -------------------------------------------------------------------------------- /neo4j/templates/_ldap.tpl: -------------------------------------------------------------------------------- 1 | {{- define "neo4j.ldapPasswordFromSecretExistsOrNot" -}} 2 | {{- template "neo4j.ldapPasswordMountPath" . -}} 3 | {{- if (.Values.ldapPasswordFromSecret | trim) -}} 4 | {{- if (not .Values.disableLookups) -}} 5 | {{- $secret := (lookup "v1" "Secret" .Release.Namespace .Values.ldapPasswordFromSecret) }} 6 | {{- $secretExists := $secret | all }} 7 | 8 | {{- if (not $secretExists) -}} 9 | {{ fail (printf "Secret %s configured in 'ldapPasswordFromSecret' not found" .Values.ldapPasswordFromSecret) }} 10 | {{- else if not (hasKey $secret.data "LDAP_PASS") -}} 11 | {{ fail (printf "Secret %s must contain key LDAP_PASS" .Values.ldapPasswordFromSecret) }} 12 | {{- end -}} 13 | {{- end -}} 14 | {{- true -}} 15 | {{- end -}} 16 | {{- end -}} 17 | 18 | {{/* checks if ldapPasswordMountPath is set or not when ldapPasswordFromSecret is defined */}} 19 | {{- define "neo4j.ldapPasswordMountPath" -}} 20 | {{- if or (.Values.ldapPasswordMountPath | trim) (.Values.ldapPasswordFromSecret | trim) -}} 21 | {{- if not (eq .Values.neo4j.edition "enterprise") -}} 22 | {{ fail (printf "ldapPasswordFromSecret and ldapPasswordMountPath are Enterprise Edition feature only. 
Please set edition to enterprise via --set neo4j.edition=\"enterprise\"") }} 23 | {{- end -}} 24 | {{- end -}} 25 | 26 | {{- if and (.Values.ldapPasswordFromSecret | trim) (not (.Values.ldapPasswordMountPath | trim)) -}} 27 | {{ fail (printf "Please define 'ldapPasswordMountPath'") }} 28 | {{- end -}} 29 | 30 | {{- if and (.Values.ldapPasswordMountPath | trim) (not (.Values.ldapPasswordFromSecret | trim)) -}} 31 | {{ fail (printf "Please define 'ldapPasswordFromSecret'") }} 32 | {{- end -}} 33 | {{- end -}} 34 | 35 | {{/* checks if ldapPasswordMountPath is set or not when ldapPasswordFromSecret is defined */}} 36 | {{- define "neo4j.ldapVolumeMount" -}} 37 | {{- if and (.Values.ldapPasswordFromSecret | trim) (.Values.ldapPasswordMountPath | trim) }} 38 | - mountPath: "{{ .Values.ldapPasswordMountPath }}" 39 | readOnly: true 40 | name: neo4j-ldap-password 41 | {{- end }} 42 | {{- end -}} 43 | 44 | {{- define "neo4j.ldapVolume" -}} 45 | {{- if and (.Values.ldapPasswordFromSecret | trim) (.Values.ldapPasswordMountPath | trim) }} 46 | - name: neo4j-ldap-password 47 | secret: 48 | secretName: "{{- .Values.ldapPasswordFromSecret -}}" 49 | {{- end }} 50 | {{- end -}} 51 | -------------------------------------------------------------------------------- /neo4j-reverse-proxy/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{- define "neo4j.fullname" -}} 2 | {{- if .Values.fullnameOverride -}} 3 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 4 | {{- else -}} 5 | {{- if .Values.nameOverride -}} 6 | {{- $name := default .Chart.Name .Values.nameOverride -}} 7 | {{- if contains $name .Release.Name -}} 8 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 9 | {{- else -}} 10 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 11 | {{- end -}} 12 | {{- else -}} 13 | {{- printf "%s" .Release.Name | trunc 63 | trimSuffix "-" -}} 14 | {{- end -}} 15 | {{- end -}} 16 | {{- end -}} 17 | 18 
| {{- define "neo4j.annotations" -}} 19 | {{- if not (empty .) }} 20 | annotations: 21 | {{- with . -}} 22 | {{- range $name, $value := . }} 23 | {{ $name | quote }}: {{ $value | quote }} 24 | {{- end }} 25 | {{- end -}} 26 | {{- end }} 27 | {{- end }} 28 | 29 | {{- define "neo4j.ingress.tls" -}} 30 | {{- if and $.Values.reverseProxy.ingress.tls.enabled $.Values.reverseProxy.ingress.tls.config }} 31 | tls: {{ toYaml $.Values.reverseProxy.ingress.tls.config | nindent 2 }} 32 | {{- end }} 33 | {{- end -}} 34 | 35 | {{- define "neo4j.reverseProxy.port" -}} 36 | {{- if $.Values.reverseProxy.ingress.tls.enabled }} 37 | {{- printf "%d" 443 -}} 38 | {{- else -}} 39 | {{- printf "%d" 80 -}} 40 | {{- end -}} 41 | {{- end -}} 42 | 43 | {{- define "neo4j.reverseProxy.ingressName" -}} 44 | {{- $ingressName := printf "%s-reverseproxy-ingress" (include "neo4j.fullname" .) -}} 45 | {{- printf "$(kubectl get ingress/%s -n %s -o jsonpath='{.status.loadBalancer.ingress[0].ip}')" $ingressName .Release.Namespace -}} 46 | {{- end -}} 47 | 48 | {{- define ".neo4j.ingress.host" -}} 49 | {{- if and (not (kindIs "invalid" $.Values.reverseProxy.ingress.host)) (not (empty $.Values.reverseProxy.ingress.host)) }} 50 | host: {{ $.Values.reverseProxy.ingress.host | quote }} 51 | {{- end }} 52 | {{- end -}} 53 | 54 | {{- define "neo4j.labels" -}} 55 | {{- with . -}} 56 | {{- range $name, $value := . 
}} 57 | {{ $name }}: "{{ $value }}" 58 | {{- end -}} 59 | {{- end -}} 60 | {{- end }} 61 | 62 | {{- define "neo4j.nodeSelector" -}} 63 | {{- if and (not (kindIs "invalid" .Values.reverseProxy.nodeSelector) ) (not (empty .Values.reverseProxy.nodeSelector) ) }} 64 | nodeSelector: {{ .Values.reverseProxy.nodeSelector | toYaml | nindent 2 }} 65 | {{- end }} 66 | {{- end }} 67 | -------------------------------------------------------------------------------- /internal/unit_tests/helm_template_standalone_test.go: -------------------------------------------------------------------------------- 1 | package unit_tests 2 | 3 | import ( 4 | "github.com/neo4j/helm-charts/internal/model" 5 | "github.com/stretchr/testify/assert" 6 | appsv1 "k8s.io/api/apps/v1" 7 | v1 "k8s.io/api/core/v1" 8 | "testing" 9 | ) 10 | 11 | // Tests the "default" behaviour that you get if you don't pass in *any* other values and the helm chart defaults are used 12 | func TestDefaultCommunityHelmTemplate(t *testing.T) { 13 | t.Parallel() 14 | 15 | helmValues := model.HelmValues{ 16 | Neo4J: model.Neo4J{ 17 | Name: "test", 18 | }, 19 | Volumes: model.Volumes{ 20 | Data: model.Data{ 21 | Mode: "selector", 22 | DisableSubPath: false, 23 | }, 24 | }, 25 | } 26 | manifest, err := model.HelmTemplateFromStruct(t, model.HelmChart, helmValues) 27 | if !assert.NoError(t, err) { 28 | return 29 | } 30 | checkNeo4jManifest(t, manifest, 3) 31 | 32 | neo4jStatefulSet := manifest.First(&appsv1.StatefulSet{}).(*appsv1.StatefulSet) 33 | neo4jStatefulSet.GetName() 34 | assert.NotEmpty(t, neo4jStatefulSet.Spec.Template.Spec.Containers) 35 | for _, container := range neo4jStatefulSet.Spec.Template.Spec.Containers { 36 | assert.NotContains(t, container.Image, "enterprise") 37 | assert.Equal(t, "1", container.Resources.Requests.Cpu().String()) 38 | assert.Equal(t, "2Gi", container.Resources.Requests.Memory().String()) 39 | } 40 | for _, container := range neo4jStatefulSet.Spec.Template.Spec.InitContainers { 41 | 
assert.NotContains(t, container.Image, "enterprise") 42 | } 43 | 44 | envConfigMap := manifest.OfTypeWithName(&v1.ConfigMap{}, model.DefaultHelmTemplateReleaseName.EnvConfigMapName()).(*v1.ConfigMap) 45 | assert.Equal(t, envConfigMap.Data["NEO4J_EDITION"], "COMMUNITY_K8S") 46 | } 47 | 48 | func TestExplicitCommunityHelmTemplate(t *testing.T) { 49 | t.Parallel() 50 | helmValues := model.DefaultCommunityValues 51 | manifest, err := model.HelmTemplateFromStruct(t, model.HelmChart, helmValues) 52 | if !assert.NoError(t, err) { 53 | return 54 | } 55 | 56 | checkNeo4jManifest(t, manifest, 3) 57 | 58 | neo4jStatefulSet := manifest.First(&appsv1.StatefulSet{}).(*appsv1.StatefulSet) 59 | neo4jStatefulSet.GetName() 60 | for _, container := range neo4jStatefulSet.Spec.Template.Spec.Containers { 61 | assert.NotContains(t, container.Image, "enterprise") 62 | } 63 | for _, container := range neo4jStatefulSet.Spec.Template.Spec.InitContainers { 64 | assert.NotContains(t, container.Image, "enterprise") 65 | } 66 | 67 | envConfigMap := manifest.OfTypeWithName(&v1.ConfigMap{}, model.DefaultHelmTemplateReleaseName.EnvConfigMapName()).(*v1.ConfigMap) 68 | assert.Equal(t, envConfigMap.Data["NEO4J_EDITION"], "COMMUNITY_K8S") 69 | } 70 | -------------------------------------------------------------------------------- /neo4j/templates/_image.tpl: -------------------------------------------------------------------------------- 1 | {{- define "neo4j.defaultChartImage" -}} 2 | {{- $isEnterprise := required "neo4j.edition must be specified" .Values.neo4j.edition | regexMatch "(?i)enterprise" -}} 3 | {{- $imageName := "neo4j:" -}} 4 | {{/* .Chart.AppVersion is set to "-" for headless and loadbalancer service*/}} 5 | {{- if eq $.Chart.AppVersion "-" -}} 6 | {{- $imageName = printf "%s%s" $imageName $.Chart.Version -}} 7 | {{- else -}} 8 | {{- $imageName = printf "%s%s" $imageName $.Chart.AppVersion -}} 9 | {{- end -}} 10 | {{- if $isEnterprise -}} 11 | {{- $imageName = printf "%s%s" $imageName 
"-enterprise" -}} 12 | {{- end -}} 13 | {{- $imageName -}} 14 | {{- end -}} 15 | 16 | 17 | {{- define "neo4j.image" -}} 18 | {{- template "neo4j.checkLicenseAgreement" . -}} 19 | 20 | {{/* Validation: cannot use both customImage and separated fields */}} 21 | {{- if and .Values.image.customImage (or .Values.image.registry .Values.image.repository .Values.image.tag) -}} 22 | {{- fail "Cannot use both image.customImage and separated image fields (image.registry, image.repository, image.tag). Choose only one method." -}} 23 | {{- end -}} 24 | 25 | {{/* Validation: repository must be set if using separated fields */}} 26 | {{- if and (or .Values.image.registry .Values.image.tag .Values.image.repository) (eq (trim (.Values.image.repository | default "")) "") -}} 27 | {{- fail "image.repository must be set if using separated image fields" -}} 28 | {{- end -}} 29 | 30 | {{- $default_image := include "neo4j.defaultChartImage" . -}} 31 | {{- $image := "" -}} 32 | 33 | {{- if .Values.image.customImage -}} 34 | {{- $image = .Values.image.customImage -}} 35 | {{- else -}} 36 | {{- $registry := .Values.image.registry | default "" -}} 37 | {{- $repository := .Values.image.repository | default "neo4j" -}} 38 | {{- $tag := .Values.image.tag | default (regexReplaceAll "^neo4j:" $default_image "") -}} 39 | {{- $image = printf "%s%s:%s" (ternary (printf "%s/" $registry) "" (ne $registry "")) $repository $tag -}} 40 | {{- end -}} 41 | 42 | {{ $image }} 43 | {{- end -}} 44 | 45 | {{- define "cleanup.image" -}} 46 | {{- with .Values.services.neo4j.cleanup.image -}} 47 | {{- $registryName := .registry -}} 48 | {{- $repositoryName := .repository -}} 49 | {{- $separator := ":" -}} 50 | {{- $termination := printf "%s.%s" $.Capabilities.KubeVersion.Major (regexReplaceAll "\\D+" $.Capabilities.KubeVersion.Minor "") -}} 51 | {{- if not (empty (.tag | trim)) -}} 52 | {{- $termination := .tag | toString -}} 53 | {{- end -}} 54 | {{- if .digest }} 55 | {{- $separator = "@" -}} 56 | {{- 
$termination = .digest | toString -}} 57 | {{- end -}} 58 | {{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}} 59 | {{- end -}} 60 | {{- end -}} 61 | -------------------------------------------------------------------------------- /internal/integration_tests/cluster_model.go: -------------------------------------------------------------------------------- 1 | package integration_tests 2 | 3 | import ( 4 | . "github.com/neo4j/helm-charts/internal/helpers" 5 | "github.com/neo4j/helm-charts/internal/integration_tests/gcloud" 6 | "github.com/neo4j/helm-charts/internal/model" 7 | "testing" 8 | ) 9 | 10 | type parallelResult struct { 11 | Closeable 12 | error 13 | } 14 | 15 | type helmComponent interface { 16 | Name() model.ReleaseName 17 | Install(t *testing.T) parallelResult 18 | } 19 | 20 | type clusterCore struct { 21 | name model.ReleaseName 22 | extraHelmInstallArgs []string 23 | } 24 | 25 | func (c clusterCore) Name() model.ReleaseName { 26 | return c.name 27 | } 28 | 29 | func (c clusterCore) Install(t *testing.T) parallelResult { 30 | var err error 31 | var cleanup Closeable 32 | cleanup, err = InstallNeo4jInGcloud(t, gcloud.CurrentZone(), gcloud.CurrentProject(), c.name, model.HelmChart, c.extraHelmInstallArgs...) 33 | return parallelResult{cleanup, err} 34 | } 35 | 36 | type clusterReadReplica struct { 37 | name model.ReleaseName 38 | extraHelmInstallArgs []string 39 | } 40 | 41 | func (c clusterReadReplica) Name() model.ReleaseName { 42 | return c.name 43 | } 44 | 45 | func (c clusterReadReplica) Install(t *testing.T) parallelResult { 46 | var err error 47 | var cleanup Closeable 48 | cleanup, err = InstallNeo4jInGcloud(t, gcloud.CurrentZone(), gcloud.CurrentProject(), c.name, model.ClusterReadReplicaHelmChart, c.extraHelmInstallArgs...) 
49 | return parallelResult{cleanup, err} 50 | } 51 | 52 | type clusterLoadBalancer struct { 53 | name model.ReleaseName 54 | extraHelmInstallArgs []string 55 | } 56 | 57 | func (c clusterLoadBalancer) Name() model.ReleaseName { 58 | return c.name 59 | } 60 | 61 | func (c clusterLoadBalancer) Install(t *testing.T) parallelResult { 62 | var err error 63 | var cleanup Closeable 64 | cleanup = func() error { 65 | return run(t, "helm", model.LoadBalancerHelmCommand("uninstall", c.name, c.extraHelmInstallArgs...)...) 66 | } 67 | err = run(t, "helm", model.LoadBalancerHelmCommand("install", c.name, c.extraHelmInstallArgs...)...) 68 | return parallelResult{cleanup, err} 69 | } 70 | 71 | type clusterHeadLessService struct { 72 | name model.ReleaseName 73 | extraHelmInstallArgs []string 74 | } 75 | 76 | func (c clusterHeadLessService) Name() model.ReleaseName { 77 | return c.name 78 | } 79 | 80 | func (c clusterHeadLessService) Install(t *testing.T) parallelResult { 81 | var err error 82 | var cleanup Closeable 83 | cleanup = func() error { 84 | return run(t, "helm", model.HeadlessServiceHelmCommand("uninstall", c.name, c.extraHelmInstallArgs...)...) 85 | } 86 | err = run(t, "helm", model.HeadlessServiceHelmCommand("install", c.name, c.extraHelmInstallArgs...)...) 
87 | return parallelResult{cleanup, err} 88 | } 89 | -------------------------------------------------------------------------------- /internal/resources/resources.go: -------------------------------------------------------------------------------- 1 | package resources 2 | 3 | var TestAntiAffinityRule = newYamlFile("testData/testAntiAffinityRule.yaml") 4 | var PluginsInitContainer = newYamlFile("testData/pluginsInitContainer.yaml") 5 | var JsonLogging = newYamlFile("testData/jsonLogging.yaml") 6 | var AcceptLicenseAgreementBoolYes = newYamlFile("testData/acceptLicenseAgreementBoolYes.yaml") 7 | var AcceptLicenseAgreementBoolTrue = newYamlFile("testData/acceptLicenseAgreementBoolTrue.yaml") 8 | var AcceptLicenseAgreement = newYamlFile("testData/acceptLicenseAgreement.yaml") 9 | var AcceptLicenseAgreementEval = newYamlFile("testData/acceptLicenseAgreementEval.yaml") 10 | var ApocCorePlugin = newYamlFile("testData/apocCorePlugin.yaml") 11 | var CsvMetrics = newYamlFile("testData/csvMetrics.yaml") 12 | var DefaultStorageClass = newYamlFile("testData/defaultStorageClass.yaml") 13 | var JvmAdditionalSettings = newYamlFile("testData/jvmAdditionalSettings.yaml") 14 | var BoolsInConfig = newYamlFile("testData/boolsInConfig.yaml") 15 | var IntsInConfig = newYamlFile("testData/intsInConfig.yaml") 16 | var ChmodInitContainer = newYamlFile("testData/chmodInitContainer.yaml") 17 | var ChmodInitContainerAndCustomInitContainer = newYamlFile("testData/chmodInitContainerAndCustomInitContainer.yaml") 18 | var ReadReplicaUpstreamStrategy = newYamlFile("testData/read_replica_upstream_selection_strategy.yaml") 19 | var ExcludeLoadBalancer = newYamlFile("testData/excludeLoadBalancer.yaml") 20 | var EmptyImageCredentials = newYamlFile("testData/imagePullSecret/emptyImageCreds.yaml") 21 | var DuplicateImageCredentials = newYamlFile("testData/imagePullSecret/duplicateImageCreds.yaml") 22 | var MissingImageCredentials = newYamlFile("testData/imagePullSecret/missingImageCreds.yaml") 23 | var 
EmptyImagePullSecrets = newYamlFile("testData/imagePullSecret/emptyImagePullSecrets.yaml") 24 | var InvalidNodeSelectorLabels = newYamlFile("testData/nodeselector.yaml") 25 | var ApocConfig = newYamlFile("testData/apocConfig.yaml") 26 | var ApocClusterTestConfig = newYamlFile("testData/apocClusterTest.yaml") 27 | var PodSpecAnnotations = newYamlFile("testData/podSpecAnnotations.yaml") 28 | var StatefulSetAnnotations = newYamlFile("testData/statefulSetAnnotations.yaml") 29 | var PriorityClassName = newYamlFile("testData/priorityClassName.yaml") 30 | var AdditionalVolumesAndMounts = newYamlFile("testData/additionalVolumes.yaml") 31 | var Tolerations = newYamlFile("testData/tolerations.yaml") 32 | var NodeAffinity = newYamlFile("testData/nodeAffinity.yaml") 33 | var GdsStandaloneTest = newYamlFile("testData/gdsStandaloneTest.yaml") 34 | var SecretMounts = newYamlFile("testData/secretMounts.yaml") 35 | var InvalidSecretMounts = newYamlFile("testData/secretMounts/invalidSecretMounts.yaml") 36 | var SeeduriS3SecretMounts = newYamlFile("testData/secretMounts/seeduriS3SecretMounts.yaml") 37 | var EmptySecretMounts = newYamlFile("testData/secretMounts/emptySecretMounts.yaml") 38 | -------------------------------------------------------------------------------- /neo4j-admin/backup/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/neo4j/helm-charts/neo4j-admin/backup 2 | 3 | go 1.23.8 4 | 5 | require ( 6 | cloud.google.com/go/storage v1.43.0 7 | github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 8 | github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0 9 | github.com/aws/aws-sdk-go v1.55.5 10 | github.com/stretchr/testify v1.10.0 11 | google.golang.org/api v0.203.0 12 | k8s.io/utils v0.0.0-20241210054802-24370beab758 13 | ) 14 | 15 | require ( 16 | cloud.google.com/go v0.116.0 // indirect 17 | cloud.google.com/go/auth v0.9.9 // indirect 18 | cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect 19 | 
cloud.google.com/go/compute/metadata v0.5.2 // indirect 20 | cloud.google.com/go/iam v1.2.1 // indirect 21 | github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 // indirect 22 | github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect 23 | github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect 24 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect 25 | github.com/felixge/httpsnoop v1.0.4 // indirect 26 | github.com/go-logr/logr v1.4.2 // indirect 27 | github.com/go-logr/stdr v1.2.2 // indirect 28 | github.com/golang-jwt/jwt/v5 v5.2.2 // indirect 29 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect 30 | github.com/google/s2a-go v0.1.8 // indirect 31 | github.com/google/uuid v1.6.0 // indirect 32 | github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect 33 | github.com/googleapis/gax-go/v2 v2.13.0 // indirect 34 | github.com/jmespath/go-jmespath v0.4.0 // indirect 35 | github.com/kylelemons/godebug v1.1.0 // indirect 36 | github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect 37 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect 38 | go.opencensus.io v0.24.0 // indirect 39 | go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect 40 | go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect 41 | go.opentelemetry.io/otel v1.29.0 // indirect 42 | go.opentelemetry.io/otel/metric v1.29.0 // indirect 43 | go.opentelemetry.io/otel/trace v1.29.0 // indirect 44 | golang.org/x/crypto v0.28.0 // indirect 45 | golang.org/x/net v0.30.0 // indirect 46 | golang.org/x/oauth2 v0.23.0 // indirect 47 | golang.org/x/sync v0.8.0 // indirect 48 | golang.org/x/sys v0.26.0 // indirect 49 | golang.org/x/text v0.19.0 // indirect 50 | golang.org/x/time v0.7.0 // indirect 51 | google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 // indirect 52 | 
google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 // indirect 53 | google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect 54 | google.golang.org/grpc v1.67.1 // indirect 55 | google.golang.org/protobuf v1.35.1 // indirect 56 | gopkg.in/yaml.v3 v3.0.1 // indirect 57 | ) 58 | -------------------------------------------------------------------------------- /internal/model/helm_charts.go: -------------------------------------------------------------------------------- 1 | package model 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path" 7 | "path/filepath" 8 | "runtime" 9 | ) 10 | 11 | var _, thisFile, _, _ = runtime.Caller(0) 12 | var modelDir = path.Dir(thisFile) 13 | 14 | var LoadBalancerHelmChart = newHelmChart("neo4j-loadbalancer") 15 | 16 | var HeadlessServiceHelmChart = newHelmChart("neo4j-headless-service") 17 | 18 | var Neo4jHelmChartCommunityAndEnterprise = newNeo4jHelmChart("neo4j", []string{"community", "enterprise"}) 19 | 20 | var HelmChart = newNeo4jHelmChart("neo4j", []string{"enterprise"}) 21 | 22 | var BackupHelmChart = newHelmChart("neo4j-admin") 23 | 24 | var ReverseProxyHelmChart = newHelmChart("neo4j-reverse-proxy") 25 | 26 | var ClusterReadReplicaHelmChart = newNeo4jHelmChart("neo4j", []string{"enterprise"}) 27 | 28 | var PrimaryHelmCharts = []Neo4jHelmChartBuilder{Neo4jHelmChartCommunityAndEnterprise} 29 | 30 | type helmChart struct { 31 | path string 32 | editions []string 33 | } 34 | 35 | type HelmChartBuilder interface { 36 | getPath() string 37 | Name() string 38 | } 39 | 40 | type Neo4jHelmChartBuilder interface { 41 | HelmChartBuilder 42 | GetEditions() []string 43 | SupportsEdition(edition string) bool 44 | } 45 | 46 | func (h *helmChart) getPath() string { 47 | return h.path 48 | } 49 | 50 | func (h *helmChart) Name() string { 51 | dir, file := filepath.Split(h.path) 52 | if file != "" { 53 | return file 54 | } else { 55 | return dir 56 | } 57 | } 58 | 59 | func (h *helmChart) 
GetEditions() []string { 60 | return h.editions 61 | } 62 | 63 | func (h *helmChart) SupportsEdition(edition string) bool { 64 | for _, supportedEdition := range h.editions { 65 | if edition == supportedEdition { 66 | return true 67 | } 68 | } 69 | return false 70 | } 71 | 72 | func chartExistsAt(path string) (bool, error) { 73 | if fileInfo, err := os.Stat(path); err == nil { 74 | if filepath.Ext(path) == ".yaml" && !fileInfo.IsDir() { 75 | return true, nil 76 | } 77 | if fileInfo.IsDir() { 78 | return chartExistsAt(filepath.Join(path, "Chart.yaml")) 79 | } 80 | return false, fmt.Errorf("unexpected error occured. File %s returned fileInfo: %v", path, fileInfo) 81 | } else { 82 | return false, err 83 | } 84 | } 85 | 86 | func newHelmChart(helmChartName string) HelmChartBuilder { 87 | filepath := path.Join(path.Join(modelDir, "../.."), helmChartName) 88 | if exists, err := chartExistsAt(filepath); err != nil || !exists { 89 | panic(err) 90 | } 91 | return &helmChart{filepath, nil} 92 | } 93 | 94 | func newNeo4jHelmChart(helmChartName string, editions []string) Neo4jHelmChartBuilder { 95 | filepath := path.Join(path.Join(modelDir, "../.."), helmChartName) 96 | if exists, err := chartExistsAt(filepath); err != nil || !exists { 97 | panic(err) 98 | } 99 | return &helmChart{filepath, editions} 100 | } 101 | -------------------------------------------------------------------------------- /neo4j/templates/neo4j-service-account.yaml: -------------------------------------------------------------------------------- 1 | {{- $clusterEnabled := eq (include "neo4j.isClusterEnabled" .) "true" }} 2 | {{- if or (and $clusterEnabled (empty $.Values.podSpec.serviceAccountName)) $.Values.analytics.enabled }} 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | namespace: "{{ .Release.Namespace }}" 7 | name: {{ include "neo4j.fullname" . 
}} 8 | labels: 9 | app: "{{ template "neo4j.name" $ }}" 10 | {{- include "neo4j.labels" $.Values.neo4j | indent 4 }} 11 | --- 12 | apiVersion: rbac.authorization.k8s.io/v1 13 | kind: Role 14 | metadata: 15 | namespace: "{{ .Release.Namespace }}" 16 | name: "{{ include "neo4j.fullname" . }}-service-reader" 17 | labels: 18 | app: "{{ template "neo4j.name" $ }}" 19 | {{- include "neo4j.labels" $.Values.neo4j | indent 4 }} 20 | rules: 21 | - apiGroups: [""] # "" indicates the core API group 22 | resources: ["services", "endpoints"] 23 | verbs: ["get", "watch", "list"] 24 | --- 25 | apiVersion: rbac.authorization.k8s.io/v1 26 | kind: RoleBinding 27 | metadata: 28 | namespace: "{{ .Release.Namespace }}" 29 | name: "{{ include "neo4j.fullname" . }}-service-binding" 30 | labels: 31 | app: "{{ template "neo4j.name" $ }}" 32 | {{- include "neo4j.labels" $.Values.neo4j | indent 4 }} 33 | subjects: 34 | - kind: ServiceAccount 35 | name: {{ include "neo4j.serviceAccountName" . }} 36 | roleRef: 37 | # "roleRef" specifies the binding to a Role / ClusterRole 38 | kind: Role # this must be Role or ClusterRole 39 | name: {{ include "neo4j.fullname" . }}-service-reader # this must match the name of the Role or ClusterRole you wish to bind to 40 | apiGroup: rbac.authorization.k8s.io 41 | --- 42 | {{- if and (not (kindIs "invalid" $.Values.neo4j.operations)) $.Values.neo4j.operations.enableServer }} 43 | apiVersion: rbac.authorization.k8s.io/v1 44 | kind: Role 45 | metadata: 46 | namespace: "{{ .Release.Namespace }}" 47 | name: "{{ include "neo4j.fullname" . 
}}-secrets-reader" 48 | labels: 49 | app: "{{ template "neo4j.name" $ }}" 50 | {{- include "neo4j.labels" $.Values.neo4j | indent 4 }} 51 | rules: 52 | - apiGroups: [""] # "" indicates the core API group 53 | resources: ["secrets"] 54 | verbs: ["get", "watch", "list"] 55 | --- 56 | apiVersion: rbac.authorization.k8s.io/v1 57 | kind: RoleBinding 58 | metadata: 59 | namespace: "{{ .Release.Namespace }}" 60 | name: "{{ include "neo4j.fullname" . }}-secrets-binding" 61 | labels: 62 | app: "{{ template "neo4j.name" $ }}" 63 | {{- include "neo4j.labels" $.Values.neo4j | indent 4 }} 64 | subjects: 65 | - kind: ServiceAccount 66 | name: {{ include "neo4j.serviceAccountName" . }} 67 | roleRef: 68 | # "roleRef" specifies the binding to a Role / ClusterRole 69 | kind: Role # this must be Role or ClusterRole 70 | name: {{ include "neo4j.fullname" . }}-secrets-reader # this must match the name of the Role or ClusterRole you wish to bind to 71 | apiGroup: rbac.authorization.k8s.io 72 | {{- end }} 73 | {{- end }} -------------------------------------------------------------------------------- /neo4j-loadbalancer/values.yaml: -------------------------------------------------------------------------------- 1 | # Shared neo4j object 2 | neo4j: 3 | # neo4j.name is required and must match the name of the neo4j instances this service should use 4 | name: "neo4j-loadbalancer" 5 | edition: "enterprise" 6 | 7 | # Service name configuration 8 | serviceName: 9 | # Suffix for the load balancer service name. 
10 | # The full service name will be: {{ neo4j.name }}-{{ suffix }} 11 | # This allows multiple load balancers in the same namespace when using different suffixes 12 | suffix: "lb-neo4j" 13 | 14 | # Annotations for the external service 15 | annotations: { } 16 | 17 | # Neo4j ports to include in external service 18 | ports: 19 | http: 20 | enabled: true #Set this to false to remove HTTP from this service (this does not affect whether http is enabled for the neo4j process) 21 | # uncomment to publish http on port 80 (neo4j default is 7474) 22 | #port: 80 23 | #targetPort: 7474 24 | #name: "http" 25 | #nodePort: , enabled only when type set to NodePort 26 | https: 27 | enabled: true #Set this to false to remove HTTPS from this service (this does not affect whether https is enabled for the neo4j process) 28 | # uncomment to publish http on port 443 (neo4j default is 7474) 29 | #port: 443 30 | #targetPort: 7473 31 | #name: "https" 32 | #nodePort: , enabled only when type set to NodePort 33 | bolt: 34 | enabled: true #Set this to false to remove BOLT from this service (this does not affect whether https is enabled for the neo4j process) 35 | # Uncomment to explicitly specify the port to publish Neo4j Bolt (7687 is the default) 36 | #port: 7687 37 | #targetPort: 7687 38 | #name: "tcp-bolt" 39 | #nodePort: , enabled only when type set to NodePort 40 | backup: 41 | enabled: false #Set this to true to expose backup port externally (n.b. this could have security implications. Backup is not authenticated by default) 42 | # Uncomment to explicitly specify the port to publish Neo4j Backup (6362 is the default) 43 | #port: 6362 44 | #targetPort: 6362 45 | #name: "tcp-backup" 46 | #nodePort: , enabled only when type set to NodePort 47 | 48 | # A "helm.neo4j.com/neo4j.name" will be applied automatically from `neo4j.name`. 49 | # Specify *additional* selectors to apply here (generally not required). 
50 | # If you do not want the automatic selector rename to "selectorOverride" and no "helm.neo4j.com/neo4j.name" will be applied. 51 | selector: 52 | "helm.neo4j.com/neo4j.loadbalancer": "include" 53 | # for neo4j cluster enable this selector 54 | # helm.neo4j.com/clustering: "true" 55 | 56 | # Add additional Service.spec here if needed 57 | spec: 58 | type: LoadBalancer 59 | # in most cloud environments LoadBalancer type will receive an ephemeral public IP address automatically. 60 | # If you need to specify a static ip here use: 61 | #loadBalancerIP: ... 62 | 63 | # Kubernetes cluster domain suffix 64 | clusterDomain: "cluster.local" 65 | 66 | #this flag allows you to open internal neo4j ports necessary in multi zone /region neo4j cluster scenario 67 | multiCluster: false 68 | -------------------------------------------------------------------------------- /bin/gcloud/cleanup_resources: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This script cleans up GCP resources that might be left over from previous test runs 4 | 5 | set -o pipefail -o errtrace -o errexit -o nounset 6 | shopt -s inherit_errexit 7 | [[ -n "${TRACE:-}" ]] && set -o xtrace 8 | 9 | # Required env vars 10 | CLOUDSDK_COMPUTE_ZONE="${CLOUDSDK_COMPUTE_ZONE:?CLOUDSDK_COMPUTE_ZONE is required}" 11 | RESOURCE_PREFIX="${RESOURCE_PREFIX:-ghactions}" 12 | 13 | # Clean up any existing clusters 14 | echo "Cleaning up any existing clusters..." 15 | CLUSTERS=$(gcloud container clusters list --format="get(name)" --filter="name ~ ^${RESOURCE_PREFIX}-.*") 16 | if [ -n "$CLUSTERS" ]; then 17 | while read -r cluster; do 18 | if [ -n "$cluster" ]; then 19 | echo "Deleting cluster: $cluster" 20 | gcloud container clusters delete "$cluster" --zone="${CLOUDSDK_COMPUTE_ZONE}" --quiet || true 21 | fi 22 | done <<< "$CLUSTERS" 23 | fi 24 | 25 | # Clean up any orphaned persistent disks 26 | echo "Cleaning up orphaned persistent disks..." 
27 | DISKS=$(gcloud compute disks list \ 28 | --filter="zone=${CLOUDSDK_COMPUTE_ZONE} AND (name:${RESOURCE_PREFIX}-* OR name:neo4j-data-disk-${RESOURCE_PREFIX}-*)" \ 29 | --format="get(name)") 30 | if [ -n "$DISKS" ]; then 31 | while read -r disk; do 32 | if [ -n "$disk" ]; then 33 | echo "Deleting disk: $disk" 34 | gcloud compute disks delete "$disk" --zone="${CLOUDSDK_COMPUTE_ZONE}" --quiet || true 35 | fi 36 | done <<< "$DISKS" 37 | fi 38 | 39 | # Clean up test-related container images older than 24 hours 40 | if [[ -n "${ARTIFACT_REGISTRY_REPO_NAME:-}" ]]; then 41 | echo "Cleaning up old test-related container images..." 42 | CUTOFF_TIME=$(date -u -d "24 hours ago" +"%Y-%m-%dT%H:%M:%SZ") 43 | 44 | # Define the test-related image patterns 45 | TEST_IMAGE_PATTERNS=( 46 | "reverseproxy" 47 | "neo4j-operations" 48 | "neo4j-admin" 49 | ) 50 | 51 | for pattern in "${TEST_IMAGE_PATTERNS[@]}"; do 52 | 53 | IMAGES=$(gcloud container images list --repository="${ARTIFACT_REGISTRY_REPO_NAME}" \ 54 | --format="get(name)" --filter="name ~ .*${pattern}.*" 2>/dev/null || true) 55 | 56 | if [ -n "$IMAGES" ]; then 57 | while read -r image; do 58 | if [ -n "$image" ]; then 59 | OLD_TAGS=$(gcloud container images list-tags "$image" \ 60 | --filter="timestamp.datetime < '${CUTOFF_TIME}'" \ 61 | --format="get(digest)" 2>/dev/null || true) 62 | 63 | while read -r digest; do 64 | if [ -n "$digest" ]; then 65 | echo "Deleting old image: $image@$digest" 66 | gcloud container images delete --quiet "${image}@${digest}" 2>/dev/null || true 67 | fi 68 | done <<< "$OLD_TAGS" 69 | fi 70 | done <<< "$IMAGES" 71 | fi 72 | done 73 | else 74 | echo "Skipping image cleanup - ARTIFACT_REGISTRY_REPO_NAME not set" 75 | fi -------------------------------------------------------------------------------- /neo4j-reverse-proxy/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for neo4j reverse proxy helm chart 2 | 3 | ## @param nameOverride 
String to partially override common.names.fullname 4 | nameOverride: "" 5 | ## @param fullnameOverride String to fully override common.names.fullname 6 | fullnameOverride: "" 7 | 8 | # Parameters for reverse proxy 9 | reverseProxy: 10 | image: "neo4j/helm-charts-reverse-proxy:2025.10.1-4" 11 | 12 | # imagePullSecrets for pulling the reverse proxy image from private registries 13 | imagePullSecrets: [] 14 | # - name: my-docker-secret 15 | 16 | # Name of the Kubernetes service. This service should have the ports 7474 and 7687 open. 17 | # This could be the admin service ex: "standalone-admin" or the loadbalancer service ex: "standalone" created via the neo4j helm chart 18 | # serviceName, namespace, and domain together form the complete k8s service URL. Ex: standalone-admin.default.svc.cluster.local 19 | # When used against a cluster, ensure the service being used is pointing to all the cluster instances. 20 | # This could be the loadbalancer from the neo4j helm chart or the headless service installed via the neo4j-headless-service helm chart 21 | serviceName: "" 22 | # Namespace where the Neo4j service is running. If not specified, defaults to the namespace where the reverse proxy is installed. 23 | # This allows installing the reverse proxy in one namespace while connecting to Neo4j in another namespace. 24 | namespace: "" 25 | # default is set to cluster.local 26 | domain: "cluster.local" 27 | 28 | # securityContext defines privilege and access control settings for a container, making sure that we don't run Neo4j as the root user.
29 | containerSecurityContext: 30 | allowPrivilegeEscalation: false 31 | runAsNonRoot: true 32 | runAsUser: 7474 33 | runAsGroup: 7474 34 | capabilities: 35 | drop: 36 | - all 37 | 38 | podSecurityContext: 39 | runAsNonRoot: true 40 | runAsUser: 7474 41 | runAsGroup: 7474 42 | fsGroup: 7474 43 | fsGroupChangePolicy: "Always" 44 | 45 | # Pod labels for security policies and workload identification 46 | podLabels: {} 47 | # app: "reverse-proxy" 48 | # environment: "production" 49 | 50 | # Node selector for workload segregation 51 | # Please ensure the respective labels are present on at least one of the nodes, or else the helm chart will throw an error 52 | nodeSelector: {} 53 | # label1: "value1" 54 | # label2: "value2" 55 | 56 | # This assumes the ingress-nginx controller or the haproxy-ingress controller is already installed in your Kubernetes cluster. 57 | # You can install ingress-nginx by following the instructions at https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/index.md#quick-start 58 | # You can install haproxy-ingress by following the instructions at https://haproxy-ingress.github.io/docs/getting-started/ 59 | ingress: 60 | enabled: true 61 | # Default value is nginx.
It can be either nginx or haproxy 62 | className: nginx 63 | annotations: {} 64 | # "demo": "value" 65 | # "demo2": "value2" 66 | host: "" 67 | tls: 68 | enabled: false 69 | config: [] 70 | # - secretName: "demo2" 71 | # hosts: 72 | # - localhost 73 | -------------------------------------------------------------------------------- /internal/integration_tests/standalone_test.go: -------------------------------------------------------------------------------- 1 | package integration_tests 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | "time" 7 | 8 | "github.com/neo4j/helm-charts/internal/integration_tests/gcloud" 9 | "github.com/neo4j/helm-charts/internal/model" 10 | "github.com/neo4j/helm-charts/internal/resources" 11 | "github.com/stretchr/testify/assert" 12 | ) 13 | 14 | // Install Neo4j on the provided GKE K8s cluster and then run the tests from the table above using it 15 | func TestInstallStandaloneOnGCloudK8s(t *testing.T) { 16 | releaseName := model.NewReleaseName("install-" + TestRunIdentifier) 17 | chart := model.Neo4jHelmChartCommunityAndEnterprise 18 | 19 | t.Parallel() 20 | t.Logf("Starting setup of '%s'", t.Name()) 21 | defaultHelmArgs := []string{} 22 | defaultHelmArgs = append(defaultHelmArgs, model.DefaultNeo4jNameArg...) 23 | defaultHelmArgs = append(defaultHelmArgs, resources.TestAntiAffinityRule.HelmArgs()...) 24 | defaultHelmArgs = append(defaultHelmArgs, resources.GdsStandaloneTest.HelmArgs()...) 25 | _, err := installNeo4j(t, releaseName, chart, defaultHelmArgs...) 
26 | t.Cleanup(standaloneCleanup(t, releaseName)) 27 | 28 | if !assert.NoError(t, err) { 29 | t.Logf("%#v", err) 30 | return 31 | } 32 | 33 | t.Logf("Succeeded with setup of '%s'", t.Name()) 34 | 35 | subTests, err := k8sTests(releaseName, chart) 36 | if !assert.NoError(t, err) { 37 | return 38 | } 39 | runSubTests(t, subTests) 40 | } 41 | 42 | func standaloneCleanup(t *testing.T, releaseName model.ReleaseName) func() { 43 | return func() { 44 | namespace := string(releaseName.Namespace()) 45 | 46 | err := run(t, "kubectl", "get", "namespace", namespace) 47 | if err == nil { 48 | _ = run(t, "kubectl", "get", "statefulset", releaseName.String(), "--namespace", namespace) 49 | if err == nil { 50 | _ = runAll(t, "kubectl", [][]string{ 51 | {"scale", "statefulset", releaseName.String(), "--namespace", namespace, "--replicas=0"}, 52 | }, false) 53 | 54 | time.Sleep(30 * time.Second) 55 | } 56 | 57 | _ = runAll(t, "helm", [][]string{ 58 | {"uninstall", releaseName.String(), "--wait", "--timeout", "3m", "--namespace", namespace}, 59 | }, false) 60 | 61 | time.Sleep(10 * time.Second) 62 | 63 | _ = runAll(t, "kubectl", [][]string{ 64 | {"delete", "statefulset", releaseName.String(), "--namespace", namespace, "--force", "--grace-period=0", "--ignore-not-found"}, 65 | {"delete", "pod", "--all", "--namespace", namespace, "--force", "--grace-period=0", "--ignore-not-found"}, 66 | {"delete", "pvc", "--all", "--namespace", namespace, "--force", "--grace-period=0", "--ignore-not-found"}, 67 | }, false) 68 | 69 | _ = runAll(t, "kubectl", [][]string{ 70 | {"delete", "pv", "--all", "--force", "--grace-period=0", "--ignore-not-found"}, 71 | }, false) 72 | 73 | _ = runAll(t, "kubectl", [][]string{ 74 | {"delete", "namespace", namespace, "--force", "--grace-period=0", "--ignore-not-found"}, 75 | }, false) 76 | } 77 | 78 | _ = runAll(t, "gcloud", [][]string{ 79 | {"compute", "disks", "delete", fmt.Sprintf("neo4j-data-disk-%s", releaseName), "--zone=" + string(gcloud.CurrentZone()), 
"--project=" + string(gcloud.CurrentProject()), "--quiet"}, 80 | }, false) 81 | } 82 | } 83 | --------------------------------------------------------------------------------