├── .dockerignore ├── .gitignore ├── .travis.yml ├── Dockerfile ├── LICENSE ├── Makefile ├── PROJECT ├── README.rst ├── api └── v1alpha1 │ ├── common.go │ ├── fabricorderer_types.go │ ├── fabricpeer_types.go │ ├── groupversion_info.go │ └── zz_generated.deepcopy.go ├── config ├── crd │ ├── bases │ │ ├── fabric.kompitech.com_fabricorderers.yaml │ │ └── fabric.kompitech.com_fabricpeers.yaml │ ├── kustomization.yaml │ ├── kustomizeconfig.yaml │ └── patches │ │ ├── cainjection_in_fabricorderers.yaml │ │ ├── cainjection_in_fabricpeers.yaml │ │ ├── webhook_in_fabricorderers.yaml │ │ └── webhook_in_fabricpeers.yaml ├── default │ ├── kustomization.yaml │ ├── manager_auth_proxy_patch.yaml │ └── manager_config_patch.yaml ├── manager │ ├── controller_manager_config.yaml │ └── manager.yaml ├── manifests │ └── kustomization.yaml ├── prometheus │ ├── kustomization.yaml │ └── monitor.yaml ├── rbac │ ├── auth_proxy_client_clusterrole.yaml │ ├── auth_proxy_role.yaml │ ├── auth_proxy_role_binding.yaml │ ├── auth_proxy_service.yaml │ ├── fabricorderer_editor_role.yaml │ ├── fabricorderer_viewer_role.yaml │ ├── fabricpeer_editor_role.yaml │ ├── fabricpeer_viewer_role.yaml │ ├── kustomization.yaml │ ├── leader_election_role.yaml │ ├── leader_election_role_binding.yaml │ ├── manager-cluster-role.yml │ ├── role.yaml │ ├── role_binding.yaml │ └── service_account.yaml ├── samples │ ├── fabric_v1alpha1_fabricorderer.yaml │ ├── fabric_v1alpha1_fabricpeer.yaml │ └── kustomization.yaml └── scorecard │ ├── bases │ └── config.yaml │ ├── kustomization.yaml │ └── patches │ ├── basic.config.yaml │ └── olm.config.yaml ├── controllers ├── fabricorderer_controller.go ├── fabricpeer_controller.go └── suite_test.go ├── go.mod ├── go.sum ├── hack └── boilerplate.go.txt ├── main.go ├── pkg ├── config │ └── config.go └── resources │ ├── common.go │ ├── dns.go │ ├── istio.go │ └── vault.go └── version └── version.go /.dockerignore: -------------------------------------------------------------------------------- 1 | # More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file 2 | # Ignore build and test binaries. 
3 | bin/ 4 | testbin/ 5 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | bin 9 | testbin/* 10 | 11 | # Test binary, build with `go test -c` 12 | *.test 13 | 14 | # Output of the go coverage tool, specifically when used with LiteIDE 15 | *.out 16 | 17 | # Kubernetes Generated files - skip generated files, except for vendored files 18 | 19 | !vendor/**/zz_generated.* 20 | 21 | # editor and IDE paraphernalia 22 | .idea 23 | *.swp 24 | *.swo 25 | *~ 26 | config/manager/kustomization.yaml 27 | bin/ 28 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | notifications: 2 | slack: 3 | secure: pL3qG5hXFRrYM+fVQSJdvfrEuEkLTzm6S6l6LvedPFkmd0BNhN/bNjooMWvHoHgHyaLZEIrvx5m9MyTgKgLI5WfLL+fkO0vnU1w1A//5sPN6oZlGPLeB4SRcWYtEM+AvQyuaiCIQDDCXvFTp9AxNH73a80+qZFsluaW+QUdqm0LFdxXTwyzmdHy4zBUmfd9TQ+Nrhxf6bEsa0ADV3FsBLDJKSjTvIYF7eqJbWsajHpnIFPGo/6Hj5G/cxjSxfpc/LTLYwaSt4fX8k6vjmLODJeDFnaRqbfpjpqWRTQTDRJXM/CgbFqKpAr0hOcRLWsXdgnv1XOljDl8jarkZA2vCFHaV88CShss1kmgq2Pm8VvXwcgCETn6A3kVbYLhtYkWJJMI38Ka1j78yqG3UTYi9txa9mC0OiYCw5fXXVEXZ/Kl/3xEAgDT68fssDXQfaDbU333E/e3ynnMofZ8pkZos+2vpJn7VLJP5TQbjqvop1OR4mpSZM9sxE4vgbSrPwq3RMAYYxsPznVETwcLlWghhlhvodWQ24aBvEysMZ4JnjxArf1SK5e7ucuIFEOGGVu13R8uBlAOz7fbR2P8rNxGH9KrOzbc3uYTqHSNMaSfQFO6CRDDnB1GRW/eRr9LrvRA2+6WmstrClTQz7SI2B3nhEungKs6LfrvqGKg8p5tuzt8= 4 | dist: xenial 5 | sudo: false 6 | env: 7 | - GO111MODULE=on 8 | language: go 9 | before_install: 10 | - git config --global url."git@github.com:KompiTech/".insteadOf "https://github.com/KompiTech/" 11 | go: 12 | - 1.16.x 13 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Build the manager binary 2 | FROM golang:1.16 as builder 3 | 4 | WORKDIR /workspace 5 | # Copy the Go Modules manifests 6 | COPY go.mod go.mod 7 | COPY go.sum go.sum 8 | # cache deps before building and copying source so that we don't need to re-download as much 9 | # and so that source changes don't invalidate our downloaded layer 10 | RUN go mod download 11 | 12 | # Copy the go source 13 | COPY main.go main.go 14 | COPY api/ api/ 15 | COPY controllers/ controllers/ 16 | COPY pkg/ pkg/ 17 | 18 | # Build 19 | RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o manager main.go 20 | 21 | # Use distroless as minimal base image to package the manager binary 22 | # Refer to https://github.com/GoogleContainerTools/distroless for more details 23 | FROM gcr.io/distroless/static:nonroot 24 | WORKDIR / 25 | COPY --from=builder /workspace/manager . 26 | USER 65532:65532 27 | 28 | ENTRYPOINT ["/manager"] 29 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 
11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2019 KompiTech GmbH 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # VERSION defines the project version for the bundle. 2 | # Update this value when you upgrade the version of your project. 3 | # To re-generate a bundle for another specific version without changing the standard setup, you can: 4 | # - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) 5 | # - use environment variables to overwrite this value (e.g export VERSION=0.0.2) 6 | VERSION ?= 0.0.1-prototype0 7 | VERSION_IMPORT=main 8 | COMMIT=$(shell git rev-parse HEAD) 9 | COMMIT_SHORT=$(shell git rev-parse --short HEAD) 10 | BRANCH=$(shell git rev-parse --abbrev-ref HEAD) 11 | IMAGE?=hl-fabric-operator 12 | IMG_REPO?=kompitech/ 13 | IMG_TAG?=${BRANCH}-${COMMIT_SHORT} 14 | IMG_TAG_LATEST=latest 15 | IMG=${IMG_REPO}${IMAGE}:${IMG_TAG} 16 | PKG_NAME?=hl-fabric-operator 17 | LDFLAGS=-ldflags '-X ${VERSION_IMPORT}.version=${VERSION} -X ${VERSION_IMPORT}.commit=${COMMIT} -X ${VERSION_IMPORT}.branch=${BRANCH}' 18 | 19 | # CHANNELS define the bundle channels used in the bundle. 20 | # Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable") 21 | # To re-generate a bundle for other specific channels without changing the standard setup, you can: 22 | # - use the CHANNELS as arg of the bundle target (e.g make bundle CHANNELS=candidate,fast,stable) 23 | # - use environment variables to overwrite this value (e.g export CHANNELS="candidate,fast,stable") 24 | ifneq ($(origin CHANNELS), undefined) 25 | BUNDLE_CHANNELS := --channels=$(CHANNELS) 26 | endif 27 | 28 | # DEFAULT_CHANNEL defines the default channel used in the bundle. 29 | # Add a new line here if you would like to change its default config. (E.g DEFAULT_CHANNEL = "stable") 30 | # To re-generate a bundle for any other default channel without changing the default setup, you can: 31 | # - use the DEFAULT_CHANNEL as arg of the bundle target (e.g make bundle DEFAULT_CHANNEL=stable) 32 | # - use environment variables to overwrite this value (e.g export DEFAULT_CHANNEL="stable") 33 | ifneq ($(origin DEFAULT_CHANNEL), undefined) 34 | BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL) 35 | endif 36 | BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL) 37 | 38 | # IMAGE_TAG_BASE defines the docker.io namespace and part of the image name for remote images. 39 | # This variable is used to construct full image tags for bundle and catalog images. 40 | # 41 | # For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both 42 | # kompitech.com/hl-fabric-operator-bundle:$VERSION and kompitech.com/hl-fabric-operator-catalog:$VERSION. 43 | IMAGE_TAG_BASE ?= kompitech.com/hl-fabric-operator 44 | 45 | # BUNDLE_IMG defines the image:tag used for the bundle. 46 | # You can use it as an arg. 
(E.g make bundle-build BUNDLE_IMG=<some-registry>/<project-name-bundle>:<tag>) 47 | BUNDLE_IMG ?= $(IMAGE_TAG_BASE)-bundle:v$(VERSION) 48 | 49 | # Produce CRDs that work back to Kubernetes 1.11 (no version conversion) 50 | CRD_OPTIONS ?= "crd:trivialVersions=true,preserveUnknownFields=false" 51 | # ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. 52 | ENVTEST_K8S_VERSION = 1.21 53 | 54 | # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) 55 | ifeq (,$(shell go env GOBIN)) 56 | GOBIN=$(shell go env GOPATH)/bin 57 | else 58 | GOBIN=$(shell go env GOBIN) 59 | endif 60 | 61 | # Setting SHELL to bash allows bash commands to be executed by recipes. 62 | # This is a requirement for 'setup-envtest.sh' in the test target. 63 | # Options are set to exit when a recipe line exits non-zero or a piped command fails. 64 | SHELL = /usr/bin/env bash -o pipefail 65 | .SHELLFLAGS = -ec 66 | 67 | all: build 68 | 69 | ##@ General 70 | 71 | # The help target prints out all targets with their descriptions organized 72 | # beneath their categories. The categories are represented by '##@' and the 73 | # target descriptions by '##'. The awk command is responsible for reading the 74 | # entire set of makefiles included in this invocation, looking for lines of the 75 | # file as xyz: ## something, and then pretty-format the target and help. Then, 76 | # if there's a line with ##@ something, that gets pretty-printed as a category. 77 | # More info on the usage of ANSI control characters for terminal formatting: 78 | # https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters 79 | # More info on the awk command: 80 | # http://linuxcommand.org/lc3_adv_awk.php 81 | 82 | help: ## Display this help. 83 | @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) 84 | 85 | ##@ Development 86 | 87 | manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. 88 | $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases 89 | 90 | generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. 91 | $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." 92 | 93 | fmt: ## Run go fmt against code. 94 | go fmt ./... 95 | 96 | vet: ## Run go vet against code. 97 | go vet ./... 98 | 99 | test: manifests generate fmt vet envtest ## Run tests. 100 | KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./... -coverprofile cover.out 101 | 102 | ##@ Build 103 | 104 | build: generate fmt vet ## Build manager binary. 105 | go build -o bin/manager ${LDFLAGS} main.go 106 | 107 | run: manifests generate fmt vet ## Run a controller from your host. 108 | go run ./main.go 109 | 110 | docker-build: test ## Build docker image with the manager. 111 | docker build -t ${IMG_REPO}${IMAGE}:${IMG_TAG} -t ${IMG_REPO}${IMAGE}:${IMG_TAG_LATEST} . 112 | 113 | docker-push: ## Push docker image with the manager. 114 | docker push ${IMG_REPO}${IMAGE}:${IMG_TAG} 115 | docker push ${IMG_REPO}${IMAGE}:${IMG_TAG_LATEST} 116 | 117 | ##@ Deployment 118 | 119 | install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
120 | $(KUSTOMIZE) build config/crd | kubectl apply -f - 121 | 122 | uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. 123 | $(KUSTOMIZE) build config/crd | kubectl delete -f - 124 | 125 | deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. 126 | cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} 127 | $(KUSTOMIZE) build config/default | kubectl apply -f - 128 | 129 | undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. 130 | $(KUSTOMIZE) build config/default | kubectl delete -f - 131 | 132 | 133 | CONTROLLER_GEN = $(shell pwd)/bin/controller-gen 134 | controller-gen: ## Download controller-gen locally if necessary. 135 | $(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.6.1) 136 | 137 | KUSTOMIZE = $(shell pwd)/bin/kustomize 138 | kustomize: ## Download kustomize locally if necessary. 139 | $(call go-get-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v3@v3.8.7) 140 | 141 | ENVTEST = $(shell pwd)/bin/setup-envtest 142 | envtest: ## Download envtest-setup locally if necessary. 143 | $(call go-get-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest@latest) 144 | 145 | # go-get-tool will 'go get' any package $2 and install it to $1. 146 | PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST)))) 147 | define go-get-tool 148 | @[ -f $(1) ] || { \ 149 | set -e ;\ 150 | TMP_DIR=$$(mktemp -d) ;\ 151 | cd $$TMP_DIR ;\ 152 | go mod init tmp ;\ 153 | echo "Downloading $(2)" ;\ 154 | GOBIN=$(PROJECT_DIR)/bin go get $(2) ;\ 155 | rm -rf $$TMP_DIR ;\ 156 | } 157 | endef 158 | 159 | .PHONY: bundle 160 | bundle: manifests kustomize ## Generate bundle manifests and metadata, then validate generated files. 161 | operator-sdk generate kustomize manifests -q 162 | cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG) 163 | $(KUSTOMIZE) build config/manifests | operator-sdk generate bundle -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS) 164 | operator-sdk bundle validate ./bundle 165 | 166 | .PHONY: bundle-build 167 | bundle-build: ## Build the bundle image. 168 | docker build -f bundle.Dockerfile -t $(BUNDLE_IMG) . 169 | 170 | .PHONY: bundle-push 171 | bundle-push: ## Push the bundle image. 172 | $(MAKE) docker-push IMG=$(BUNDLE_IMG) 173 | 174 | .PHONY: opm 175 | OPM = ./bin/opm 176 | opm: ## Download opm locally if necessary. 177 | ifeq (,$(wildcard $(OPM))) 178 | ifeq (,$(shell which opm 2>/dev/null)) 179 | @{ \ 180 | set -e ;\ 181 | mkdir -p $(dir $(OPM)) ;\ 182 | OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \ 183 | curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.15.1/$${OS}-$${ARCH}-opm ;\ 184 | chmod +x $(OPM) ;\ 185 | } 186 | else 187 | OPM = $(shell which opm) 188 | endif 189 | endif 190 | 191 | # A comma-separated list of bundle images (e.g. make catalog-build BUNDLE_IMGS=example.com/operator-bundle:v0.1.0,example.com/operator-bundle:v0.2.0). 192 | # These images MUST exist in a registry and be pull-able. 193 | BUNDLE_IMGS ?= $(BUNDLE_IMG) 194 | 195 | # The image tag given to the resulting catalog image (e.g. make catalog-build CATALOG_IMG=example.com/operator-catalog:v0.2.0). 196 | CATALOG_IMG ?= $(IMAGE_TAG_BASE)-catalog:v$(VERSION) 197 | 198 | # Set CATALOG_BASE_IMG to an existing catalog image tag to add $BUNDLE_IMGS to that image. 
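# For example, to append the bundle to an existing catalog (the image names below are illustrative only):
# make catalog-build CATALOG_BASE_IMG=example.com/operator-catalog:v0.1.0 CATALOG_IMG=example.com/operator-catalog:v0.2.0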
199 | ifneq ($(origin CATALOG_BASE_IMG), undefined) 200 | FROM_INDEX_OPT := --from-index $(CATALOG_BASE_IMG) 201 | endif 202 | 203 | # Build a catalog image by adding bundle images to an empty catalog using the operator package manager tool, 'opm'. 204 | # This recipe invokes 'opm' in 'semver' bundle add mode. For more information on add modes, see: 205 | # https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator 206 | .PHONY: catalog-build 207 | catalog-build: opm ## Build a catalog image. 208 | $(OPM) index add --container-tool docker --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT) 209 | 210 | # Push the catalog image. 211 | .PHONY: catalog-push 212 | catalog-push: ## Push a catalog image. 213 | $(MAKE) docker-push IMG=$(CATALOG_IMG) 214 | -------------------------------------------------------------------------------- /PROJECT: -------------------------------------------------------------------------------- 1 | domain: kompitech.com 2 | layout: 3 | - go.kubebuilder.io/v3 4 | plugins: 5 | manifests.sdk.operatorframework.io/v2: {} 6 | scorecard.sdk.operatorframework.io/v2: {} 7 | projectName: hl-fabric-operator 8 | repo: github.com/KompiTech/hyperledger-fabric-operator 9 | resources: 10 | - api: 11 | crdVersion: v1 12 | controller: true 13 | domain: kompitech.com 14 | group: fabric 15 | kind: FabricPeer 16 | path: github.com/KompiTech/hyperledger-fabric-operator/api/v1alpha1 17 | version: v1alpha1 18 | - api: 19 | crdVersion: v1 20 | controller: true 21 | domain: kompitech.com 22 | group: fabric 23 | kind: FabricOrderer 24 | path: github.com/KompiTech/hyperledger-fabric-operator/api/v1alpha1 25 | version: v1alpha1 26 | version: "3" 27 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | .. image:: docs/img/operator.jpg 2 | 3 | =========================== 4 | Hyperledger Fabric Operator 5 | =========================== 6 | 7 | **NOTE: This project is in pre-alpha** 8 | 9 | Kubernetes operator for Hyperledger Fabric. This project uses Kubernetes Custom Resource Definitions 10 | (more information: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) 11 | to manage Fabric Peers and Orderers in Kubernetes. 12 | 13 | 14 | Installation 15 | ------------ 16 | 17 | The CRDs need to be applied to the k8s cluster first. 18 | 19 | .. code:: bash 20 | 21 | kubectl apply -f deploy/crds/ 22 | 23 | Operator deployment with RBAC: 24 | 25 | .. code:: bash 26 | 27 | kubectl apply -f deploy/rbac.yaml 28 | kubectl apply -f deploy/operator.yaml 29 | 30 | 31 | User guide 32 | ---------- 33 | 34 | Prerequisites 35 | ================= 36 | 37 | Currently the Hyperledger Fabric Operator supports only one use case, which requires that the Kubernetes cluster has 38 | HashiCorp Vault, Istio and CoreDNS deployed. 39 | 40 | HashiCorp Vault is used to issue the signing certificate and key used in the MSP for peers and orderers. The TLS certificate and key are also issued from Vault. Currently an init 41 | container uses Vault Kubernetes Auth. The operator requires that HashiCorp Vault is properly configured (https://www.vaultproject.io/docs/auth/kubernetes.html). 42 | The PKI in Vault should be configured with the roles MSP and TLS. 43 | 44 | The Vault auth URL has the format `"$VAULT_ADDRESS"/v1/auth/kubernetes-"$REGION_NAME"/login`.
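For example, assuming the default operator deployment (where `OPERATOR_VAULT_ADDRESS` is set to `https://vault-cluster.vault-operator:8200`) and a resource annotated with `region: Region1`, the init container would authenticate against `https://vault-cluster.vault-operator:8200/v1/auth/kubernetes-Region1/login`.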
The Vault address can be changed via the OPERATOR_VAULT_ADDRESS environment variable in the operator manifest. 45 | You can use an annotation on the peer and orderer resources to define REGION_NAME, for example: 46 | 47 | .. code:: yaml 48 | 49 | apiVersion: hl-fabric.kompitech.com/v1alpha1 50 | kind: FabricPeer 51 | metadata: 52 | name: peer1 53 | namespace: 2657db63-8a32-41c6-814c-6fa3d21c4731 54 | annotations: 55 | region: Region1 56 | 57 | Istio is needed because, by default, the operator creates an Ingress for the created services. 58 | 59 | The operator will also try to write a DNS record into etcd, which is the backend for CoreDNS. The etcd address is currently statically set to `etcd-client.etcd:2379`. 60 | 61 | 62 | Cleanup 63 | ------- 64 | 65 | If you want to delete all resources after you are done: 66 | 67 | .. code:: bash 68 | 69 | kubectl delete -f deploy/operator.yaml 70 | kubectl delete -f deploy/rbac.yaml 71 | kubectl delete -f deploy/crds 72 | -------------------------------------------------------------------------------- /api/v1alpha1/common.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019 KompiTech GmbH 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package v1alpha1 18 | 19 | //SUPPORT 20 | const ( 21 | StateCreating string = "Creating" 22 | StateRunning string = "Running" 23 | StateUpdating string = "Updating" 24 | StateError string = "Error" 25 | StateSuspended string = "Suspended" 26 | ) 27 | 28 | type CertificateSecret struct { 29 | Name string `json:"name"` 30 | Value string `json:"value"` 31 | } 32 | -------------------------------------------------------------------------------- /api/v1alpha1/fabricorderer_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package v1alpha1 18 | 19 | import ( 20 | corev1 "k8s.io/api/core/v1" 21 | "k8s.io/apimachinery/pkg/api/resource" 22 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 23 | ) 24 | 25 | // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 26 | // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
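// The example below is an illustrative sketch added for documentation only: it shows how the
// FabricOrdererSpec fields defined in this file map to YAML through their json tags. The group
// and kind come from this API package; all concrete values (names, namespace, images, sizes)
// are hypothetical and are not taken from this repository.
//
//   apiVersion: fabric.kompitech.com/v1alpha1
//   kind: FabricOrderer
//   metadata:
//     name: orderer0
//     namespace: example-org
//   spec:
//     image: hyperledger/fabric-orderer:1.4          # hypothetical image
//     metricsimage: prom/statsd-exporter:latest      # hypothetical image
//     replicas: 1
//     datavolumesize: 10Gi
//     cavolumesize: 50Mi
//     organization: ExampleOrg
//     mspid: ExampleOrgMSP
//     commonname: orderer0.example.com
//     genesis: ""                                    # base64-encoded genesis block
//     nodeousenabled: true
//     certificate:
//       - name: cert.pem                             # hypothetical MSP certificate secret entry
//         value: ""
//     tlscertificate:
//       - name: tls-cert.pem                         # hypothetical TLS certificate secret entry
//         value: ""
//     containers: []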
27 | 28 | // FabricOrdererSpec defines the desired state of FabricOrderer 29 | type FabricOrdererSpec struct { 30 | Image string `json:"image"` 31 | MetricsImage string `json:"metricsimage"` 32 | Replicas int32 `json:"replicas"` 33 | DataVolumeSize resource.Quantity `json:"datavolumesize"` 34 | CertVolumeSize resource.Quantity `json:"cavolumesize"` 35 | Organization string `json:"organization"` 36 | MspId string `json:"mspid"` 37 | CommonName string `json:"commonname"` 38 | SvcType corev1.ServiceType `json:"SvcType,omitempty"` 39 | Certificate []CertificateSecret `json:"certificate"` 40 | TLSCertificate []CertificateSecret `json:"tlscertificate"` 41 | Genesis string `json:"genesis"` 42 | Containers []corev1.Container `json:"containers"` 43 | NodeOUsEnabled bool `json:"nodeousenabled"` 44 | } 45 | 46 | //+kubebuilder:object:root=true 47 | //+kubebuilder:subresource:status 48 | 49 | // FabricOrderer is the Schema for the fabricorderers API 50 | type FabricOrderer struct { 51 | metav1.TypeMeta `json:",inline"` 52 | metav1.ObjectMeta `json:"metadata,omitempty"` 53 | 54 | Spec FabricOrdererSpec `json:"spec,omitempty"` 55 | State string `json:"state,omitempty"` 56 | Status FabricOrdererStatus `json:"status,omitempty"` 57 | } 58 | 59 | // FabricOrdererStatus defines the observed state of FabricOrderer 60 | type FabricOrdererStatus struct { 61 | // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster 62 | // Important: Run "make" to regenerate code after modifying this file 63 | 64 | // Nodes store the name of the pods which are running FabricOrderer instances 65 | Nodes []string `json:"nodes,omitempty"` 66 | State string `json:"state,omitempty"` 67 | } 68 | 69 | //+kubebuilder:object:root=true 70 | 71 | // FabricOrdererList contains a list of FabricOrderer 72 | type FabricOrdererList struct { 73 | metav1.TypeMeta `json:",inline"` 74 | metav1.ListMeta `json:"metadata,omitempty"` 75 | Items []FabricOrderer `json:"items"` 76 | } 77 | 78 | func init() { 79 | SchemeBuilder.Register(&FabricOrderer{}, &FabricOrdererList{}) 80 | } 81 | -------------------------------------------------------------------------------- /api/v1alpha1/fabricpeer_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package v1alpha1 18 | 19 | import ( 20 | corev1 "k8s.io/api/core/v1" 21 | "k8s.io/apimachinery/pkg/api/resource" 22 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 23 | ) 24 | 25 | // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 26 | // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
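// As with FabricOrderer above, the example below is an illustrative sketch only: it maps the
// FabricPeerSpec fields defined in this file to YAML via their json tags. All concrete values
// are hypothetical; the region annotation is the one described in the README.
//
//   apiVersion: fabric.kompitech.com/v1alpha1
//   kind: FabricPeer
//   metadata:
//     name: peer1
//     namespace: example-org
//     annotations:
//       region: Region1                              # used to build the Vault auth path
//   spec:
//     image: hyperledger/fabric-peer:1.4             # hypothetical image
//     builderimage: hyperledger/fabric-ccenv:1.4     # hypothetical image
//     runtimeimage: hyperledger/fabric-baseos:0.4    # hypothetical image
//     couchdbimage: couchdb:2.3                      # hypothetical image
//     dindimage: docker:dind                         # hypothetical image
//     metricsimage: prom/statsd-exporter:latest      # hypothetical image
//     replicas: 1
//     datavolumesize: 10Gi
//     certvolumesize: 50Mi
//     organization: ExampleOrg
//     mspid: ExampleOrgMSP
//     commonname: peer1.example.com
//     bootstrapnodeaddress: peer1.example.com:7051   # hypothetical gossip bootstrap address
//     nodeousenabled: true
//     certificate: []
//     tlscertificate: []
//     containers: []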
27 | 28 | // FabricPeerSpec defines the desired state of FabricPeer 29 | type FabricPeerSpec struct { 30 | Image string `json:"image"` 31 | BuilderImage string `json:"builderimage"` 32 | RuntimeImage string `json:"runtimeimage"` 33 | CouchDBImage string `json:"couchdbimage"` 34 | DINDImage string `json:"dindimage"` 35 | MetricsImage string `json:"metricsimage"` 36 | Replicas int32 `json:"replicas"` 37 | DataVolumeSize resource.Quantity `json:"datavolumesize,omitempty"` 38 | CertVolumeSize resource.Quantity `json:"certvolumesize,omitempty"` 39 | Organization string `json:"organization"` 40 | MspId string `json:"mspid"` 41 | CommonName string `json:"commonname"` 42 | BootstrapNodeAddress string `json:"bootstrapnodeaddress"` 43 | SvcType corev1.ServiceType `json:"SvcType,omitempty"` 44 | Certificate []CertificateSecret `json:"certificate"` 45 | TLSCertificate []CertificateSecret `json:"tlscertificate"` 46 | Containers []corev1.Container `json:"containers"` 47 | NodeOUsEnabled bool `json:"nodeousenabled"` 48 | } 49 | 50 | //+kubebuilder:object:root=true 51 | //+kubebuilder:subresource:status 52 | 53 | // FabricPeer is the Schema for the fabricpeers API 54 | type FabricPeer struct { 55 | metav1.TypeMeta `json:",inline"` 56 | metav1.ObjectMeta `json:"metadata,omitempty"` 57 | 58 | Spec FabricPeerSpec `json:"spec,omitempty"` 59 | State string `json:"state,omitempty"` 60 | Status FabricPeerStatus `json:"status,omitempty"` 61 | } 62 | 63 | // FabricPeerStatus defines the observed state of FabricPeer 64 | type FabricPeerStatus struct { 65 | // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster 66 | // Important: Run "make" to regenerate code after modifying this file 67 | 68 | // Nodes store the name of the pods which are running FabricPeer instances 69 | Nodes []string `json:"nodes,omitempty"` 70 | State string `json:"state,omitempty"` 71 | } 72 | 73 | //+kubebuilder:object:root=true 74 | 75 | // FabricPeerList contains a list of FabricPeer 76 | type FabricPeerList struct { 77 | metav1.TypeMeta `json:",inline"` 78 | metav1.ListMeta `json:"metadata,omitempty"` 79 | Items []FabricPeer `json:"items"` 80 | } 81 | 82 | func init() { 83 | SchemeBuilder.Register(&FabricPeer{}, &FabricPeerList{}) 84 | } 85 | -------------------------------------------------------------------------------- /api/v1alpha1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | // Package v1alpha1 contains API Schema definitions for the fabric v1alpha1 API group 18 | //+kubebuilder:object:generate=true 19 | //+groupName=fabric.kompitech.com 20 | package v1alpha1 21 | 22 | import ( 23 | "k8s.io/apimachinery/pkg/runtime/schema" 24 | "sigs.k8s.io/controller-runtime/pkg/scheme" 25 | ) 26 | 27 | var ( 28 | // GroupVersion is group version used to register these objects 29 | GroupVersion = schema.GroupVersion{Group: "fabric.kompitech.com", Version: "v1alpha1"} 30 | 31 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 32 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 33 | 34 | // AddToScheme adds the types in this group-version to the given scheme. 35 | AddToScheme = SchemeBuilder.AddToScheme 36 | ) 37 | -------------------------------------------------------------------------------- /api/v1alpha1/zz_generated.deepcopy.go: -------------------------------------------------------------------------------- 1 | //go:build !ignore_autogenerated 2 | // +build !ignore_autogenerated 3 | 4 | /* 5 | Copyright 2021. 6 | 7 | Licensed under the Apache License, Version 2.0 (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at 10 | 11 | http://www.apache.org/licenses/LICENSE-2.0 12 | 13 | Unless required by applicable law or agreed to in writing, software 14 | distributed under the License is distributed on an "AS IS" BASIS, 15 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | See the License for the specific language governing permissions and 17 | limitations under the License. 18 | */ 19 | 20 | // Code generated by controller-gen. DO NOT EDIT. 21 | 22 | package v1alpha1 23 | 24 | import ( 25 | "k8s.io/api/core/v1" 26 | runtime "k8s.io/apimachinery/pkg/runtime" 27 | ) 28 | 29 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 30 | func (in *CertificateSecret) DeepCopyInto(out *CertificateSecret) { 31 | *out = *in 32 | } 33 | 34 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSecret. 35 | func (in *CertificateSecret) DeepCopy() *CertificateSecret { 36 | if in == nil { 37 | return nil 38 | } 39 | out := new(CertificateSecret) 40 | in.DeepCopyInto(out) 41 | return out 42 | } 43 | 44 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 45 | func (in *FabricOrderer) DeepCopyInto(out *FabricOrderer) { 46 | *out = *in 47 | out.TypeMeta = in.TypeMeta 48 | in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) 49 | in.Spec.DeepCopyInto(&out.Spec) 50 | in.Status.DeepCopyInto(&out.Status) 51 | } 52 | 53 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FabricOrderer. 54 | func (in *FabricOrderer) DeepCopy() *FabricOrderer { 55 | if in == nil { 56 | return nil 57 | } 58 | out := new(FabricOrderer) 59 | in.DeepCopyInto(out) 60 | return out 61 | } 62 | 63 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 64 | func (in *FabricOrderer) DeepCopyObject() runtime.Object { 65 | if c := in.DeepCopy(); c != nil { 66 | return c 67 | } 68 | return nil 69 | } 70 | 71 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
72 | func (in *FabricOrdererList) DeepCopyInto(out *FabricOrdererList) { 73 | *out = *in 74 | out.TypeMeta = in.TypeMeta 75 | in.ListMeta.DeepCopyInto(&out.ListMeta) 76 | if in.Items != nil { 77 | in, out := &in.Items, &out.Items 78 | *out = make([]FabricOrderer, len(*in)) 79 | for i := range *in { 80 | (*in)[i].DeepCopyInto(&(*out)[i]) 81 | } 82 | } 83 | } 84 | 85 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FabricOrdererList. 86 | func (in *FabricOrdererList) DeepCopy() *FabricOrdererList { 87 | if in == nil { 88 | return nil 89 | } 90 | out := new(FabricOrdererList) 91 | in.DeepCopyInto(out) 92 | return out 93 | } 94 | 95 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 96 | func (in *FabricOrdererList) DeepCopyObject() runtime.Object { 97 | if c := in.DeepCopy(); c != nil { 98 | return c 99 | } 100 | return nil 101 | } 102 | 103 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 104 | func (in *FabricOrdererSpec) DeepCopyInto(out *FabricOrdererSpec) { 105 | *out = *in 106 | out.DataVolumeSize = in.DataVolumeSize.DeepCopy() 107 | out.CertVolumeSize = in.CertVolumeSize.DeepCopy() 108 | if in.Certificate != nil { 109 | in, out := &in.Certificate, &out.Certificate 110 | *out = make([]CertificateSecret, len(*in)) 111 | copy(*out, *in) 112 | } 113 | if in.TLSCertificate != nil { 114 | in, out := &in.TLSCertificate, &out.TLSCertificate 115 | *out = make([]CertificateSecret, len(*in)) 116 | copy(*out, *in) 117 | } 118 | if in.Containers != nil { 119 | in, out := &in.Containers, &out.Containers 120 | *out = make([]v1.Container, len(*in)) 121 | for i := range *in { 122 | (*in)[i].DeepCopyInto(&(*out)[i]) 123 | } 124 | } 125 | } 126 | 127 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FabricOrdererSpec. 128 | func (in *FabricOrdererSpec) DeepCopy() *FabricOrdererSpec { 129 | if in == nil { 130 | return nil 131 | } 132 | out := new(FabricOrdererSpec) 133 | in.DeepCopyInto(out) 134 | return out 135 | } 136 | 137 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 138 | func (in *FabricOrdererStatus) DeepCopyInto(out *FabricOrdererStatus) { 139 | *out = *in 140 | if in.Nodes != nil { 141 | in, out := &in.Nodes, &out.Nodes 142 | *out = make([]string, len(*in)) 143 | copy(*out, *in) 144 | } 145 | } 146 | 147 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FabricOrdererStatus. 148 | func (in *FabricOrdererStatus) DeepCopy() *FabricOrdererStatus { 149 | if in == nil { 150 | return nil 151 | } 152 | out := new(FabricOrdererStatus) 153 | in.DeepCopyInto(out) 154 | return out 155 | } 156 | 157 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 158 | func (in *FabricPeer) DeepCopyInto(out *FabricPeer) { 159 | *out = *in 160 | out.TypeMeta = in.TypeMeta 161 | in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) 162 | in.Spec.DeepCopyInto(&out.Spec) 163 | in.Status.DeepCopyInto(&out.Status) 164 | } 165 | 166 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FabricPeer. 
167 | func (in *FabricPeer) DeepCopy() *FabricPeer { 168 | if in == nil { 169 | return nil 170 | } 171 | out := new(FabricPeer) 172 | in.DeepCopyInto(out) 173 | return out 174 | } 175 | 176 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 177 | func (in *FabricPeer) DeepCopyObject() runtime.Object { 178 | if c := in.DeepCopy(); c != nil { 179 | return c 180 | } 181 | return nil 182 | } 183 | 184 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 185 | func (in *FabricPeerList) DeepCopyInto(out *FabricPeerList) { 186 | *out = *in 187 | out.TypeMeta = in.TypeMeta 188 | in.ListMeta.DeepCopyInto(&out.ListMeta) 189 | if in.Items != nil { 190 | in, out := &in.Items, &out.Items 191 | *out = make([]FabricPeer, len(*in)) 192 | for i := range *in { 193 | (*in)[i].DeepCopyInto(&(*out)[i]) 194 | } 195 | } 196 | } 197 | 198 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FabricPeerList. 199 | func (in *FabricPeerList) DeepCopy() *FabricPeerList { 200 | if in == nil { 201 | return nil 202 | } 203 | out := new(FabricPeerList) 204 | in.DeepCopyInto(out) 205 | return out 206 | } 207 | 208 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 209 | func (in *FabricPeerList) DeepCopyObject() runtime.Object { 210 | if c := in.DeepCopy(); c != nil { 211 | return c 212 | } 213 | return nil 214 | } 215 | 216 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 217 | func (in *FabricPeerSpec) DeepCopyInto(out *FabricPeerSpec) { 218 | *out = *in 219 | out.DataVolumeSize = in.DataVolumeSize.DeepCopy() 220 | out.CertVolumeSize = in.CertVolumeSize.DeepCopy() 221 | if in.Certificate != nil { 222 | in, out := &in.Certificate, &out.Certificate 223 | *out = make([]CertificateSecret, len(*in)) 224 | copy(*out, *in) 225 | } 226 | if in.TLSCertificate != nil { 227 | in, out := &in.TLSCertificate, &out.TLSCertificate 228 | *out = make([]CertificateSecret, len(*in)) 229 | copy(*out, *in) 230 | } 231 | if in.Containers != nil { 232 | in, out := &in.Containers, &out.Containers 233 | *out = make([]v1.Container, len(*in)) 234 | for i := range *in { 235 | (*in)[i].DeepCopyInto(&(*out)[i]) 236 | } 237 | } 238 | } 239 | 240 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FabricPeerSpec. 241 | func (in *FabricPeerSpec) DeepCopy() *FabricPeerSpec { 242 | if in == nil { 243 | return nil 244 | } 245 | out := new(FabricPeerSpec) 246 | in.DeepCopyInto(out) 247 | return out 248 | } 249 | 250 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 251 | func (in *FabricPeerStatus) DeepCopyInto(out *FabricPeerStatus) { 252 | *out = *in 253 | if in.Nodes != nil { 254 | in, out := &in.Nodes, &out.Nodes 255 | *out = make([]string, len(*in)) 256 | copy(*out, *in) 257 | } 258 | } 259 | 260 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FabricPeerStatus. 
261 | func (in *FabricPeerStatus) DeepCopy() *FabricPeerStatus { 262 | if in == nil { 263 | return nil 264 | } 265 | out := new(FabricPeerStatus) 266 | in.DeepCopyInto(out) 267 | return out 268 | } 269 | -------------------------------------------------------------------------------- /config/crd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # This kustomization.yaml is not intended to be run by itself, 2 | # since it depends on service name and namespace that are out of this kustomize package. 3 | # It should be run by config/default 4 | resources: 5 | - bases/fabric.kompitech.com_fabricpeers.yaml 6 | - bases/fabric.kompitech.com_fabricorderers.yaml 7 | #+kubebuilder:scaffold:crdkustomizeresource 8 | 9 | patchesStrategicMerge: 10 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 11 | # patches here are for enabling the conversion webhook for each CRD 12 | #- patches/webhook_in_fabricpeers.yaml 13 | #- patches/webhook_in_fabricorderers.yaml 14 | #+kubebuilder:scaffold:crdkustomizewebhookpatch 15 | 16 | # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. 17 | # patches here are for enabling the CA injection for each CRD 18 | #- patches/cainjection_in_fabricpeers.yaml 19 | #- patches/cainjection_in_fabricorderers.yaml 20 | #+kubebuilder:scaffold:crdkustomizecainjectionpatch 21 | 22 | # the following config is for teaching kustomize how to do kustomization for CRDs. 23 | configurations: 24 | - kustomizeconfig.yaml 25 | -------------------------------------------------------------------------------- /config/crd/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This file is for teaching kustomize how to substitute name and namespace reference in CRD 2 | nameReference: 3 | - kind: Service 4 | version: v1 5 | fieldSpecs: 6 | - kind: CustomResourceDefinition 7 | version: v1 8 | group: apiextensions.k8s.io 9 | path: spec/conversion/webhook/clientConfig/service/name 10 | 11 | namespace: 12 | - kind: CustomResourceDefinition 13 | version: v1 14 | group: apiextensions.k8s.io 15 | path: spec/conversion/webhook/clientConfig/service/namespace 16 | create: false 17 | 18 | varReference: 19 | - path: metadata/annotations 20 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_fabricorderers.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 7 | name: fabricorderers.fabric.kompitech.com 8 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_fabricpeers.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 7 | name: fabricpeers.fabric.kompitech.com 8 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_fabricorderers.yaml: 
-------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: fabricorderers.fabric.kompitech.com 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | conversionReviewVersions: 16 | - v1 17 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_fabricpeers.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: fabricpeers.fabric.kompitech.com 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | conversionReviewVersions: 16 | - v1 17 | -------------------------------------------------------------------------------- /config/default/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Adds namespace to all resources. 2 | namespace: hl-fabric-operator-system 3 | 4 | # Value of this field is prepended to the 5 | # names of all resources, e.g. a deployment named 6 | # "wordpress" becomes "alices-wordpress". 7 | # Note that it should also match with the prefix (text before '-') of the namespace 8 | # field above. 9 | namePrefix: hl-fabric-operator- 10 | 11 | # Labels to add to all resources and selectors. 12 | #commonLabels: 13 | # someName: someValue 14 | 15 | bases: 16 | - ../crd 17 | - ../rbac 18 | - ../manager 19 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 20 | # crd/kustomization.yaml 21 | #- ../webhook 22 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. 23 | #- ../certmanager 24 | # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. 25 | #- ../prometheus 26 | 27 | patchesStrategicMerge: 28 | # Protect the /metrics endpoint by putting it behind auth. 29 | # If you want your controller-manager to expose the /metrics 30 | # endpoint w/o any authn/z, please comment the following line. 31 | - manager_auth_proxy_patch.yaml 32 | 33 | # Mount the controller config file for loading manager configurations 34 | # through a ComponentConfig type 35 | #- manager_config_patch.yaml 36 | 37 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 38 | # crd/kustomization.yaml 39 | #- manager_webhook_patch.yaml 40 | 41 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 42 | # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. 43 | # 'CERTMANAGER' needs to be enabled to use ca injection 44 | #- webhookcainjection_patch.yaml 45 | 46 | # the following config is for teaching kustomize how to do var substitution 47 | vars: 48 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 
49 | #- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR 50 | # objref: 51 | # kind: Certificate 52 | # group: cert-manager.io 53 | # version: v1 54 | # name: serving-cert # this name should match the one in certificate.yaml 55 | # fieldref: 56 | # fieldpath: metadata.namespace 57 | #- name: CERTIFICATE_NAME 58 | # objref: 59 | # kind: Certificate 60 | # group: cert-manager.io 61 | # version: v1 62 | # name: serving-cert # this name should match the one in certificate.yaml 63 | #- name: SERVICE_NAMESPACE # namespace of the service 64 | # objref: 65 | # kind: Service 66 | # version: v1 67 | # name: webhook-service 68 | # fieldref: 69 | # fieldpath: metadata.namespace 70 | #- name: SERVICE_NAME 71 | # objref: 72 | # kind: Service 73 | # version: v1 74 | # name: webhook-service 75 | -------------------------------------------------------------------------------- /config/default/manager_auth_proxy_patch.yaml: -------------------------------------------------------------------------------- 1 | # This patch injects a sidecar container which is an HTTP proxy for the 2 | # controller manager; it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. 3 | apiVersion: apps/v1 4 | kind: Deployment 5 | metadata: 6 | name: controller-manager 7 | namespace: system 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: kube-rbac-proxy 13 | image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0 14 | args: 15 | - "--secure-listen-address=0.0.0.0:8443" 16 | - "--upstream=http://127.0.0.1:8080/" 17 | - "--logtostderr=true" 18 | - "--v=10" 19 | ports: 20 | - containerPort: 8443 21 | protocol: TCP 22 | name: https 23 | - name: manager 24 | args: 25 | - "--health-probe-bind-address=:8081" 26 | - "--metrics-bind-address=127.0.0.1:8080" 27 | - "--leader-elect" 28 | -------------------------------------------------------------------------------- /config/default/manager_config_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: manager 11 | args: 12 | - "--config=controller_manager_config.yaml" 13 | volumeMounts: 14 | - name: manager-config 15 | mountPath: /controller_manager_config.yaml 16 | subPath: controller_manager_config.yaml 17 | volumes: 18 | - name: manager-config 19 | configMap: 20 | name: manager-config 21 | -------------------------------------------------------------------------------- /config/manager/controller_manager_config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 2 | kind: ControllerManagerConfig 3 | health: 4 | healthProbeBindAddress: :8081 5 | metrics: 6 | bindAddress: 127.0.0.1:8080 7 | webhook: 8 | port: 9443 9 | leaderElection: 10 | leaderElect: true 11 | resourceName: 69a4dde1.kompitech.com 12 | -------------------------------------------------------------------------------- /config/manager/manager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | name: system 7 | --- 8 | apiVersion: apps/v1 9 | kind: Deployment 10 | metadata: 11 | name: controller-manager 12 | namespace: system 13 | labels: 14 | control-plane: controller-manager 15 | spec: 16 | selector: 17 | matchLabels: 18 | control-plane:
controller-manager 19 | replicas: 1 20 | template: 21 | metadata: 22 | labels: 23 | control-plane: controller-manager 24 | spec: 25 | securityContext: 26 | runAsNonRoot: true 27 | containers: 28 | - command: 29 | - /manager 30 | env: 31 | - name: OPERATOR_VAULT_ADDRESS 32 | value: https://vault-cluster.vault-operator:8200 33 | - name: VAULT_SKIP_VERIFY 34 | value: "true" 35 | args: 36 | - --leader-elect 37 | image: controller:latest 38 | name: manager 39 | securityContext: 40 | allowPrivilegeEscalation: false 41 | livenessProbe: 42 | httpGet: 43 | path: /healthz 44 | port: 8081 45 | initialDelaySeconds: 15 46 | periodSeconds: 20 47 | readinessProbe: 48 | httpGet: 49 | path: /readyz 50 | port: 8081 51 | initialDelaySeconds: 5 52 | periodSeconds: 10 53 | resources: 54 | limits: 55 | cpu: 200m 56 | memory: 100Mi 57 | requests: 58 | cpu: 100m 59 | memory: 20Mi 60 | serviceAccountName: controller-manager 61 | terminationGracePeriodSeconds: 10 62 | -------------------------------------------------------------------------------- /config/manifests/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # These resources constitute the fully configured set of manifests 2 | # used to generate the 'manifests/' directory in a bundle. 3 | resources: 4 | - bases/hl-fabric-operator.clusterserviceversion.yaml 5 | - ../default 6 | - ../samples 7 | - ../scorecard 8 | 9 | # [WEBHOOK] To enable webhooks, uncomment all the sections with [WEBHOOK] prefix. 10 | # Do NOT uncomment sections with prefix [CERTMANAGER], as OLM does not support cert-manager. 11 | # These patches remove the unnecessary "cert" volume and its manager container volumeMount. 12 | #patchesJson6902: 13 | #- target: 14 | # group: apps 15 | # version: v1 16 | # kind: Deployment 17 | # name: controller-manager 18 | # namespace: system 19 | # patch: |- 20 | # # Remove the manager container's "cert" volumeMount, since OLM will create and mount a set of certs. 21 | # # Update the indices in this path if adding or removing containers/volumeMounts in the manager's Deployment. 22 | # - op: remove 23 | # path: /spec/template/spec/containers/1/volumeMounts/0 24 | # # Remove the "cert" volume, since OLM will create and mount a set of certs. 25 | # # Update the indices in this path if adding or removing volumes in the manager's Deployment. 
26 | # - op: remove 27 | # path: /spec/template/spec/volumes/0 28 | -------------------------------------------------------------------------------- /config/prometheus/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - monitor.yaml 3 | -------------------------------------------------------------------------------- /config/prometheus/monitor.yaml: -------------------------------------------------------------------------------- 1 | 2 | # Prometheus Monitor Service (Metrics) 3 | apiVersion: monitoring.coreos.com/v1 4 | kind: ServiceMonitor 5 | metadata: 6 | labels: 7 | control-plane: controller-manager 8 | name: controller-manager-metrics-monitor 9 | namespace: system 10 | spec: 11 | endpoints: 12 | - path: /metrics 13 | port: https 14 | scheme: https 15 | bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 16 | tlsConfig: 17 | insecureSkipVerify: true 18 | selector: 19 | matchLabels: 20 | control-plane: controller-manager 21 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_client_clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: metrics-reader 5 | rules: 6 | - nonResourceURLs: 7 | - "/metrics" 8 | verbs: 9 | - get 10 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: proxy-role 5 | rules: 6 | - apiGroups: 7 | - authentication.k8s.io 8 | resources: 9 | - tokenreviews 10 | verbs: 11 | - create 12 | - apiGroups: 13 | - authorization.k8s.io 14 | resources: 15 | - subjectaccessreviews 16 | verbs: 17 | - create 18 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: proxy-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: proxy-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: controller-manager 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | name: controller-manager-metrics-service 7 | namespace: system 8 | spec: 9 | ports: 10 | - name: https 11 | port: 8443 12 | protocol: TCP 13 | targetPort: https 14 | selector: 15 | control-plane: controller-manager 16 | -------------------------------------------------------------------------------- /config/rbac/fabricorderer_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit fabricorderers. 
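# Illustrative (commented) binding for this role: a sketch only. The subject name and
# kind below are hypothetical examples, and with the default kustomize namePrefix the
# role is installed as "hl-fabric-operator-fabricorderer-editor-role".
# apiVersion: rbac.authorization.k8s.io/v1
# kind: ClusterRoleBinding
# metadata:
#   name: fabricorderer-editor-binding   # example name, not part of this config
# roleRef:
#   apiGroup: rbac.authorization.k8s.io
#   kind: ClusterRole
#   name: fabricorderer-editor-role
# subjects:
# - kind: User                            # hypothetical subject
#   name: example-user
#   apiGroup: rbac.authorization.k8s.io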
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: fabricorderer-editor-role 6 | rules: 7 | - apiGroups: 8 | - fabric.kompitech.com 9 | resources: 10 | - fabricorderers 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - fabric.kompitech.com 21 | resources: 22 | - fabricorderers/status 23 | verbs: 24 | - get 25 | -------------------------------------------------------------------------------- /config/rbac/fabricorderer_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view fabricorderers. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: fabricorderer-viewer-role 6 | rules: 7 | - apiGroups: 8 | - fabric.kompitech.com 9 | resources: 10 | - fabricorderers 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - fabric.kompitech.com 17 | resources: 18 | - fabricorderers/status 19 | verbs: 20 | - get 21 | -------------------------------------------------------------------------------- /config/rbac/fabricpeer_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit fabricpeers. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: fabricpeer-editor-role 6 | rules: 7 | - apiGroups: 8 | - fabric.kompitech.com 9 | resources: 10 | - fabricpeers 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - fabric.kompitech.com 21 | resources: 22 | - fabricpeers/status 23 | verbs: 24 | - get 25 | -------------------------------------------------------------------------------- /config/rbac/fabricpeer_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view fabricpeers. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: fabricpeer-viewer-role 6 | rules: 7 | - apiGroups: 8 | - fabric.kompitech.com 9 | resources: 10 | - fabricpeers 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - fabric.kompitech.com 17 | resources: 18 | - fabricpeers/status 19 | verbs: 20 | - get 21 | -------------------------------------------------------------------------------- /config/rbac/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | # All RBAC will be applied under this service account in 3 | # the deployment namespace. You may comment out this resource 4 | # if your manager will use a service account that exists at 5 | # runtime. Be sure to update RoleBinding and ClusterRoleBinding 6 | # subjects if changing service account names. 7 | - service_account.yaml 8 | - role.yaml 9 | - role_binding.yaml 10 | - leader_election_role.yaml 11 | - leader_election_role_binding.yaml 12 | # Comment the following 4 lines if you want to disable 13 | # the auth proxy (https://github.com/brancz/kube-rbac-proxy) 14 | # which protects your /metrics endpoint. 
15 | - auth_proxy_service.yaml 16 | - auth_proxy_role.yaml 17 | - auth_proxy_role_binding.yaml 18 | - auth_proxy_client_clusterrole.yaml 19 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions to do leader election. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: leader-election-role 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - create 16 | - update 17 | - patch 18 | - delete 19 | - apiGroups: 20 | - coordination.k8s.io 21 | resources: 22 | - leases 23 | verbs: 24 | - get 25 | - list 26 | - watch 27 | - create 28 | - update 29 | - patch 30 | - delete 31 | - apiGroups: 32 | - "" 33 | resources: 34 | - events 35 | verbs: 36 | - create 37 | - patch 38 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: leader-election-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: Role 8 | name: leader-election-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: controller-manager 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/manager-cluster-role.yml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: hl-fabric-operator-manager-role 5 | rules: 6 | - verbs: 7 | - "*" 8 | apiGroups: 9 | - "*" 10 | resources: 11 | - "*" -------------------------------------------------------------------------------- /config/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | 2 | --- 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRole 5 | metadata: 6 | creationTimestamp: null 7 | name: manager-role 8 | rules: 9 | - apiGroups: 10 | - fabric.kompitech.com 11 | resources: 12 | - fabricorderers 13 | verbs: 14 | - create 15 | - delete 16 | - get 17 | - list 18 | - patch 19 | - update 20 | - watch 21 | - apiGroups: 22 | - fabric.kompitech.com 23 | resources: 24 | - fabricorderers/finalizers 25 | verbs: 26 | - update 27 | - apiGroups: 28 | - fabric.kompitech.com 29 | resources: 30 | - fabricorderers/status 31 | verbs: 32 | - get 33 | - patch 34 | - update 35 | - apiGroups: 36 | - fabric.kompitech.com 37 | resources: 38 | - fabricpeers 39 | verbs: 40 | - create 41 | - delete 42 | - get 43 | - list 44 | - patch 45 | - update 46 | - watch 47 | - apiGroups: 48 | - fabric.kompitech.com 49 | resources: 50 | - fabricpeers/finalizers 51 | verbs: 52 | - update 53 | - apiGroups: 54 | - fabric.kompitech.com 55 | resources: 56 | - fabricpeers/status 57 | verbs: 58 | - get 59 | - patch 60 | - update 61 | -------------------------------------------------------------------------------- /config/rbac/role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: manager-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: manager-role 9 | subjects: 10 | - kind: 
ServiceAccount 11 | name: controller-manager 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/service_account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | -------------------------------------------------------------------------------- /config/samples/fabric_v1alpha1_fabricorderer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: fabric.kompitech.com/v1alpha1 2 | kind: FabricOrderer 3 | metadata: 4 | name: fabricorderer-sample 5 | spec: 6 | # Add fields here 7 | foo: bar 8 | -------------------------------------------------------------------------------- /config/samples/fabric_v1alpha1_fabricpeer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: fabric.kompitech.com/v1alpha1 2 | kind: FabricPeer 3 | metadata: 4 | name: fabricpeer-sample 5 | spec: 6 | # Add fields here 7 | foo: bar 8 | -------------------------------------------------------------------------------- /config/samples/kustomization.yaml: -------------------------------------------------------------------------------- 1 | ## Append samples you want in your CSV to this file as resources ## 2 | resources: 3 | - fabric_v1alpha1_fabricpeer.yaml 4 | - fabric_v1alpha1_fabricorderer.yaml 5 | #+kubebuilder:scaffold:manifestskustomizesamples 6 | -------------------------------------------------------------------------------- /config/scorecard/bases/config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: scorecard.operatorframework.io/v1alpha3 2 | kind: Configuration 3 | metadata: 4 | name: config 5 | stages: 6 | - parallel: true 7 | tests: [] 8 | -------------------------------------------------------------------------------- /config/scorecard/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - bases/config.yaml 3 | patchesJson6902: 4 | - path: patches/basic.config.yaml 5 | target: 6 | group: scorecard.operatorframework.io 7 | version: v1alpha3 8 | kind: Configuration 9 | name: config 10 | - path: patches/olm.config.yaml 11 | target: 12 | group: scorecard.operatorframework.io 13 | version: v1alpha3 14 | kind: Configuration 15 | name: config 16 | #+kubebuilder:scaffold:patchesJson6902 17 | -------------------------------------------------------------------------------- /config/scorecard/patches/basic.config.yaml: -------------------------------------------------------------------------------- 1 | - op: add 2 | path: /stages/0/tests/- 3 | value: 4 | entrypoint: 5 | - scorecard-test 6 | - basic-check-spec 7 | image: quay.io/operator-framework/scorecard-test:v1.12.0 8 | labels: 9 | suite: basic 10 | test: basic-check-spec-test 11 | -------------------------------------------------------------------------------- /config/scorecard/patches/olm.config.yaml: -------------------------------------------------------------------------------- 1 | - op: add 2 | path: /stages/0/tests/- 3 | value: 4 | entrypoint: 5 | - scorecard-test 6 | - olm-bundle-validation 7 | image: quay.io/operator-framework/scorecard-test:v1.12.0 8 | labels: 9 | suite: olm 10 | test: olm-bundle-validation-test 11 | - op: add 12 | path: /stages/0/tests/- 13 | value: 14 | entrypoint: 15 | - scorecard-test 16 | - 
olm-crds-have-validation 17 | image: quay.io/operator-framework/scorecard-test:v1.12.0 18 | labels: 19 | suite: olm 20 | test: olm-crds-have-validation-test 21 | - op: add 22 | path: /stages/0/tests/- 23 | value: 24 | entrypoint: 25 | - scorecard-test 26 | - olm-crds-have-resources 27 | image: quay.io/operator-framework/scorecard-test:v1.12.0 28 | labels: 29 | suite: olm 30 | test: olm-crds-have-resources-test 31 | - op: add 32 | path: /stages/0/tests/- 33 | value: 34 | entrypoint: 35 | - scorecard-test 36 | - olm-spec-descriptors 37 | image: quay.io/operator-framework/scorecard-test:v1.12.0 38 | labels: 39 | suite: olm 40 | test: olm-spec-descriptors-test 41 | - op: add 42 | path: /stages/0/tests/- 43 | value: 44 | entrypoint: 45 | - scorecard-test 46 | - olm-status-descriptors 47 | image: quay.io/operator-framework/scorecard-test:v1.12.0 48 | labels: 49 | suite: olm 50 | test: olm-status-descriptors-test 51 | -------------------------------------------------------------------------------- /controllers/fabricorderer_controller.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package controllers 18 | 19 | import ( 20 | "context" 21 | "encoding/base64" 22 | "reflect" 23 | "time" 24 | 25 | crd "github.com/jiribroulik/pkg/apis/istio/v1alpha3" 26 | appsv1 "k8s.io/api/apps/v1" 27 | corev1 "k8s.io/api/core/v1" 28 | "k8s.io/apimachinery/pkg/api/errors" 29 | "k8s.io/apimachinery/pkg/api/resource" 30 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 31 | "k8s.io/apimachinery/pkg/runtime" 32 | "k8s.io/apimachinery/pkg/types" 33 | "k8s.io/apimachinery/pkg/util/intstr" 34 | ctrl "sigs.k8s.io/controller-runtime" 35 | "sigs.k8s.io/controller-runtime/pkg/client" 36 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 37 | "sigs.k8s.io/controller-runtime/pkg/log" 38 | 39 | fabricv1alpha1 "github.com/KompiTech/hyperledger-fabric-operator/api/v1alpha1" 40 | "github.com/KompiTech/hyperledger-fabric-operator/pkg/config" 41 | "github.com/KompiTech/hyperledger-fabric-operator/pkg/resources" 42 | "github.com/imdario/mergo" 43 | ) 44 | 45 | // FabricOrdererReconciler reconciles a FabricOrderer object 46 | type FabricOrdererReconciler struct { 47 | client.Client 48 | Scheme *runtime.Scheme 49 | } 50 | 51 | const ( 52 | ordererMSPPath = "/etc/hyperledger/orderer/msp/" 53 | ordererTLSPath = "/etc/hyperledger/orderer/tls/" 54 | ) 55 | 56 | //+kubebuilder:rbac:groups=fabric.kompitech.com,resources=fabricorderers,verbs=get;list;watch;create;update;patch;delete 57 | //+kubebuilder:rbac:groups=fabric.kompitech.com,resources=fabricorderers/status,verbs=get;update;patch 58 | //+kubebuilder:rbac:groups=fabric.kompitech.com,resources=fabricorderers/finalizers,verbs=update 59 | 60 | // Reconcile is part of the main kubernetes reconciliation loop which aims to 61 | // move the current state of the cluster closer to the desired state. 
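//
// Concretely, this controller ensures (creating or updating as needed): a "vault"
// ServiceAccount in the instance namespace, Secrets holding the CA and TLS CA
// certificates and the genesis block, the orderer StatefulSet with its volume claim
// templates, the orderer Service (whose ClusterIP is checked against the "fqdn"
// annotation via resources.CheckDNS), and the Istio Gateway and VirtualService.
// It then mirrors the orderer pod state into the CR status and requeues until the
// orderer container reports Running.
//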
62 | // TODO(user): Modify the Reconcile function to compare the state specified by 63 | // the FabricOrderer object against the actual cluster state, and then 64 | // perform operations to make the cluster state reflect the state specified by 65 | // the user. 66 | // 67 | // For more details, check Reconcile and its Result here: 68 | // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.9.2/pkg/reconcile 69 | func (r *FabricOrdererReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) { 70 | _ = log.FromContext(ctx) 71 | 72 | reqLogger := log.Log.WithValues("namespace", request.Namespace) 73 | reqLogger = reqLogger.WithName(request.Name) 74 | reqLogger.Info("Reconciling FabricOrderer") 75 | defer reqLogger.Info("Reconcile done") 76 | 77 | // Fetch the FabricOrderer instance 78 | instance := &fabricv1alpha1.FabricOrderer{} 79 | err := r.Client.Get(ctx, request.NamespacedName, instance) 80 | if err != nil { 81 | if errors.IsNotFound(err) { 82 | // Request object not found, could have been deleted after reconcile request. 83 | // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. 84 | // Return and don't requeue 85 | return ctrl.Result{}, nil 86 | } 87 | // Error reading the object - requeue the request. 88 | return ctrl.Result{}, err 89 | } 90 | 91 | // Change state to Running when enters in Updating to prevent infinite loop 92 | if instance.Status.State == fabricv1alpha1.StateUpdating { 93 | instance.Status.State = fabricv1alpha1.StateRunning 94 | err := r.Client.Update(ctx, instance) 95 | if err != nil { 96 | reqLogger.Error(err, "failed to update Fabric orderer status") 97 | return ctrl.Result{}, err 98 | } 99 | } 100 | 101 | //Set global namespace 102 | namespace := instance.GetNamespace() 103 | 104 | //Create namespace 105 | newServiceAccount := resources.NewServiceAccount("vault", namespace) 106 | currentServiceAccount := &corev1.ServiceAccount{} 107 | 108 | err = r.Client.Get(ctx, types.NamespacedName{Name: "vault", Namespace: namespace}, currentServiceAccount) 109 | if err != nil && errors.IsNotFound(err) { 110 | //Secret not exists 111 | reqLogger.Info("Creating a new service account", "Namespace", newServiceAccount.GetNamespace(), "Name", newServiceAccount.GetName()) 112 | err = r.Client.Create(ctx, newServiceAccount) 113 | if err != nil { 114 | return ctrl.Result{}, err 115 | } 116 | } else if err != nil { 117 | return ctrl.Result{}, err 118 | } 119 | 120 | //Create secrets for orderers with certificates` 121 | // for key, secretData := range instance.Spec.Certificate { 122 | newCertSecret := newCertificateSecret(instance.Name+"-cacerts", namespace, instance.Spec.Certificate) 123 | newTLSCertSecret := newCertificateSecret(instance.Name+"-tlscacerts", namespace, instance.Spec.TLSCertificate) 124 | newCertSecrets := []*corev1.Secret{newCertSecret, newTLSCertSecret} 125 | 126 | for _, newSecret := range newCertSecrets { 127 | // Set FabricOrderer instance as the owner and controller 128 | if err := controllerutil.SetControllerReference(instance, newSecret, r.Scheme); err != nil { 129 | return ctrl.Result{}, err 130 | } 131 | 132 | currentSecret := &corev1.Secret{} 133 | err = r.Client.Get(ctx, types.NamespacedName{Name: newSecret.Name, Namespace: newSecret.Namespace}, currentSecret) 134 | if err != nil && errors.IsNotFound(err) { 135 | //Secret not exists 136 | reqLogger.Info("Creating a new secret", "Namespace", newSecret.Namespace, "Name", newSecret.Name) 137 | err = r.Client.Create(ctx, newSecret) 
138 | if err != nil { 139 | return ctrl.Result{}, err 140 | } 141 | } else if err != nil { 142 | return ctrl.Result{}, err 143 | } 144 | 145 | // Updating secrets 146 | eq := reflect.DeepEqual(newSecret.Data, currentSecret.Data) 147 | if !eq { 148 | reqLogger.Info("Updating secret", "Namespace", newSecret.Namespace, "Name", newSecret.Name) 149 | err = r.Client.Update(ctx, newSecret) 150 | if err != nil { 151 | return ctrl.Result{}, err 152 | } 153 | } 154 | } 155 | 156 | // genesis 157 | secretName := instance.GetName() + "-genesis" 158 | data := make(map[string][]byte) 159 | data["genesis.block"], _ = base64.StdEncoding.DecodeString(instance.Spec.Genesis) 160 | newSecret := &corev1.Secret{ 161 | TypeMeta: metav1.TypeMeta{ 162 | Kind: "Secret", 163 | APIVersion: "v1", 164 | }, 165 | ObjectMeta: metav1.ObjectMeta{ 166 | Name: secretName, 167 | Namespace: namespace, 168 | }, 169 | Data: data, 170 | } 171 | 172 | // Set FabricOrderer instance as the owner and controller 173 | if err := controllerutil.SetControllerReference(instance, newSecret, r.Scheme); err != nil { 174 | return ctrl.Result{}, err 175 | } 176 | 177 | currentSecret := &corev1.Secret{} 178 | err = r.Client.Get(ctx, types.NamespacedName{Name: newSecret.Name, Namespace: newSecret.Namespace}, currentSecret) 179 | if err != nil && errors.IsNotFound(err) { 180 | //Secret not exists 181 | reqLogger.Info("Creating a new secret", "Namespace", newSecret.Namespace, "Name", newSecret.Name) 182 | err = r.Client.Create(ctx, newSecret) 183 | if err != nil { 184 | return ctrl.Result{}, err 185 | } 186 | } else if err != nil { 187 | return ctrl.Result{}, err 188 | } 189 | 190 | // Updating secrets 191 | eq := reflect.DeepEqual(newSecret.Data, currentSecret.Data) 192 | if !eq { 193 | reqLogger.Info("Updating secret", "Namespace", newSecret.Namespace, "Name", newSecret.Name) 194 | err = r.Client.Update(ctx, newSecret) 195 | if err != nil { 196 | return ctrl.Result{}, err 197 | } 198 | } 199 | 200 | //Create sts for orderer 201 | // Define a new Statef object 202 | newSts := newOrdererStatefulSet(instance) 203 | pvcs := []corev1.PersistentVolumeClaim{} 204 | 205 | for _, item := range newOrdererVolumeClaimTemplates(instance) { 206 | pvc := item 207 | if err := controllerutil.SetControllerReference(instance, &pvc, r.Scheme); err != nil { 208 | return ctrl.Result{}, err 209 | } 210 | pvcs = append(pvcs, pvc) 211 | 212 | } 213 | newSts.Spec.VolumeClaimTemplates = pvcs 214 | 215 | // Set FabricOrderer instance as the owner and controller 216 | if err := controllerutil.SetControllerReference(instance, newSts, r.Scheme); err != nil { 217 | return ctrl.Result{}, err 218 | } 219 | 220 | // Check if this StatefulSet already exists 221 | currentSts := &appsv1.StatefulSet{} 222 | err = r.Client.Get(ctx, types.NamespacedName{Name: newSts.Name, Namespace: newSts.Namespace}, currentSts) 223 | if err != nil && errors.IsNotFound(err) { 224 | reqLogger.Info("Creating a new StatefulSet", "Namespace", newSts.Namespace, "Name", newSts.Name) 225 | err = r.Client.Create(ctx, newSts) 226 | if err != nil { 227 | return ctrl.Result{}, err 228 | } 229 | } else if err != nil { 230 | return ctrl.Result{}, err 231 | } 232 | 233 | candidate := currentSts.DeepCopy() 234 | 235 | if !reflect.DeepEqual(currentSts.Spec.Replicas, instance.Spec.Replicas) { 236 | candidate.Spec.Replicas = &instance.Spec.Replicas 237 | } 238 | 239 | for i, current := range candidate.Spec.Template.Spec.Containers { 240 | for j, new := range newSts.Spec.Template.Spec.Containers { 241 | if current.Name 
== new.Name { 242 | if !reflect.DeepEqual(current.Image, new.Image) { 243 | candidate.Spec.Template.Spec.Containers[i].Image = newSts.Spec.Template.Spec.Containers[j].Image 244 | } 245 | if !reflect.DeepEqual(current.Resources, new.Resources) { 246 | candidate.Spec.Template.Spec.Containers[i].Resources = newSts.Spec.Template.Spec.Containers[j].Resources 247 | } 248 | } 249 | } 250 | } 251 | 252 | if !reflect.DeepEqual(candidate.Spec, currentSts.Spec) { 253 | reqLogger.Info("UPDATING peer statefulset!!!", "Namespace", candidate.Namespace, "Name", candidate.Name) 254 | err = r.Client.Update(ctx, candidate) 255 | if err != nil { 256 | return ctrl.Result{}, err 257 | } 258 | instance.Status.State = fabricv1alpha1.StateUpdating 259 | err := r.Client.Update(ctx, instance) 260 | if err != nil { 261 | reqLogger.Error(err, "failed to update Fabric orderer status") 262 | return ctrl.Result{}, err 263 | } 264 | } else { 265 | reqLogger.Info("NOTHING to update!!!") 266 | } 267 | 268 | //Create Service 269 | newService := newOrdererService(instance) 270 | 271 | // Set FabricOrderer instance as the owner and controller 272 | if err := controllerutil.SetControllerReference(instance, newService, r.Scheme); err != nil { 273 | return ctrl.Result{}, err 274 | } 275 | 276 | // Check if this Service already exists 277 | currentService := &corev1.Service{} 278 | err = r.Client.Get(ctx, types.NamespacedName{Name: newService.Name, Namespace: newService.Namespace}, currentService) 279 | if err != nil && errors.IsNotFound(err) { 280 | reqLogger.Info("Creating a new Service", "Namespace", newService.Namespace, "Name", newService.Name) 281 | err = r.Client.Create(ctx, newService) 282 | if err != nil { 283 | return ctrl.Result{}, err 284 | } 285 | } else if err != nil { 286 | return ctrl.Result{}, err 287 | } 288 | 289 | err = r.Client.Get(ctx, types.NamespacedName{Name: newService.Name, Namespace: newService.Namespace}, currentService) 290 | if err != nil { 291 | return ctrl.Result{}, err 292 | } 293 | 294 | err = resources.CheckDNS(currentService.Spec.ClusterIP, currentService.GetObjectMeta().GetAnnotations()["fqdn"]) 295 | if err != nil { 296 | reqLogger.Error(err, "failed check/update dns", "Namespace", instance.Namespace, "Name", instance.Name, "ServiceIP", currentService.Spec.ClusterIP, "CurrentFQDN", currentService.GetObjectMeta().GetAnnotations()["fqdn"]) 297 | return ctrl.Result{}, err 298 | } 299 | 300 | //Create Gateway 301 | gatewayTemplate := resources.GatewayTemplate{ 302 | Name: instance.GetName(), 303 | Namespace: instance.GetNamespace(), 304 | Servers: resources.GetOrdererServerPorts(instance.Spec.CommonName), 305 | } 306 | newGateway := resources.NewGateway(gatewayTemplate) 307 | 308 | // Set FabricOrderer instance as the owner and controller 309 | if err := controllerutil.SetControllerReference(instance, newGateway, r.Scheme); err != nil { 310 | return ctrl.Result{}, err 311 | } 312 | 313 | // Check if this Gateway already exists 314 | currentGateway := &crd.Gateway{} 315 | err = r.Client.Get(ctx, types.NamespacedName{Name: newGateway.Name, Namespace: newGateway.Namespace}, currentGateway) 316 | if err != nil && errors.IsNotFound(err) { 317 | reqLogger.Info("Creating a new Gateway", "Namespace", newGateway.Namespace, "Name", newGateway.Name) 318 | err = r.Client.Create(ctx, newGateway) 319 | if err != nil { 320 | return ctrl.Result{}, err 321 | } 322 | } else if err != nil { 323 | return ctrl.Result{}, err 324 | } 325 | 326 | //Crate Virtual Service 327 | vsvcTemplate := 
resources.VirtualServiceTemplate{ 328 | Name: instance.GetName(), 329 | Namespace: instance.GetNamespace(), 330 | Spec: resources.GetOrdererVirtualServiceSpec(instance.GetName(), instance.Spec.CommonName), 331 | } 332 | newVirtualService := resources.NewVirtualService(vsvcTemplate) 333 | 334 | // Set FabricOrderer instance as the owner and controller 335 | if err := controllerutil.SetControllerReference(instance, newVirtualService, r.Scheme); err != nil { 336 | return ctrl.Result{}, err 337 | } 338 | 339 | // Check if this Virtual service already exists 340 | currentVirtualService := &crd.VirtualService{} 341 | err = r.Client.Get(ctx, types.NamespacedName{Name: newVirtualService.Name, Namespace: newVirtualService.Namespace}, currentVirtualService) 342 | if err != nil && errors.IsNotFound(err) { 343 | reqLogger.Info("Creating a new Istio Virtual Service", "Namespace", newVirtualService.Namespace, "Name", newVirtualService.Name) 344 | err = r.Client.Create(ctx, newVirtualService) 345 | if err != nil { 346 | return ctrl.Result{}, err 347 | } 348 | } else if err != nil { 349 | return ctrl.Result{}, err 350 | } 351 | 352 | //Update CR status 353 | pod := &corev1.Pod{} 354 | 355 | for ok := true; ok; ok = instance.Status.State == fabricv1alpha1.StateUpdating && pod.Status.Phase == "Running" { 356 | err = r.Client.Get(ctx, types.NamespacedName{Name: instance.Name + "-0", Namespace: instance.Namespace}, pod) 357 | if err != nil { 358 | if instance.Spec.Replicas != int32(0) { 359 | reqLogger.Error(err, "failed to get pods", "Namespace", instance.Namespace, "Name", instance.Name) 360 | return ctrl.Result{}, err 361 | } 362 | ordererState := fabricv1alpha1.StateSuspended 363 | reqLogger.Info("Update orderer status", "Namespace", instance.Namespace, "Name", instance.Name, "State", ordererState) 364 | instance.Status.State = ordererState 365 | err := r.Client.Update(ctx, instance) 366 | if err != nil { 367 | reqLogger.Error(err, "failed to update orderer status") 368 | return ctrl.Result{}, err 369 | } 370 | return ctrl.Result{}, nil 371 | } 372 | } 373 | 374 | ordererState := "" 375 | 376 | if pod.Status.Phase != "Running" { 377 | ordererState = fabricv1alpha1.StateCreating 378 | } else { 379 | for _, status := range pod.Status.ContainerStatuses { 380 | if status.Name == "orderer" { 381 | if status.State.Running != nil { 382 | ordererState = "Running" 383 | } else if status.RestartCount > 0 { 384 | ordererState = "Error" 385 | } 386 | } 387 | } 388 | } 389 | 390 | // Update status.Nodes if needed 391 | if ordererState != instance.Status.State { 392 | reqLogger.Info("Update fabricorderer status", "Namespace", instance.Namespace, "Name", instance.Name, "State", ordererState) 393 | instance.Status.State = ordererState 394 | err := r.Status().Update(ctx, instance) 395 | if err != nil { 396 | reqLogger.Error(err, "failed to update Fabric orderer status") 397 | return ctrl.Result{}, err 398 | } 399 | } 400 | 401 | podNames := []string{instance.Name + "-0"} 402 | 403 | // Update status.Nodes if needed 404 | if !reflect.DeepEqual(podNames, instance.Status.Nodes) { 405 | instance.Status.Nodes = podNames 406 | err := r.Status().Update(ctx, instance) 407 | if err != nil { 408 | reqLogger.Error(err, "Failed to update FabricOrderer status") 409 | return ctrl.Result{}, err 410 | } 411 | } 412 | 413 | // sts already exists - don't requeue 414 | reqLogger.Info("Skip reconcile: sts already exists", "sts.Namespace", currentSts.Namespace, "sts.Name", currentSts.Name) 415 | 416 | if ordererState == 
fabricv1alpha1.StateRunning { 417 | return ctrl.Result{}, nil 418 | } 419 | return ctrl.Result{Requeue: true, RequeueAfter: time.Second * 5}, nil 420 | } 421 | 422 | // SetupWithManager sets up the controller with the Manager. 423 | func (r *FabricOrdererReconciler) SetupWithManager(mgr ctrl.Manager) error { 424 | return ctrl.NewControllerManagedBy(mgr). 425 | For(&fabricv1alpha1.FabricOrderer{}). 426 | Complete(r) 427 | } 428 | 429 | func newCertificateSecret(name, namespace string, certs []fabricv1alpha1.CertificateSecret) *corev1.Secret { 430 | data := make(map[string][]byte) 431 | for _, item := range certs { 432 | data[name] = []byte(item.Value) 433 | } 434 | newSecret := &corev1.Secret{ 435 | TypeMeta: metav1.TypeMeta{ 436 | Kind: "Secret", 437 | APIVersion: "v1", 438 | }, 439 | ObjectMeta: metav1.ObjectMeta{ 440 | Name: name, 441 | Namespace: namespace, 442 | }, 443 | Data: data, 444 | } 445 | return newSecret 446 | } 447 | 448 | func newOrdererStatefulSet(cr *fabricv1alpha1.FabricOrderer) *appsv1.StatefulSet { 449 | replicas := cr.Spec.Replicas 450 | 451 | labels := newOrdererLabels(cr) 452 | 453 | return &appsv1.StatefulSet{ 454 | TypeMeta: metav1.TypeMeta{ 455 | Kind: "StatefulSet", 456 | APIVersion: "apps/v1", 457 | }, 458 | ObjectMeta: metav1.ObjectMeta{ 459 | Name: cr.Name, 460 | Namespace: cr.Namespace, 461 | Labels: labels, 462 | OwnerReferences: cr.OwnerReferences, 463 | }, 464 | Spec: appsv1.StatefulSetSpec{ 465 | ServiceName: cr.Name, 466 | Replicas: &replicas, 467 | Selector: &metav1.LabelSelector{ 468 | MatchLabels: labels, 469 | }, 470 | Template: corev1.PodTemplateSpec{ 471 | ObjectMeta: metav1.ObjectMeta{ 472 | Labels: labels, 473 | }, 474 | Spec: corev1.PodSpec{ 475 | ServiceAccountName: "vault", 476 | InitContainers: resources.GetInitContainer(resources.VaultInit{ 477 | Organization: cr.Spec.Organization, 478 | CommonName: cr.Spec.CommonName, 479 | VaultAddress: config.VaultAddress, 480 | TLSPath: ordererTLSPath, 481 | MSPPath: ordererMSPPath, 482 | Cluster: cr.GetAnnotations()["region"], 483 | NodeType: "orderer", 484 | }), //TODO 485 | Containers: newOrdererContainers(cr), 486 | Volumes: newOrdererVolumes(cr), 487 | }, 488 | }, 489 | 490 | UpdateStrategy: appsv1.StatefulSetUpdateStrategy{ 491 | Type: appsv1.RollingUpdateStatefulSetStrategyType, 492 | }, 493 | //Volume Claims templates 494 | //VolumeClaimTemplates: newOrdererVolumeClaimTemplates(cr), 495 | }, 496 | } 497 | } 498 | 499 | func newOrdererContainers(cr *fabricv1alpha1.FabricOrderer) []corev1.Container { 500 | privileged := true 501 | procMount := corev1.DefaultProcMount 502 | 503 | metricsImage := cr.Spec.MetricsImage 504 | if metricsImage == "" { 505 | metricsImage = "kompitech/fabric-node-metrics:latest" 506 | } 507 | 508 | baseContainers := []corev1.Container{ 509 | { 510 | Name: "orderer", 511 | Image: cr.Spec.Image, 512 | ImagePullPolicy: corev1.PullIfNotPresent, 513 | //workingDir 514 | WorkingDir: "/opt/gopath/src/github.com/hyperledger/fabric/orderer", 515 | Ports: []corev1.ContainerPort{ 516 | { 517 | Name: "containerport1", 518 | ContainerPort: int32(7050), 519 | }, 520 | { 521 | Name: "containerport2", 522 | ContainerPort: int32(8080), 523 | }, 524 | }, 525 | Resources: corev1.ResourceRequirements{ 526 | Limits: corev1.ResourceList{ 527 | corev1.ResourceMemory: resource.MustParse("500Mi"), 528 | corev1.ResourceCPU: resource.MustParse("300m"), 529 | }, 530 | Requests: corev1.ResourceList{ 531 | corev1.ResourceMemory: resource.MustParse("500Mi"), 532 | corev1.ResourceCPU: 
resource.MustParse("300m"), 533 | }, 534 | }, 535 | //command 536 | Command: []string{ 537 | "/bin/sh", 538 | }, 539 | //args 540 | Args: []string{ 541 | "-c", 542 | "orderer", 543 | //"sleep 999999999999", 544 | }, 545 | 546 | // LivenessProbe: &v1.Probe{ 547 | // Handler: v1.Handler{ 548 | // HTTPGet: &v1.HTTPGetAction{ 549 | // Path: "/health", 550 | // Port: intstr.FromString(httpPortName), 551 | // }, 552 | // }, 553 | // InitialDelaySeconds: int32(30), 554 | // PeriodSeconds: int32(5), 555 | // }, 556 | // ReadinessProbe: &corev1.Probe{ 557 | // Handler: corev1.Handler{ 558 | // HTTPGet: &corev1.HTTPGetAction{ 559 | // Path: "/health?ready=1", 560 | // Port: intstr.FromInt(7050), 561 | // }, 562 | // }, 563 | // InitialDelaySeconds: int32(10), 564 | // PeriodSeconds: int32(5), 565 | // FailureThreshold: int32(25), 566 | // }, 567 | 568 | //Volume mount 569 | VolumeMounts: newOrdererVolumeMounts(cr), 570 | TerminationMessagePath: "/dev/termination-log", 571 | TerminationMessagePolicy: "File", 572 | //ENV 573 | Env: newOrdererContainerEnv(cr), 574 | }, 575 | { 576 | Name: "metrics", 577 | Image: metricsImage, 578 | ImagePullPolicy: corev1.PullAlways, 579 | SecurityContext: &corev1.SecurityContext{ 580 | Privileged: &privileged, 581 | ProcMount: &procMount, 582 | }, 583 | Resources: corev1.ResourceRequirements{ 584 | Limits: corev1.ResourceList{ 585 | corev1.ResourceMemory: resource.MustParse("50Mi"), 586 | corev1.ResourceCPU: resource.MustParse("50m"), 587 | }, 588 | }, 589 | Ports: []corev1.ContainerPort{ 590 | { 591 | Name: "metrics", 592 | Protocol: "TCP", 593 | ContainerPort: int32(9141), 594 | }, 595 | }, 596 | TerminationMessagePath: "/dev/termination-log", 597 | TerminationMessagePolicy: "File", 598 | Env: []corev1.EnvVar{ 599 | { 600 | Name: "NODE_NAME", 601 | Value: cr.Name, 602 | }, 603 | { 604 | Name: "NODE_TYPE", 605 | Value: "orderer", 606 | }, 607 | { 608 | Name: "MSP_DIR", 609 | Value: "/etc/hyperledger/orderer/msp", 610 | }, 611 | { 612 | Name: "TLS_DIR", 613 | Value: "/etc/hyperledger/orderer/tls", 614 | }, 615 | }, 616 | VolumeMounts: []corev1.VolumeMount{ 617 | { 618 | Name: "certificate", 619 | MountPath: "/etc/hyperledger/orderer/msp", 620 | SubPath: "data/msp", 621 | }, 622 | { 623 | Name: "certificate", 624 | MountPath: "/etc/hyperledger/orderer/tls", 625 | SubPath: "data/tls", 626 | }, 627 | }, 628 | }, 629 | } 630 | 631 | if cr.Spec.Containers != nil { 632 | for _, c := range cr.Spec.Containers { 633 | for _, cBase := range baseContainers { 634 | if c.Name == cBase.Name { 635 | if err := mergo.Merge(&cBase, c, mergo.WithOverride); err != nil { 636 | //Handle error 637 | } 638 | } 639 | } 640 | } 641 | } 642 | 643 | return baseContainers 644 | } 645 | 646 | func newOrdererContainerEnv(cr *fabricv1alpha1.FabricOrderer) []corev1.EnvVar { 647 | env := []corev1.EnvVar{ 648 | { 649 | Name: "ORDERER_METRICS_PROVIDER", 650 | Value: "prometheus", 651 | }, 652 | { 653 | Name: "ORDERER_METRICS_PROMETHEUS_HANDLERPATH", 654 | Value: "/metrics", 655 | }, 656 | { 657 | Name: "ORDERER_OPERATIONS_LISTENADDRESS", 658 | Value: "0.0.0.0:8080", 659 | }, 660 | { 661 | Name: "ORDERER_GENERAL_LOGLEVEL", 662 | Value: "info", 663 | }, 664 | { 665 | Name: "ORDERER_HOST", 666 | Value: cr.Spec.CommonName, 667 | }, 668 | { 669 | Name: "ORDERER_GENERAL_LISTENADDRESS", 670 | Value: "0.0.0.0", 671 | }, 672 | { 673 | Name: "ORDERER_GENERAL_GENESISMETHOD", 674 | Value: "file", 675 | }, 676 | { 677 | Name: "ORDERER_GENERAL_TLS_ENABLED", 678 | Value: "true", 679 | }, 680 | { 681 | Name: 
"ORDERER_GENERAL_LOCALMSPID", 682 | Value: cr.Spec.MspId, 683 | }, 684 | { 685 | Name: "ORDERER_GENERAL_LOCALMSPDIR", 686 | Value: "/etc/hyperledger/orderer/msp", 687 | }, 688 | { 689 | Name: "ORDERER_GENERAL_GENESISFILE", 690 | Value: "/etc/hyperledger/orderer/orderer.genesis.block/genesis.block", 691 | }, 692 | { 693 | Name: "ORDERER_GENERAL_TLS_PRIVATEKEY", 694 | Value: "/etc/hyperledger/orderer/tls/cert.key", 695 | }, 696 | { 697 | Name: "ORDERER_GENERAL_TLS_CERTIFICATE", 698 | Value: "/etc/hyperledger/orderer/tls/cert.crt", 699 | }, 700 | { 701 | Name: "ORDERER_GENERAL_TLS_ROOTCAS", 702 | Value: "/etc/hyperledger/orderer/tls/ca.crt", 703 | }, 704 | { 705 | Name: "ORDERER_GENERAL_TLS_CLIENTAUTHREQUIRED", 706 | Value: "false", 707 | }, 708 | { 709 | Name: "ORDERER_GENERAL_TLS_CLIENTROOTCAS", 710 | Value: "/etc/hyperledger/orderer/tls/ca.crt", 711 | }, 712 | { 713 | Name: "FABRIC_CA_CLIENT_TLS_CERTFILES", 714 | Value: "/etc/hyperledger/orderer/tls/ca.crt", 715 | }, 716 | { 717 | Name: "ORG_ADMIN_CERT", 718 | Value: "/etc/hyperledger/orderer/msp/admincerts/cert.pem", 719 | }, 720 | // { 721 | // Name: "ORDERER_KAFKA_TLS_ENABLED", 722 | // Value: "true", 723 | // }, 724 | // { 725 | // Name: "ORDERER_KAFKA_TLS_PRIVATEKEY_FILE", 726 | // Value: "/etc/hyperledger/orderer/tls/kafka_cert.key", 727 | // }, 728 | // { 729 | // Name: "ORDERER_KAFKA_TLS_CERTIFICATE_FILE", 730 | // Value: "/etc/hyperledger/orderer/tls/kafka_cert.crt", 731 | // }, 732 | // { 733 | // Name: "ORDERER_KAFKA_TLS_ROOTCAS_FILE", 734 | // Value: "/etc/hyperledger/orderer/tls/kafka_ca.crt", 735 | // }, 736 | { 737 | Name: "ORDERER_GENERAL_CLUSTER_ROOTCAS", 738 | Value: "/etc/hyperledger/orderer/tls/ca.crt", 739 | }, 740 | { 741 | Name: "ORDERER_GENERAL_CLUSTER_CLIENTPRIVATEKEY", 742 | Value: "/etc/hyperledger/orderer/tls/cert.key", 743 | }, 744 | { 745 | Name: "ORDERER_GENERAL_CLUSTER_CLIENTCERTIFICATE", 746 | Value: "/etc/hyperledger/orderer/tls/cert.crt", 747 | }, 748 | { 749 | Name: "GODEBUG", 750 | Value: "netdns=go", 751 | }, 752 | } 753 | return env 754 | } 755 | 756 | func newOrdererService(cr *fabricv1alpha1.FabricOrderer) *corev1.Service { 757 | annotations := make(map[string]string) 758 | 759 | annotations["fqdn"] = cr.Spec.CommonName 760 | annotations["prometheus.io/scrape"] = "true" 761 | annotations["prometheus.io/port"] = "8080" 762 | 763 | var svcObjectMeta metav1.ObjectMeta 764 | var svcSpec corev1.ServiceSpec 765 | svcObjectMeta = metav1.ObjectMeta{ 766 | Name: cr.GetName(), 767 | Namespace: cr.GetNamespace(), 768 | Labels: newOrdererLabels(cr), 769 | Annotations: annotations, 770 | } 771 | 772 | svcSpec = corev1.ServiceSpec{ 773 | Selector: newOrdererLabels(cr), 774 | Type: cr.Spec.SvcType, 775 | Ports: []corev1.ServicePort{ 776 | { 777 | Name: "grpc-orderer", 778 | Protocol: "TCP", 779 | Port: int32(7050), 780 | TargetPort: intstr.FromInt(int(7050)), 781 | }, 782 | { 783 | Name: "metrics", 784 | Protocol: "TCP", 785 | Port: int32(8080), 786 | TargetPort: intstr.FromInt(int(8080)), 787 | }, 788 | }, 789 | } 790 | 791 | if cr.Spec.SvcType == "Headless" { 792 | svcSpec.Type = "None" 793 | } 794 | 795 | return &corev1.Service{ 796 | TypeMeta: metav1.TypeMeta{ 797 | Kind: "Service", 798 | APIVersion: "v1", 799 | }, 800 | ObjectMeta: svcObjectMeta, 801 | Spec: svcSpec, 802 | } 803 | } 804 | 805 | func newOrdererVolumes(cr *fabricv1alpha1.FabricOrderer) []corev1.Volume { 806 | volumes := []corev1.Volume{ 807 | { 808 | Name: newGenesisSecretName(cr.GetName()), 809 | VolumeSource: corev1.VolumeSource{ 810 | 
Secret: &corev1.SecretVolumeSource{ 811 | SecretName: newGenesisSecretName(cr.GetName()), 812 | }, 813 | }, 814 | }, 815 | } 816 | 817 | if cr.Spec.NodeOUsEnabled { 818 | volumes = append(volumes, corev1.Volume{ 819 | Name: "node-ous", 820 | VolumeSource: corev1.VolumeSource{ 821 | ConfigMap: &corev1.ConfigMapVolumeSource{ 822 | LocalObjectReference: corev1.LocalObjectReference{ 823 | Name: "node-ous", 824 | }, 825 | }, 826 | }, 827 | }) 828 | } 829 | 830 | volumes = append(volumes, corev1.Volume{ 831 | Name: cr.GetName() + "-cacerts", 832 | VolumeSource: corev1.VolumeSource{ 833 | Secret: &corev1.SecretVolumeSource{ 834 | SecretName: cr.GetName() + "-cacerts", 835 | }, 836 | }, 837 | }) 838 | 839 | volumes = append(volumes, corev1.Volume{ 840 | Name: cr.GetName() + "-tlscacerts", 841 | VolumeSource: corev1.VolumeSource{ 842 | Secret: &corev1.SecretVolumeSource{ 843 | SecretName: cr.GetName() + "-tlscacerts", 844 | }, 845 | }, 846 | }) 847 | 848 | return volumes 849 | 850 | } 851 | 852 | func newOrdererVolumeMounts(cr *fabricv1alpha1.FabricOrderer) []corev1.VolumeMount { 853 | 854 | //Basic static volume mounts 855 | volumeMounts := []corev1.VolumeMount{ 856 | { 857 | Name: "certificate", 858 | MountPath: "/etc/hyperledger/orderer/msp", 859 | SubPath: "data/msp", 860 | }, 861 | { 862 | Name: "certificate", 863 | MountPath: "/etc/hyperledger/orderer/tls", 864 | SubPath: "data/tls", 865 | }, 866 | { 867 | Name: newGenesisSecretName(cr.GetName()), 868 | MountPath: "/etc/hyperledger/orderer/orderer.genesis.block", 869 | }, 870 | { 871 | Name: "ordererdata", 872 | MountPath: "/var/hyperledger/production", 873 | SubPath: "data/ordererdata", 874 | }, 875 | } 876 | 877 | if cr.Spec.NodeOUsEnabled { 878 | volumeMounts = append(volumeMounts, corev1.VolumeMount{ 879 | Name: "node-ous", 880 | MountPath: "/etc/hyperledger/orderer/msp/config.yaml", 881 | SubPath: "config.yaml", 882 | }) 883 | } 884 | 885 | //Add volume mounts for secrets with certificates 886 | volumeMounts = append(volumeMounts, corev1.VolumeMount{ 887 | Name: cr.ObjectMeta.Name + "-cacerts", 888 | MountPath: ordererMSPPath + "cacerts", 889 | }) 890 | volumeMounts = append(volumeMounts, corev1.VolumeMount{ 891 | Name: cr.ObjectMeta.Name + "-tlscacerts", 892 | MountPath: ordererMSPPath + "tlscacerts", 893 | }) 894 | 895 | return volumeMounts 896 | } 897 | 898 | func newOrdererVolumeClaimTemplates(cr *fabricv1alpha1.FabricOrderer) []corev1.PersistentVolumeClaim { 899 | return []corev1.PersistentVolumeClaim{ 900 | { 901 | TypeMeta: metav1.TypeMeta{ 902 | Kind: "PersistentVolumeClaim", 903 | APIVersion: "v1", 904 | }, 905 | ObjectMeta: metav1.ObjectMeta{ 906 | Name: "ordererdata", 907 | Namespace: cr.Namespace, 908 | }, 909 | Spec: corev1.PersistentVolumeClaimSpec{ 910 | AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, 911 | Resources: corev1.ResourceRequirements{ 912 | Requests: corev1.ResourceList{ 913 | corev1.ResourceStorage: cr.Spec.DataVolumeSize, 914 | }, 915 | }, 916 | }, 917 | }, 918 | { 919 | TypeMeta: metav1.TypeMeta{ 920 | Kind: "PersistentVolumeClaim", 921 | APIVersion: "v1", 922 | }, 923 | ObjectMeta: metav1.ObjectMeta{ 924 | Name: "certificate", 925 | Namespace: cr.Namespace, 926 | }, 927 | Spec: corev1.PersistentVolumeClaimSpec{ 928 | AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, 929 | Resources: corev1.ResourceRequirements{ 930 | Requests: corev1.ResourceList{ 931 | corev1.ResourceStorage: cr.Spec.CertVolumeSize, 932 | }, 933 | }, 934 | }, 935 | }, 936 | } 937 | } 938 | 939 
| func newOrdererLabels(cr *fabricv1alpha1.FabricOrderer) map[string]string { 940 | 941 | return map[string]string{ 942 | "app": cr.Kind, 943 | "name": cr.Name, 944 | } 945 | } 946 | 947 | func newGenesisSecretName(name string) string { 948 | return name + "-genesis" 949 | } 950 | -------------------------------------------------------------------------------- /controllers/fabricpeer_controller.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package controllers 18 | 19 | import ( 20 | "context" 21 | "reflect" 22 | "time" 23 | 24 | crd "github.com/jiribroulik/pkg/apis/istio/v1alpha3" 25 | appsv1 "k8s.io/api/apps/v1" 26 | corev1 "k8s.io/api/core/v1" 27 | "k8s.io/apimachinery/pkg/api/errors" 28 | "k8s.io/apimachinery/pkg/api/resource" 29 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 30 | "k8s.io/apimachinery/pkg/runtime" 31 | "k8s.io/apimachinery/pkg/types" 32 | "k8s.io/apimachinery/pkg/util/intstr" 33 | ctrl "sigs.k8s.io/controller-runtime" 34 | "sigs.k8s.io/controller-runtime/pkg/client" 35 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 36 | "sigs.k8s.io/controller-runtime/pkg/log" 37 | 38 | fabricv1alpha1 "github.com/KompiTech/hyperledger-fabric-operator/api/v1alpha1" 39 | "github.com/KompiTech/hyperledger-fabric-operator/pkg/config" 40 | "github.com/KompiTech/hyperledger-fabric-operator/pkg/resources" 41 | "github.com/imdario/mergo" 42 | ) 43 | 44 | const ( 45 | peerMSPPath = "/etc/hyperledger/fabric/msp/" 46 | peerTLSPath = "/etc/hyperledger/fabric/tls/" 47 | ) 48 | 49 | // FabricPeerReconciler reconciles a FabricPeer object 50 | type FabricPeerReconciler struct { 51 | client.Client 52 | Scheme *runtime.Scheme 53 | } 54 | 55 | //+kubebuilder:rbac:groups=fabric.kompitech.com,resources=fabricpeers,verbs=get;list;watch;create;update;patch;delete 56 | //+kubebuilder:rbac:groups=fabric.kompitech.com,resources=fabricpeers/status,verbs=get;update;patch 57 | //+kubebuilder:rbac:groups=fabric.kompitech.com,resources=fabricpeers/finalizers,verbs=update 58 | 59 | // Reconcile is part of the main kubernetes reconciliation loop which aims to 60 | // move the current state of the cluster closer to the desired state. 61 | // TODO(user): Modify the Reconcile function to compare the state specified by 62 | // the FabricPeer object against the actual cluster state, and then 63 | // perform operations to make the cluster state reflect the state specified by 64 | // the user. 
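//
// In practice this mirrors the orderer controller: it ensures the "vault"
// ServiceAccount, the CA/TLS certificate Secrets, the peer StatefulSet and its
// volume claims, the peer Service (with a DNS check against the "fqdn" annotation),
// and the Istio Gateway and VirtualService, then tracks the peer pod state in the
// CR status, requeuing until the peer container is Running.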
65 | // 66 | // For more details, check Reconcile and its Result here: 67 | // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.9.2/pkg/reconcile 68 | func (r *FabricPeerReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) { 69 | _ = log.FromContext(ctx) 70 | 71 | reqLogger := log.Log.WithValues("namespace", request.Namespace) 72 | reqLogger = reqLogger.WithName(request.Name) 73 | reqLogger.Info("Reconciling FabricPeer") 74 | defer reqLogger.Info("Reconcile done") 75 | 76 | // Fetch the FabricPeer instance 77 | instance := &fabricv1alpha1.FabricPeer{} 78 | err := r.Client.Get(ctx, request.NamespacedName, instance) 79 | if err != nil { 80 | if errors.IsNotFound(err) { 81 | // Request object not found, could have been deleted after reconcile request. 82 | // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. 83 | // Return and don't requeue 84 | return ctrl.Result{}, nil 85 | } 86 | // Error reading the object - requeue the request. 87 | return ctrl.Result{}, err 88 | } 89 | 90 | reqLogger.Info("State: " + instance.Status.State) 91 | 92 | // Change state to Running when enters in Updating to prevent infinite loop 93 | if instance.Status.State == fabricv1alpha1.StateUpdating { 94 | instance.Status.State = fabricv1alpha1.StateRunning 95 | err := r.Client.Update(ctx, instance) 96 | if err != nil { 97 | reqLogger.Error(err, "failed to update Fabric peer status") 98 | return ctrl.Result{}, err 99 | } 100 | } 101 | 102 | //Set global namespace 103 | namespace := instance.GetNamespace() 104 | 105 | //Create namespace 106 | newServiceAccount := resources.NewServiceAccount("vault", namespace) 107 | currentServiceAccount := &corev1.ServiceAccount{} 108 | 109 | err = r.Client.Get(ctx, types.NamespacedName{Name: "vault", Namespace: namespace}, currentServiceAccount) 110 | if err != nil && errors.IsNotFound(err) { 111 | //Secret not exists 112 | reqLogger.Info("Creating a new service account", "Namespace", newServiceAccount.GetNamespace(), "Name", newServiceAccount.GetName()) 113 | err = r.Client.Create(ctx, newServiceAccount) 114 | if err != nil { 115 | return ctrl.Result{}, err 116 | } 117 | } else if err != nil { 118 | return ctrl.Result{}, err 119 | } 120 | 121 | //Create secrets for peers with certificates` 122 | newCertSecret := newCertificateSecret(instance.Name+"-cacerts", namespace, instance.Spec.Certificate) 123 | newTLSCertSecret := newCertificateSecret(instance.Name+"-tlscacerts", namespace, instance.Spec.TLSCertificate) 124 | newCertSecrets := []*corev1.Secret{newCertSecret, newTLSCertSecret} 125 | 126 | for _, newSecret := range newCertSecrets { 127 | // Set FabricPeer instance as the owner and controller 128 | if err := controllerutil.SetControllerReference(instance, newSecret, r.Scheme); err != nil { 129 | return ctrl.Result{}, err 130 | } 131 | 132 | currentSecret := &corev1.Secret{} 133 | err = r.Client.Get(ctx, types.NamespacedName{Name: newSecret.Name, Namespace: newSecret.Namespace}, currentSecret) 134 | if err != nil && errors.IsNotFound(err) { 135 | //Secret not exists 136 | reqLogger.Info("Creating a new secret", "Namespace", newSecret.Namespace, "Name", newSecret.Name) 137 | err = r.Client.Create(ctx, newSecret) 138 | if err != nil { 139 | return ctrl.Result{}, err 140 | } 141 | } else if err != nil { 142 | return ctrl.Result{}, err 143 | } 144 | // Updating secrets 145 | eq := reflect.DeepEqual(newSecret.Data, currentSecret.Data) 146 | if !eq { 147 | reqLogger.Info("Updating secret", "Namespace", 
newSecret.Namespace, "Name", newSecret.Name) 148 | err = r.Client.Update(ctx, newSecret) 149 | if err != nil { 150 | return ctrl.Result{}, err 151 | } 152 | } 153 | } 154 | 155 | //Create sts for peer 156 | // Define a new Statef object 157 | newSts := newPeerStatefulSet(instance) 158 | pvcs := []corev1.PersistentVolumeClaim{} 159 | 160 | for _, item := range newPeerVolumeClaimTemplates(instance) { 161 | pvc := item 162 | if err := controllerutil.SetControllerReference(instance, &pvc, r.Scheme); err != nil { 163 | return ctrl.Result{}, err 164 | } 165 | pvcs = append(pvcs, pvc) 166 | 167 | } 168 | newSts.Spec.VolumeClaimTemplates = pvcs 169 | 170 | // Set FabricPeer instance as the owner and controller 171 | if err := controllerutil.SetControllerReference(instance, newSts, r.Scheme); err != nil { 172 | return ctrl.Result{}, err 173 | } 174 | 175 | // Check if this StatefulSet already exists 176 | currentSts := &appsv1.StatefulSet{} 177 | err = r.Client.Get(ctx, types.NamespacedName{Name: newSts.Name, Namespace: newSts.Namespace}, currentSts) 178 | if err != nil && errors.IsNotFound(err) { 179 | reqLogger.Info("Creating a new StatefulSet", "Namespace", newSts.Namespace, "Name", newSts.Name) 180 | err = r.Client.Create(ctx, newSts) 181 | if err != nil { 182 | return ctrl.Result{}, err 183 | } 184 | } else if err != nil { 185 | return ctrl.Result{}, err 186 | } 187 | 188 | candidate := currentSts.DeepCopy() 189 | 190 | if !reflect.DeepEqual(currentSts.Spec.Replicas, instance.Spec.Replicas) { 191 | candidate.Spec.Replicas = &instance.Spec.Replicas 192 | } 193 | 194 | for i, current := range candidate.Spec.Template.Spec.Containers { 195 | for j, new := range newSts.Spec.Template.Spec.Containers { 196 | if current.Name == new.Name { 197 | if !reflect.DeepEqual(current.Image, new.Image) { 198 | candidate.Spec.Template.Spec.Containers[i].Image = newSts.Spec.Template.Spec.Containers[j].Image 199 | } 200 | if !reflect.DeepEqual(current.Resources, new.Resources) { 201 | candidate.Spec.Template.Spec.Containers[i].Resources = newSts.Spec.Template.Spec.Containers[j].Resources 202 | } 203 | } 204 | } 205 | } 206 | 207 | if !reflect.DeepEqual(candidate.Spec, currentSts.Spec) { 208 | reqLogger.Info("UPDATING peer statefulset!!!", "Namespace", candidate.Namespace, "Name", candidate.Name) 209 | err = r.Client.Update(ctx, candidate) 210 | if err != nil { 211 | return ctrl.Result{}, err 212 | } 213 | instance.Status.State = fabricv1alpha1.StateUpdating 214 | err := r.Client.Update(ctx, instance) 215 | if err != nil { 216 | reqLogger.Error(err, "failed to update Fabric peer status") 217 | return ctrl.Result{}, err 218 | } 219 | } else { 220 | reqLogger.Info("NOTHING to update!!!") 221 | } 222 | 223 | //Create Service 224 | newService := newPeerService(instance) 225 | 226 | // Set FabricPeer instance as the owner and controller 227 | if err := controllerutil.SetControllerReference(instance, newService, r.Scheme); err != nil { 228 | return ctrl.Result{}, err 229 | } 230 | 231 | // Check if this Service already exists 232 | currentService := &corev1.Service{} 233 | err = r.Client.Get(ctx, types.NamespacedName{Name: newService.Name, Namespace: newService.Namespace}, currentService) 234 | if err != nil && errors.IsNotFound(err) { 235 | reqLogger.Info("Creating a new Service", "Namespace", newService.Namespace, "Name", newService.Name) 236 | err = r.Client.Create(ctx, newService) 237 | if err != nil { 238 | return ctrl.Result{}, err 239 | } 240 | } else if err != nil { 241 | return ctrl.Result{}, err 242 | } 243 | 
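	// Re-read the Service so the cluster-assigned ClusterIP is available for the
	// external DNS check below.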
244 | err = r.Client.Get(ctx, types.NamespacedName{Name: newService.Name, Namespace: newService.Namespace}, currentService) 245 | if err != nil { 246 | return ctrl.Result{}, err 247 | } 248 | 249 | err = resources.CheckDNS(currentService.Spec.ClusterIP, currentService.GetObjectMeta().GetAnnotations()["fqdn"]) 250 | if err != nil { 251 | reqLogger.Error(err, "failed check/update dns", "Namespace", instance.Namespace, "Name", instance.Name, "ServiceIP", currentService.Spec.ClusterIP, "CurrentFQDN", currentService.GetObjectMeta().GetAnnotations()["fqdn"]) 252 | return ctrl.Result{}, err 253 | } 254 | 255 | //Create Gateway 256 | gatewayTemplate := resources.GatewayTemplate{ 257 | Name: instance.GetName(), 258 | Namespace: instance.GetNamespace(), 259 | Servers: resources.GetPeerServerPorts(instance.Spec.CommonName), 260 | } 261 | newGateway := resources.NewGateway(gatewayTemplate) 262 | 263 | // Set FabricPeer instance as the owner and controller 264 | if err := controllerutil.SetControllerReference(instance, newGateway, r.Scheme); err != nil { 265 | return ctrl.Result{}, err 266 | } 267 | 268 | // Check if this Gateway already exists 269 | currentGateway := &crd.Gateway{} 270 | err = r.Client.Get(ctx, types.NamespacedName{Name: newGateway.Name, Namespace: newGateway.Namespace}, currentGateway) 271 | if err != nil && errors.IsNotFound(err) { 272 | reqLogger.Info("Creating a new Gateway", "Namespace", newGateway.Namespace, "Name", newGateway.Name) 273 | err = r.Client.Create(ctx, newGateway) 274 | if err != nil { 275 | return ctrl.Result{}, err 276 | } 277 | } else if err != nil { 278 | return ctrl.Result{}, err 279 | } 280 | 281 | //Crate Virtual Service 282 | vsvcTemplate := resources.VirtualServiceTemplate{ 283 | Name: instance.GetName(), 284 | Namespace: instance.GetNamespace(), 285 | Spec: resources.GetPeerVirtualServiceSpec(instance.GetName(), instance.Spec.CommonName), 286 | } 287 | newVirtualService := resources.NewVirtualService(vsvcTemplate) 288 | 289 | // Set FabricPeer instance as the owner and controller 290 | if err := controllerutil.SetControllerReference(instance, newVirtualService, r.Scheme); err != nil { 291 | return ctrl.Result{}, err 292 | } 293 | 294 | // Check if this Gateway already exists 295 | currentVirtualService := &crd.VirtualService{} 296 | err = r.Client.Get(ctx, types.NamespacedName{Name: newVirtualService.Name, Namespace: newVirtualService.Namespace}, currentVirtualService) 297 | if err != nil && errors.IsNotFound(err) { 298 | reqLogger.Info("Creating a new Istio Virtual Service", "Namespace", newVirtualService.Namespace, "Name", newVirtualService.Name) 299 | err = r.Client.Create(ctx, newVirtualService) 300 | if err != nil { 301 | return ctrl.Result{}, err 302 | } 303 | } else if err != nil { 304 | return ctrl.Result{}, err 305 | } 306 | 307 | //Update CR status 308 | pod := &corev1.Pod{} 309 | 310 | for ok := true; ok; ok = instance.Status.State == fabricv1alpha1.StateUpdating && pod.Status.Phase == "Running" { 311 | err = r.Client.Get(ctx, types.NamespacedName{Name: instance.Name + "-0", Namespace: instance.Namespace}, pod) 312 | if err != nil { 313 | if instance.Spec.Replicas != int32(0) { 314 | reqLogger.Error(err, "failed to get pods", "Namespace", instance.Namespace, "Name", instance.Name) 315 | return ctrl.Result{}, err 316 | } 317 | peerState := fabricv1alpha1.StateSuspended 318 | reqLogger.Info("Update fabric peer status", "Namespace", instance.Namespace, "Name", instance.Name, "State", peerState) 319 | instance.Status.State = peerState 320 | err := 
r.Client.Update(ctx, instance) 321 | if err != nil { 322 | reqLogger.Error(err, "failed to update fabric peer status") 323 | return ctrl.Result{}, err 324 | } 325 | return ctrl.Result{}, nil 326 | } 327 | } 328 | 329 | peerState := "" 330 | 331 | if pod.Status.Phase != "Running" { 332 | peerState = fabricv1alpha1.StateCreating 333 | } else { 334 | for _, status := range pod.Status.ContainerStatuses { 335 | if status.Name == "peer" { 336 | if status.State.Running != nil { 337 | peerState = "Running" 338 | } else if status.RestartCount > 0 { 339 | peerState = "Error" 340 | } 341 | } 342 | } 343 | } 344 | 345 | // Update status.Nodes if needed 346 | if peerState != instance.Status.State { 347 | reqLogger.Info("Update fabricpeer status", "Namespace", instance.Namespace, "Name", instance.Name, "State", peerState) 348 | instance.Status.State = peerState 349 | err := r.Status().Update(ctx, instance) 350 | if err != nil { 351 | reqLogger.Error(err, "Failed to update FabricPeer status") 352 | return ctrl.Result{}, err 353 | } 354 | } 355 | 356 | podNames := []string{instance.Name + "-0"} 357 | 358 | // Update status.Nodes if needed 359 | if !reflect.DeepEqual(podNames, instance.Status.Nodes) { 360 | instance.Status.Nodes = podNames 361 | err := r.Status().Update(ctx, instance) 362 | if err != nil { 363 | reqLogger.Error(err, "Failed to update FabricPeer status") 364 | return ctrl.Result{}, err 365 | } 366 | } 367 | 368 | // Pod already exists - don't requeue 369 | reqLogger.Info("Skip reconcile: sts already exists", "Namespace", instance.Namespace, "Name", instance.Name) 370 | 371 | if peerState == fabricv1alpha1.StateRunning { 372 | return ctrl.Result{}, nil 373 | } 374 | return ctrl.Result{Requeue: true, RequeueAfter: time.Second * 5}, nil 375 | } 376 | 377 | // SetupWithManager sets up the controller with the Manager. 378 | func (r *FabricPeerReconciler) SetupWithManager(mgr ctrl.Manager) error { 379 | return ctrl.NewControllerManagedBy(mgr). 380 | For(&fabricv1alpha1.FabricPeer{}). 
381 | Complete(r) 382 | } 383 | 384 | // newStatefulSetForPeer returns a StatefulSet for FabricPeer with the same name/namespace as the cr 385 | func newPeerStatefulSet(cr *fabricv1alpha1.FabricPeer) *appsv1.StatefulSet { 386 | // newPeerCluster sts for fabric peer cluster 387 | replicas := cr.Spec.Replicas 388 | 389 | labels := newPeerLabels(cr) 390 | 391 | init := resources.GetInitContainer(resources.VaultInit{ 392 | Organization: cr.Spec.Organization, 393 | CommonName: cr.Spec.CommonName, 394 | VaultAddress: config.VaultAddress, 395 | TLSPath: peerTLSPath, 396 | MSPPath: peerMSPPath, 397 | Cluster: cr.GetAnnotations()["region"], 398 | NodeType: "peer", 399 | }) //TODO 400 | 401 | init = append(init, newCouchdbInit()) 402 | 403 | sts := &appsv1.StatefulSet{ 404 | TypeMeta: metav1.TypeMeta{ 405 | Kind: "StatefulSet", 406 | APIVersion: "apps/v1", 407 | }, 408 | ObjectMeta: metav1.ObjectMeta{ 409 | Name: cr.Name, 410 | Namespace: cr.Namespace, 411 | Labels: labels, 412 | }, 413 | Spec: appsv1.StatefulSetSpec{ 414 | ServiceName: cr.Name, 415 | Replicas: &replicas, 416 | Selector: &metav1.LabelSelector{ 417 | MatchLabels: labels, 418 | }, 419 | Template: corev1.PodTemplateSpec{ 420 | ObjectMeta: metav1.ObjectMeta{ 421 | Labels: labels, 422 | }, 423 | Spec: corev1.PodSpec{ 424 | ServiceAccountName: "vault", 425 | InitContainers: init, 426 | Containers: newPeerContainers(cr), 427 | Volumes: newPeerVolumes(cr), 428 | }, 429 | }, 430 | UpdateStrategy: appsv1.StatefulSetUpdateStrategy{ 431 | Type: appsv1.RollingUpdateStatefulSetStrategyType, 432 | }, 433 | //Volume Claims templates 434 | VolumeClaimTemplates: newPeerVolumeClaimTemplates(cr), 435 | }, 436 | } 437 | 438 | return sts 439 | } 440 | 441 | // newServiceForPeer returns a service for FabricPeer with the same name/namespace as the cr 442 | func newPeerContainers(cr *fabricv1alpha1.FabricPeer) []corev1.Container { 443 | var user int64 = 0 444 | privileged := true 445 | procMount := corev1.DefaultProcMount 446 | 447 | couchDBContainer := corev1.Container{ 448 | Name: "couchdb", 449 | ImagePullPolicy: corev1.PullIfNotPresent, 450 | Ports: []corev1.ContainerPort{ 451 | { 452 | Name: "containerport", 453 | ContainerPort: int32(5984), 454 | Protocol: "TCP", 455 | }, 456 | }, 457 | Resources: corev1.ResourceRequirements{ 458 | Limits: corev1.ResourceList{ 459 | corev1.ResourceMemory: resource.MustParse("500Mi"), 460 | corev1.ResourceCPU: resource.MustParse("200m"), 461 | }, 462 | Requests: corev1.ResourceList{ 463 | corev1.ResourceMemory: resource.MustParse("500Mi"), 464 | corev1.ResourceCPU: resource.MustParse("200m"), 465 | }, 466 | }, 467 | SecurityContext: &corev1.SecurityContext{ 468 | RunAsUser: &user, 469 | ProcMount: &procMount, 470 | }, 471 | VolumeMounts: []corev1.VolumeMount{ 472 | { 473 | Name: "peerdata", 474 | MountPath: "/opt/couchdb/data", 475 | SubPath: "data/couchdb", 476 | }, 477 | }, 478 | } 479 | 480 | couchDBImage := "hyperledger/fabric-couchdb:0.4.14" 481 | if cr.Spec.CouchDBImage != "" { 482 | couchDBImage = cr.Spec.CouchDBImage 483 | couchDBContainer.Env = []corev1.EnvVar{ 484 | { 485 | Name: "COUCHDB_USER", 486 | Value: "admin", 487 | }, 488 | { 489 | Name: "COUCHDB_PASSWORD", 490 | Value: "password", 491 | }, 492 | } 493 | } 494 | 495 | couchDBContainer.Image = couchDBImage 496 | 497 | dindImage := "docker:18.09.3-dind" 498 | if cr.Spec.DINDImage != "" { 499 | dindImage = cr.Spec.DINDImage 500 | } 501 | 502 | metricsImage := cr.Spec.MetricsImage 503 | if metricsImage == "" { 504 | metricsImage = 
"kompitech/fabric-node-metrics:latest" 505 | } 506 | 507 | baseContainers := []corev1.Container{ 508 | { 509 | Name: "peer", 510 | Image: cr.Spec.Image, 511 | ImagePullPolicy: corev1.PullIfNotPresent, 512 | WorkingDir: "/opt/gopath/src/github.com/hyperledger/fabric/peer", 513 | Ports: []corev1.ContainerPort{ 514 | { 515 | Name: "containerport1", 516 | Protocol: "TCP", 517 | ContainerPort: int32(7051), 518 | }, 519 | { 520 | Name: "containerport2", 521 | ContainerPort: int32(7052), 522 | Protocol: "TCP", 523 | }, 524 | { 525 | Name: "containerport3", 526 | ContainerPort: int32(7053), 527 | Protocol: "TCP", 528 | }, 529 | }, 530 | Command: []string{ 531 | "/bin/sh", 532 | }, 533 | Args: []string{ 534 | "-c", 535 | "peer node start", 536 | }, 537 | LivenessProbe: &corev1.Probe{ 538 | Handler: corev1.Handler{ 539 | HTTPGet: &corev1.HTTPGetAction{ 540 | Path: "/healthz", 541 | Port: intstr.FromInt(8080), 542 | }, 543 | }, 544 | InitialDelaySeconds: int32(30), 545 | PeriodSeconds: int32(5), 546 | }, 547 | ReadinessProbe: &corev1.Probe{ 548 | Handler: corev1.Handler{ 549 | HTTPGet: &corev1.HTTPGetAction{ 550 | Path: "/healthz", 551 | Port: intstr.FromInt(8080), 552 | }, 553 | }, 554 | InitialDelaySeconds: int32(10), 555 | PeriodSeconds: int32(5), 556 | FailureThreshold: int32(25), 557 | }, 558 | Resources: corev1.ResourceRequirements{ 559 | Limits: corev1.ResourceList{ 560 | corev1.ResourceMemory: resource.MustParse("300Mi"), 561 | corev1.ResourceCPU: resource.MustParse("200m"), 562 | }, 563 | Requests: corev1.ResourceList{ 564 | corev1.ResourceMemory: resource.MustParse("300Mi"), 565 | corev1.ResourceCPU: resource.MustParse("200m"), 566 | }, 567 | }, 568 | VolumeMounts: newPeerVolumeMounts(cr), 569 | TerminationMessagePath: "/dev/termination-log", 570 | TerminationMessagePolicy: "File", 571 | Env: newPeerContainerEnv(cr), 572 | }, 573 | { 574 | Name: "metrics", 575 | Image: metricsImage, 576 | ImagePullPolicy: corev1.PullAlways, 577 | SecurityContext: &corev1.SecurityContext{ 578 | Privileged: &privileged, 579 | ProcMount: &procMount, 580 | }, 581 | Resources: corev1.ResourceRequirements{ 582 | Limits: corev1.ResourceList{ 583 | corev1.ResourceMemory: resource.MustParse("50Mi"), 584 | corev1.ResourceCPU: resource.MustParse("50m"), 585 | }, 586 | }, 587 | Ports: []corev1.ContainerPort{ 588 | { 589 | Name: "metrics", 590 | Protocol: "TCP", 591 | ContainerPort: int32(9141), 592 | }, 593 | }, 594 | TerminationMessagePath: "/dev/termination-log", 595 | TerminationMessagePolicy: "File", 596 | Env: []corev1.EnvVar{ 597 | { 598 | Name: "NODE_NAME", 599 | Value: cr.Name, 600 | }, 601 | }, 602 | VolumeMounts: []corev1.VolumeMount{ 603 | { 604 | Name: "certificate", 605 | MountPath: peerMSPPath, 606 | SubPath: "data/msp", 607 | }, 608 | { 609 | Name: "certificate", 610 | MountPath: peerTLSPath, 611 | SubPath: "data/tls", 612 | }, 613 | }, 614 | }, 615 | { 616 | Name: "dind", 617 | Image: dindImage, 618 | ImagePullPolicy: corev1.PullIfNotPresent, 619 | SecurityContext: &corev1.SecurityContext{ 620 | Privileged: &privileged, 621 | ProcMount: &procMount, 622 | }, 623 | Resources: corev1.ResourceRequirements{ 624 | Limits: corev1.ResourceList{ 625 | corev1.ResourceMemory: resource.MustParse("1500Mi"), 626 | corev1.ResourceCPU: resource.MustParse("500m"), 627 | }, 628 | Requests: corev1.ResourceList{ 629 | corev1.ResourceMemory: resource.MustParse("1500Mi"), 630 | corev1.ResourceCPU: resource.MustParse("500m"), 631 | }, 632 | }, 633 | TerminationMessagePath: "/dev/termination-log", 634 | 
TerminationMessagePolicy: "File", 635 | }, 636 | couchDBContainer, 637 | } 638 | 639 | if cr.Spec.Containers != nil { 640 | for _, c := range cr.Spec.Containers { 641 | for _, cBase := range baseContainers { 642 | if c.Name == cBase.Name { 643 | if err := mergo.Merge(&cBase, c, mergo.WithOverride); err != nil { 644 | //Handle error 645 | } 646 | } 647 | } 648 | } 649 | } 650 | 651 | return baseContainers 652 | } 653 | 654 | func newPeerContainerEnv(cr *fabricv1alpha1.FabricPeer) []corev1.EnvVar { 655 | builderImage := "smolaon/fabric-ccenv:amd64-2.0.0-snapshot-e77813c85" 656 | if cr.Spec.BuilderImage != "" { 657 | builderImage = cr.Spec.BuilderImage 658 | } 659 | runtimeImage := "smolaon/fabric-baseos:amd64-2.0.0-snapshot-e77813c85" 660 | if cr.Spec.RuntimeImage != "" { 661 | runtimeImage = cr.Spec.RuntimeImage 662 | } 663 | 664 | env := []corev1.EnvVar{ 665 | { 666 | Name: "DOCKER_HOST", 667 | Value: "127.0.0.1", 668 | }, 669 | { 670 | Name: "CORE_LEDGER_STATE_STATEDATABASE", 671 | Value: "CouchDB", 672 | }, 673 | { 674 | Name: "CORE_LEDGER_STATE_COUCHDBCONFIG_COUCHDBADDRESS", 675 | Value: "localhost:5984", 676 | }, 677 | { 678 | Name: "CORE_VM_ENDPOINT", 679 | Value: "http://127.0.0.1:2375", 680 | }, 681 | { 682 | Name: "CORE_VM_DOCKER_ATTACHSTDOUT", 683 | Value: "true", 684 | }, 685 | { 686 | Name: "FABRIC_LOGGING_SPEC", 687 | Value: "info", 688 | }, 689 | { 690 | Name: "CORE_METRICS_PROVIDER", 691 | Value: "prometheus", 692 | }, 693 | { 694 | Name: "CORE_METRICS_PROMETHEUS_HANDLERPATH", 695 | Value: "/metrics", 696 | }, 697 | { 698 | Name: "CORE_OPERATIONS_LISTENADDRESS", 699 | Value: "0.0.0.0:8080", 700 | }, 701 | { 702 | Name: "CORE_PEER_TLS_ENABLED", 703 | Value: "true", 704 | }, 705 | { 706 | Name: "CORE_PEER_GOSSIP_USELEADERELECTION", 707 | Value: "true", 708 | }, 709 | { 710 | Name: "CORE_PEER_GOSSIP_ORGLEADER", 711 | Value: "false", 712 | }, 713 | { 714 | Name: "CORE_PEER_PROFILE_ENABLED", 715 | Value: "true", 716 | }, 717 | { 718 | Name: "CORE_PEER_MSPCONFIGPATH", 719 | Value: "/etc/hyperledger/fabric/msp", 720 | }, 721 | { 722 | Name: "CORE_PEER_TLS_CERT_FILE", 723 | Value: "/etc/hyperledger/fabric/tls/cert.crt", 724 | }, 725 | { 726 | Name: "CORE_PEER_TLS_KEY_FILE", 727 | Value: "/etc/hyperledger/fabric/tls/cert.key", 728 | }, 729 | { 730 | Name: "FABRIC_CA_CLIENT_TLS_CERTFILES", 731 | Value: "/etc/hyperledger/fabric/tls/ca.crt", 732 | }, 733 | { 734 | Name: "CORE_PEER_TLS_ROOTCERT_FILE", 735 | Value: "/etc/hyperledger/fabric/tls/ca.crt", 736 | }, 737 | { 738 | Name: "CORE_PEER_TLS_CLIENTAUTHREQUIRED", 739 | Value: "false", 740 | }, 741 | { 742 | Name: "CORE_PEER_TLS_CLIENTROOTCAS_FILES", 743 | Value: "/etc/hyperledger/fabric/tls/ca.crt", 744 | }, 745 | { 746 | Name: "CORE_PEER_TLS_CLIENTCERT_FILE", 747 | Value: "/etc/hyperledger/fabric/tls/cert.crt", 748 | }, 749 | { 750 | Name: "CORE_PEER_TLS_CLIENTKEY_FILE", 751 | Value: "/etc/hyperledger/fabric/tls/cert.key", 752 | }, 753 | { 754 | Name: "ORG_ADMIN_CERT", 755 | Value: "/etc/hyperledger/fabric/msp/admincerts/cert.pem", 756 | }, 757 | { 758 | Name: "CORE_PEER_ID", 759 | Value: cr.Spec.CommonName, 760 | }, 761 | { 762 | Name: "CORE_PEER_ADDRESS", 763 | Value: "$(CORE_PEER_ID):7051", 764 | }, 765 | { 766 | Name: "PEER_HOME", 767 | Value: "/opt/gopath/src/github.com/hyperledger/fabric/peer", 768 | }, 769 | { 770 | Name: "ORG_NAME", 771 | Value: cr.Spec.Organization, 772 | }, 773 | { 774 | Name: "CORE_PEER_GOSSIP_EXTERNALENDPOINT", 775 | Value: "$(CORE_PEER_ID):7051", 776 | }, 777 | { 778 | Name: "POD_IP", 779 | ValueFrom: 
&corev1.EnvVarSource{ 780 | FieldRef: &corev1.ObjectFieldSelector{ 781 | APIVersion: "v1", 782 | FieldPath: "status.podIP", 783 | }, 784 | }, 785 | }, 786 | { 787 | Name: "CORE_PEER_CHAINCODEADDRESS", 788 | Value: "$(POD_IP):7052", 789 | }, 790 | { 791 | Name: "CORE_PEER_CHAINCODELISTENADDRESS", 792 | Value: "0.0.0.0:7052", 793 | }, 794 | { 795 | Name: "CORE_PEER_LOCALMSPID", 796 | Value: cr.Spec.MspId, 797 | }, 798 | { 799 | Name: "GODEBUG", 800 | Value: "netdns=go", 801 | }, 802 | { 803 | Name: "CORE_CHAINCODE_BUILDER", 804 | Value: builderImage, 805 | }, 806 | { 807 | Name: "CORE_CHAINCODE_GOLANG_RUNTIME", 808 | Value: runtimeImage, 809 | }, 810 | } 811 | if cr.Spec.BootstrapNodeAddress != "" { 812 | env = append(env, corev1.EnvVar{ 813 | Name: "CORE_PEER_GOSSIP_BOOTSTRAP", 814 | Value: cr.Spec.BootstrapNodeAddress, 815 | }) 816 | } 817 | 818 | if cr.Spec.CouchDBImage != "" { 819 | additionalEnv := []corev1.EnvVar{ 820 | { 821 | Name: "CORE_LEDGER_STATE_COUCHDBCONFIG_USERNAME", 822 | Value: "admin", 823 | }, 824 | { 825 | Name: "CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD", 826 | Value: "password", 827 | }, 828 | } 829 | env = append(env, additionalEnv...) 830 | } 831 | 832 | return env 833 | } 834 | 835 | // newServiceForPeer returns a service for FabricPeer with the same name/namespace as the cr 836 | func newPeerService(cr *fabricv1alpha1.FabricPeer) *corev1.Service { 837 | annotations := make(map[string]string) 838 | 839 | annotations["fqdn"] = cr.Spec.CommonName 840 | annotations["prometheus.io/scrape"] = "true" 841 | annotations["prometheus.io/port"] = "8080" 842 | 843 | var svcObjectMeta metav1.ObjectMeta 844 | var svcSpec corev1.ServiceSpec 845 | svcObjectMeta = metav1.ObjectMeta{ 846 | Name: cr.GetName(), 847 | Namespace: cr.GetNamespace(), 848 | Labels: newPeerLabels(cr), 849 | Annotations: annotations, 850 | } 851 | 852 | svcSpec = corev1.ServiceSpec{ 853 | Selector: newPeerLabels(cr), 854 | Type: cr.Spec.SvcType, 855 | Ports: []corev1.ServicePort{ 856 | { 857 | Name: "grpc-ext-listen-endpoint", 858 | Protocol: "TCP", 859 | Port: int32(7051), 860 | TargetPort: intstr.FromInt(int(7051)), 861 | }, 862 | { 863 | Name: "grpc-chaincode-listen", 864 | Protocol: "TCP", 865 | Port: int32(7052), 866 | TargetPort: intstr.FromInt(int(7052)), 867 | }, 868 | { 869 | Name: "metrics", 870 | Protocol: "TCP", 871 | Port: int32(8080), 872 | TargetPort: intstr.FromInt(int(8080)), 873 | }, 874 | }, 875 | } 876 | 877 | if cr.Spec.SvcType == "Headless" { 878 | svcSpec.Type = "None" 879 | } 880 | 881 | return &corev1.Service{ 882 | TypeMeta: metav1.TypeMeta{ 883 | Kind: "Service", 884 | APIVersion: "v1", 885 | }, 886 | ObjectMeta: svcObjectMeta, 887 | Spec: svcSpec, 888 | } 889 | } 890 | 891 | // newServiceForPeer returns a service for FabricPeer with the same name/namespace as the cr 892 | func newPeerVolumes(cr *fabricv1alpha1.FabricPeer) []corev1.Volume { 893 | volumes := []corev1.Volume{ 894 | { 895 | Name: "run", 896 | VolumeSource: corev1.VolumeSource{ 897 | HostPath: &corev1.HostPathVolumeSource{ 898 | Path: "/run", 899 | }, 900 | }, 901 | }, 902 | } 903 | 904 | if cr.Spec.NodeOUsEnabled { 905 | volumes = append(volumes, corev1.Volume{ 906 | Name: "node-ous", 907 | VolumeSource: corev1.VolumeSource{ 908 | ConfigMap: &corev1.ConfigMapVolumeSource{ 909 | LocalObjectReference: corev1.LocalObjectReference{ 910 | Name: "node-ous", 911 | }, 912 | }, 913 | }, 914 | }) 915 | } 916 | 917 | volumes = append(volumes, corev1.Volume{ 918 | Name: cr.GetName() + "-cacerts", 919 | VolumeSource: 
corev1.VolumeSource{ 920 | Secret: &corev1.SecretVolumeSource{ 921 | SecretName: cr.GetName() + "-cacerts", 922 | }, 923 | }, 924 | }) 925 | 926 | volumes = append(volumes, corev1.Volume{ 927 | Name: cr.GetName() + "-tlscacerts", 928 | VolumeSource: corev1.VolumeSource{ 929 | Secret: &corev1.SecretVolumeSource{ 930 | SecretName: cr.GetName() + "-tlscacerts", 931 | }, 932 | }, 933 | }) 934 | 935 | return volumes 936 | } 937 | 938 | func newPeerVolumeMounts(cr *fabricv1alpha1.FabricPeer) []corev1.VolumeMount { 939 | //Basic static volume mounts 940 | volumeMounts := []corev1.VolumeMount{ 941 | { 942 | Name: "certificate", 943 | MountPath: peerMSPPath, 944 | SubPath: "data/msp", 945 | }, 946 | { 947 | Name: "certificate", 948 | MountPath: peerTLSPath, 949 | SubPath: "data/tls", 950 | }, 951 | { 952 | Name: "peerdata", 953 | MountPath: "/var/hyperledger/production", 954 | SubPath: "data/peerdata", 955 | }, 956 | // { 957 | // Name: "run", 958 | // MountPath: "/host/var/run/", 959 | // }, 960 | } 961 | 962 | if cr.Spec.NodeOUsEnabled { 963 | volumeMounts = append(volumeMounts, corev1.VolumeMount{ 964 | Name: "node-ous", 965 | MountPath: "/etc/hyperledger/fabric/msp/config.yaml", 966 | SubPath: "config.yaml", 967 | }) 968 | } 969 | 970 | //Add volume mounts for secrets with certificates 971 | volumeMounts = append(volumeMounts, corev1.VolumeMount{ 972 | Name: cr.ObjectMeta.Name + "-cacerts", 973 | MountPath: ordererMSPPath + "cacerts", 974 | }) 975 | volumeMounts = append(volumeMounts, corev1.VolumeMount{ 976 | Name: cr.ObjectMeta.Name + "-tlscacerts", 977 | MountPath: ordererMSPPath + "tlscacerts", 978 | }) 979 | 980 | return volumeMounts 981 | } 982 | 983 | func newPeerVolumeClaimTemplates(cr *fabricv1alpha1.FabricPeer) []corev1.PersistentVolumeClaim { 984 | return []corev1.PersistentVolumeClaim{ 985 | { 986 | TypeMeta: metav1.TypeMeta{ 987 | Kind: "PersistentVolumeClaim", 988 | APIVersion: "v1", 989 | }, 990 | ObjectMeta: metav1.ObjectMeta{ 991 | Name: "peerdata", 992 | Namespace: cr.Namespace, 993 | }, 994 | Spec: corev1.PersistentVolumeClaimSpec{ 995 | AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, 996 | Resources: corev1.ResourceRequirements{ 997 | Requests: corev1.ResourceList{ 998 | corev1.ResourceStorage: cr.Spec.DataVolumeSize, 999 | }, 1000 | }, 1001 | }, 1002 | }, 1003 | { 1004 | TypeMeta: metav1.TypeMeta{ 1005 | Kind: "PersistentVolumeClaim", 1006 | APIVersion: "v1", 1007 | }, 1008 | ObjectMeta: metav1.ObjectMeta{ 1009 | Name: "certificate", 1010 | Namespace: cr.Namespace, 1011 | }, 1012 | Spec: corev1.PersistentVolumeClaimSpec{ 1013 | AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, 1014 | Resources: corev1.ResourceRequirements{ 1015 | Requests: corev1.ResourceList{ 1016 | corev1.ResourceStorage: cr.Spec.CertVolumeSize, 1017 | }, 1018 | }, 1019 | }, 1020 | }, 1021 | } 1022 | } 1023 | 1024 | func newPeerLabels(cr *fabricv1alpha1.FabricPeer) map[string]string { 1025 | 1026 | return map[string]string{ 1027 | "app": cr.Kind, 1028 | "name": cr.Name, 1029 | "peer_cr": cr.Name, 1030 | } 1031 | } 1032 | 1033 | func newCouchdbInit() corev1.Container { 1034 | 1035 | return corev1.Container{ 1036 | Name: "couchdb-init", 1037 | Image: "hyperledger/fabric-couchdb:0.4.14", 1038 | Command: []string{ 1039 | "/bin/sh", 1040 | "-c", 1041 | "chown -R couchdb:couchdb /data", 1042 | }, 1043 | VolumeMounts: []corev1.VolumeMount{ 1044 | { 1045 | Name: "peerdata", 1046 | MountPath: "/data", 1047 | SubPath: "data/couchdb", 1048 | }, 1049 | }, 1050 | } 1051 | 
} 1052 | -------------------------------------------------------------------------------- /controllers/suite_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package controllers 18 | 19 | import ( 20 | "path/filepath" 21 | "testing" 22 | 23 | . "github.com/onsi/ginkgo" 24 | . "github.com/onsi/gomega" 25 | "k8s.io/client-go/kubernetes/scheme" 26 | "k8s.io/client-go/rest" 27 | "sigs.k8s.io/controller-runtime/pkg/client" 28 | "sigs.k8s.io/controller-runtime/pkg/envtest" 29 | "sigs.k8s.io/controller-runtime/pkg/envtest/printer" 30 | logf "sigs.k8s.io/controller-runtime/pkg/log" 31 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 32 | 33 | fabricv1alpha1 "github.com/KompiTech/hyperledger-fabric-operator/api/v1alpha1" 34 | //+kubebuilder:scaffold:imports 35 | ) 36 | 37 | // These tests use Ginkgo (BDD-style Go testing framework). Refer to 38 | // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 39 | 40 | var cfg *rest.Config 41 | var k8sClient client.Client 42 | var testEnv *envtest.Environment 43 | 44 | func TestAPIs(t *testing.T) { 45 | RegisterFailHandler(Fail) 46 | 47 | RunSpecsWithDefaultAndCustomReporters(t, 48 | "Controller Suite", 49 | []Reporter{printer.NewlineReporter{}}) 50 | } 51 | 52 | var _ = BeforeSuite(func() { 53 | logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) 54 | 55 | By("bootstrapping test environment") 56 | testEnv = &envtest.Environment{ 57 | CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, 58 | ErrorIfCRDPathMissing: true, 59 | } 60 | 61 | cfg, err := testEnv.Start() 62 | Expect(err).NotTo(HaveOccurred()) 63 | Expect(cfg).NotTo(BeNil()) 64 | 65 | err = fabricv1alpha1.AddToScheme(scheme.Scheme) 66 | Expect(err).NotTo(HaveOccurred()) 67 | 68 | err = fabricv1alpha1.AddToScheme(scheme.Scheme) 69 | Expect(err).NotTo(HaveOccurred()) 70 | 71 | //+kubebuilder:scaffold:scheme 72 | 73 | k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) 74 | Expect(err).NotTo(HaveOccurred()) 75 | Expect(k8sClient).NotTo(BeNil()) 76 | 77 | }, 60) 78 | 79 | var _ = AfterSuite(func() { 80 | By("tearing down the test environment") 81 | err := testEnv.Stop() 82 | Expect(err).NotTo(HaveOccurred()) 83 | }) 84 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/KompiTech/hyperledger-fabric-operator 2 | 3 | go 1.16 4 | 5 | require ( 6 | github.com/imdario/mergo v0.3.12 7 | github.com/jiribroulik/pkg v0.0.0-20211008112707-0c482bacdc23 8 | github.com/knative/pkg v0.0.0-20191107185656-884d50f09454 // indirect 9 | github.com/onsi/ginkgo v1.16.4 10 | github.com/onsi/gomega v1.13.0 11 | github.com/pkg/errors v0.9.1 12 | go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489 13 | k8s.io/api v0.21.2 14 | k8s.io/apimachinery v0.21.2 15 | 
k8s.io/client-go v0.21.2 16 | sigs.k8s.io/controller-runtime v0.9.2 17 | ) 18 | -------------------------------------------------------------------------------- /hack/boilerplate.go.txt: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package main 18 | 19 | import ( 20 | "flag" 21 | "os" 22 | 23 | // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) 24 | // to ensure that exec-entrypoint and run can make use of them. 25 | crd "github.com/jiribroulik/pkg/apis/istio/v1alpha3" 26 | "k8s.io/apimachinery/pkg/runtime" 27 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 28 | clientgoscheme "k8s.io/client-go/kubernetes/scheme" 29 | _ "k8s.io/client-go/plugin/pkg/client/auth" 30 | ctrl "sigs.k8s.io/controller-runtime" 31 | "sigs.k8s.io/controller-runtime/pkg/healthz" 32 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 33 | 34 | fabricv1alpha1 "github.com/KompiTech/hyperledger-fabric-operator/api/v1alpha1" 35 | "github.com/KompiTech/hyperledger-fabric-operator/controllers" 36 | //+kubebuilder:scaffold:imports 37 | ) 38 | 39 | var ( 40 | scheme = runtime.NewScheme() 41 | setupLog = ctrl.Log.WithName("setup") 42 | ) 43 | 44 | func init() { 45 | utilruntime.Must(clientgoscheme.AddToScheme(scheme)) 46 | utilruntime.Must(crd.AddToScheme(scheme)) 47 | utilruntime.Must(fabricv1alpha1.AddToScheme(scheme)) 48 | //+kubebuilder:scaffold:scheme 49 | } 50 | 51 | func main() { 52 | var metricsAddr string 53 | var enableLeaderElection bool 54 | var probeAddr string 55 | flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") 56 | flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") 57 | flag.BoolVar(&enableLeaderElection, "leader-elect", false, 58 | "Enable leader election for controller manager. 
"+ 59 | "Enabling this will ensure there is only one active controller manager.") 60 | opts := zap.Options{ 61 | Development: true, 62 | } 63 | opts.BindFlags(flag.CommandLine) 64 | flag.Parse() 65 | 66 | ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) 67 | 68 | mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ 69 | Scheme: scheme, 70 | MetricsBindAddress: metricsAddr, 71 | Port: 9443, 72 | HealthProbeBindAddress: probeAddr, 73 | LeaderElection: enableLeaderElection, 74 | LeaderElectionID: "69a4dde1.kompitech.com", 75 | }) 76 | if err != nil { 77 | setupLog.Error(err, "unable to start manager") 78 | os.Exit(1) 79 | } 80 | 81 | if err = (&controllers.FabricPeerReconciler{ 82 | Client: mgr.GetClient(), 83 | Scheme: mgr.GetScheme(), 84 | }).SetupWithManager(mgr); err != nil { 85 | setupLog.Error(err, "unable to create controller", "controller", "FabricPeer") 86 | os.Exit(1) 87 | } 88 | if err = (&controllers.FabricOrdererReconciler{ 89 | Client: mgr.GetClient(), 90 | Scheme: mgr.GetScheme(), 91 | }).SetupWithManager(mgr); err != nil { 92 | setupLog.Error(err, "unable to create controller", "controller", "FabricOrderer") 93 | os.Exit(1) 94 | } 95 | //+kubebuilder:scaffold:builder 96 | 97 | if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { 98 | setupLog.Error(err, "unable to set up health check") 99 | os.Exit(1) 100 | } 101 | if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { 102 | setupLog.Error(err, "unable to set up ready check") 103 | os.Exit(1) 104 | } 105 | 106 | setupLog.Info("starting manager") 107 | if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { 108 | setupLog.Error(err, "problem running manager") 109 | os.Exit(1) 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /pkg/config/config.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019 KompiTech GmbH 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package config 18 | 19 | import ( 20 | "os" 21 | ) 22 | 23 | // Change below variables to serve metrics on different host or port. 24 | var ( 25 | VaultAddress = "https://vault-cluster.vault:8200" 26 | EtcdAddress = "localhost" 27 | ) 28 | 29 | func init() { 30 | 31 | vaultAddressEnv, exists := os.LookupEnv("OPERATOR_VAULT_ADDRESS") 32 | if exists { 33 | VaultAddress = vaultAddressEnv 34 | } 35 | 36 | } 37 | -------------------------------------------------------------------------------- /pkg/resources/common.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019 KompiTech GmbH 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 
6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package resources 18 | 19 | import ( 20 | corev1 "k8s.io/api/core/v1" 21 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 22 | ) 23 | 24 | // NewNamespace returns new K8S namespace 25 | func NewNamespace(name string) *corev1.Namespace { 26 | return &corev1.Namespace{ 27 | TypeMeta: metav1.TypeMeta{ 28 | Kind: "Namespace", 29 | APIVersion: "v1", 30 | }, 31 | ObjectMeta: metav1.ObjectMeta{ 32 | Name: name, 33 | }, 34 | } 35 | } 36 | 37 | // NewServiceAccount returns new K8S service account 38 | func NewServiceAccount(name string, namespace string) *corev1.ServiceAccount { 39 | return &corev1.ServiceAccount{ 40 | 41 | TypeMeta: metav1.TypeMeta{ 42 | Kind: "ServiceAccount", 43 | APIVersion: "v1", 44 | }, 45 | ObjectMeta: metav1.ObjectMeta{ 46 | Name: name, 47 | Namespace: namespace, 48 | }, 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /pkg/resources/dns.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019 KompiTech GmbH 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package resources 18 | 19 | import ( 20 | "context" 21 | "crypto/tls" 22 | "crypto/x509" 23 | "encoding/json" 24 | "io/ioutil" 25 | "strings" 26 | "time" 27 | 28 | "github.com/pkg/errors" 29 | 30 | "go.etcd.io/etcd/clientv3" 31 | logf "sigs.k8s.io/controller-runtime/pkg/log" 32 | ) 33 | 34 | const ( 35 | // etcdEndpoint = "localhost:2379" // for local run 36 | etcdEndpoint = "etcd-client.etcd:2379" 37 | etcdTimeout = 15 38 | ) 39 | 40 | var log = logf.Log.WithName("resources_dns") 41 | 42 | func reverse(s string) string { 43 | n := len(s) 44 | runes := make([]rune, n) 45 | for _, rune := range s { 46 | n-- 47 | runes[n] = rune 48 | } 49 | return string(runes[n:]) 50 | } 51 | 52 | func getSkyDNSEntry(fqdn string) string { 53 | 54 | // split into slice by dot . 
55 | addressSlice := strings.Split(fqdn, ".") 56 | reverseSlice := []string{} 57 | 58 | for i := range addressSlice { 59 | octet := addressSlice[len(addressSlice)-1-i] 60 | reverseSlice = append(reverseSlice, octet) 61 | } 62 | 63 | S := strings.Join(reverseSlice, "/") 64 | // log.Info("DNS name: %v", S) 65 | //S = reverse(strings.Replace(reverse(S), ".", "/", strings.Count(S, "-")-1)) 66 | //log.Printf("DNS name reversed: %v", S) 67 | return S 68 | } 69 | 70 | // CheckDNS checks if DNS entry exists and creates it if not 71 | func CheckDNS(clusterIP, fqdn string) error { 72 | if fqdn == "" { 73 | return errors.Errorf("Fqdn cannot be empty for clusterIP: %v", clusterIP) 74 | } 75 | if clusterIP == "" { 76 | return errors.Errorf("Fqdn cannot be empty for fqdn: %v", fqdn) 77 | } 78 | // log.Info("checking dns entry for fqdn %v, current IP is: %v", fqdn, clusterIP) 79 | // tlsConfig, err := getTLSConfig() 80 | // if err != nil { 81 | // log.Printf("ERROR getting tls config for etcd client during checking dns for service: %v", svc) 82 | // return err 83 | // } 84 | cli, err := clientv3.New(clientv3.Config{ 85 | Endpoints: []string{etcdEndpoint}, 86 | DialTimeout: etcdTimeout * time.Second, 87 | // TLS: tlsConfig, // uncomment once tls on etcd is desired 88 | }) 89 | if err != nil { 90 | log.Error(err, "getting etcd client during fqdn:"+fqdn+" check failed") 91 | return err 92 | } 93 | defer cli.Close() 94 | ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) 95 | fqdn = "/skydns/" + getSkyDNSEntry(fqdn) 96 | 97 | // get current IP entry in etcd 98 | resp, err := cli.Get(ctx, fqdn) 99 | defer cancel() 100 | 101 | // log.Info("DNS response %v, error: %v", resp, err) 102 | if err != nil || resp == nil { 103 | log.Error(err, "getting fqdn: "+fqdn+" from skydns failed") 104 | return errors.Errorf("ERROR: while getting fqdn: %v from skydns", fqdn) 105 | } 106 | // check if update is needed 107 | if resp.Count > 0 { 108 | result := make(map[string]string) 109 | err = json.Unmarshal(resp.Kvs[0].Value, &result) 110 | if result["host"] != clusterIP { 111 | // log.Info("Updating entry for fqdn %v, current IP is: %v", fqdn, clusterIP) 112 | err = updateDNSRecord(fqdn, clusterIP) 113 | if err != nil { 114 | log.Error(err, "Update entry failed for fqdn "+fqdn+", current IP is: "+clusterIP+", etcdIP: "+result["host"]) 115 | return err 116 | } 117 | } 118 | } else { 119 | // log.Info("Creating entry for fqdn %v, current IP is: %v", fqdn, clusterIP) 120 | err = updateDNSRecord(fqdn, clusterIP) 121 | if err != nil { 122 | log.Error(err, "Create entry failed for fqdn "+fqdn+", current IP is: "+clusterIP) 123 | return err 124 | } 125 | } 126 | return nil 127 | } 128 | 129 | func getTLSConfig() (*tls.Config, error) { 130 | cert, err := tls.LoadX509KeyPair("../cert.crt", "../cert.key") 131 | if err != nil { 132 | return nil, err 133 | } 134 | cacert, err := ioutil.ReadFile("../ca.crt") 135 | if err != nil { 136 | return nil, err 137 | } 138 | caCertPool := x509.NewCertPool() 139 | caCertPool.AppendCertsFromPEM([]byte(cacert)) 140 | 141 | // Setup HTTPS client 142 | tlsConfig := &tls.Config{ 143 | Certificates: []tls.Certificate{cert}, 144 | RootCAs: caCertPool, 145 | InsecureSkipVerify: false, 146 | } 147 | return tlsConfig, nil 148 | } 149 | 150 | func updateDNSRecord(key, ip string) error { 151 | // tlsConfig, err := getTLSConfig() 152 | // if err != nil { 153 | // log.Printf("ERROR getting tls config for etcd client during updating key: %v", key) 154 | // return err 155 | // } 156 | cli, err 
:= clientv3.New(clientv3.Config{ 157 | Endpoints: []string{etcdEndpoint}, 158 | DialTimeout: etcdTimeout * time.Second, 159 | // TLS: tlsConfig, 160 | }) 161 | if err != nil { 162 | log.Error(err, "getting etcd client during key: "+key+" put failed") 163 | return err 164 | } 165 | defer cli.Close() 166 | ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) 167 | 168 | _, err = cli.Put(ctx, key, "{\"host\":\""+ip+"\"}") 169 | defer cancel() 170 | // log.Info("Put response: %v, error: %v, key: %v", resp, err, key) 171 | 172 | if err != nil { 173 | log.Error(err, "updating record in etcd key: "+key+" failed") 174 | return err 175 | } 176 | 177 | // log.Info("DNS record: %v updated to %v", key, ip) 178 | return nil 179 | } 180 | 181 | // DeleteDNS deletes DNS record 182 | func DeleteDNS(fqdn string) error { 183 | if fqdn == "" { 184 | return errors.Errorf("ERROR invalid fqdn while deleting dns record: %v", fqdn) 185 | } 186 | // log.Info("deleting dns entry for key: %v", fqdn) 187 | // tlsConfig, err := getTLSConfig() 188 | // if err != nil { 189 | // log.Printf("ERROR getting tls config for etcd client during deleting key: %v", fqdn) 190 | // return err 191 | // } 192 | cli, err := clientv3.New(clientv3.Config{ 193 | Endpoints: []string{etcdEndpoint}, 194 | DialTimeout: etcdTimeout * time.Second, 195 | // TLS: tlsConfig, 196 | }) 197 | if err != nil { 198 | return errors.Errorf("ERROR getting etcd client during key: %v deletion", fqdn) 199 | } 200 | defer cli.Close() 201 | ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) 202 | fqdn = "/skydns/" + getSkyDNSEntry(fqdn) 203 | 204 | // Delete key from etcd 205 | resp, err := cli.Delete(ctx, fqdn) 206 | cancel() 207 | if err != nil || resp == nil { 208 | log.Error(err, "deleting key "+fqdn+" failed") 209 | return err 210 | } 211 | return nil 212 | } 213 | -------------------------------------------------------------------------------- /pkg/resources/istio.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019 KompiTech GmbH 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | package resources 18 | 19 | import ( 20 | crd "github.com/jiribroulik/pkg/apis/istio/v1alpha3" 21 | 22 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 23 | ) 24 | 25 | const ( 26 | extPort = 7051 27 | ccPort = 7052 28 | eventPort = 7053 29 | ordererPort = 7050 30 | ) 31 | 32 | //GatewayTemplate describes gateway 33 | type GatewayTemplate struct { 34 | Name string 35 | Namespace string 36 | Servers *[]crd.Server 37 | Label map[string]string 38 | OwnerReferences []metav1.OwnerReference 39 | } 40 | 41 | //VirtualServiceTemplate describes virtual service 42 | type VirtualServiceTemplate struct { 43 | Name string 44 | Namespace string 45 | Spec *crd.VirtualServiceSpec 46 | Label map[string]string 47 | OwnerReferences []metav1.OwnerReference 48 | } 49 | 50 | // NewVirtualService returns new virtual service 51 | func NewVirtualService(vsvc VirtualServiceTemplate) *crd.VirtualService { 52 | 53 | vsvcObjectMeta := metav1.ObjectMeta{ 54 | Name: vsvc.Name, 55 | Namespace: vsvc.Namespace, 56 | Labels: vsvc.Label, 57 | OwnerReferences: vsvc.OwnerReferences, 58 | } 59 | return &crd.VirtualService{ 60 | TypeMeta: metav1.TypeMeta{ 61 | Kind: "VirtualService", 62 | APIVersion: "networking.istio.io/v1alpha3", 63 | }, 64 | ObjectMeta: vsvcObjectMeta, 65 | Spec: *vsvc.Spec, 66 | } 67 | } 68 | 69 | // NewGateway returns new gateway 70 | func NewGateway(gtw GatewayTemplate) *crd.Gateway { 71 | 72 | gtwObjectMeta := metav1.ObjectMeta{ 73 | Name: gtw.Name, 74 | Namespace: gtw.Namespace, 75 | Labels: gtw.Label, 76 | OwnerReferences: gtw.OwnerReferences, 77 | } 78 | selector := map[string]string{"istio": "ingressgateway"} 79 | gtwSpec := crd.GatewaySpec{Selector: selector, Servers: *gtw.Servers} 80 | 81 | return &crd.Gateway{ 82 | TypeMeta: metav1.TypeMeta{ 83 | Kind: "Gateway", 84 | APIVersion: "networking.istio.io/v1alpha3", 85 | }, 86 | ObjectMeta: gtwObjectMeta, 87 | Spec: gtwSpec, 88 | } 89 | } 90 | 91 | // GetPeerVirtualServiceSpec returns virtual service spec for peer 92 | func GetPeerVirtualServiceSpec(name, target string) *crd.VirtualServiceSpec { 93 | spec := crd.VirtualServiceSpec{ 94 | Hosts: []string{target}, 95 | Gateways: []string{name}, 96 | } 97 | 98 | dst1 := crd.Destination{Port: crd.PortSelector{Number: uint32(extPort)}, Host: name} 99 | tcpMatchreq1 := []crd.TlsL4MatchAttributes{{Port: extPort, SniHosts: []string{target}}} 100 | dstWeight1 := []crd.DestinationWeight{{Destination: dst1, Weight: 100}} 101 | tcpRoute1 := crd.TLSRoute{Match: tcpMatchreq1, Route: dstWeight1} 102 | 103 | dst2 := crd.Destination{Port: crd.PortSelector{Number: uint32(ccPort)}, Host: name} 104 | tcpMatchreq2 := []crd.TlsL4MatchAttributes{{Port: ccPort, SniHosts: []string{target}}} 105 | dstWeight2 := []crd.DestinationWeight{{Destination: dst2, Weight: 100}} 106 | tcpRoute2 := crd.TLSRoute{Match: tcpMatchreq2, Route: dstWeight2} 107 | 108 | spec.Tls = []crd.TLSRoute{tcpRoute1, tcpRoute2} 109 | return &spec 110 | } 111 | 112 | // GetOrdererVirtualServiceSpec returns virtual service spec for orderer 113 | func GetOrdererVirtualServiceSpec(name, target string) *crd.VirtualServiceSpec { 114 | spec := crd.VirtualServiceSpec{ 115 | Hosts: []string{target}, 116 | Gateways: []string{name}, 117 | } 118 | 119 | dst := crd.Destination{Port: crd.PortSelector{Number: uint32(ordererPort)}, Host: name} 120 | tcpMatchreq := []crd.TlsL4MatchAttributes{{Port: ordererPort, SniHosts: []string{target}}} 121 | dstWeight := []crd.DestinationWeight{{Destination: dst, Weight: 100}} 122 | tcpRoute := crd.TLSRoute{Match: 
tcpMatchreq, Route: dstWeight} 123 | 124 | spec.Tls = []crd.TLSRoute{tcpRoute} 125 | return &spec 126 | } 127 | 128 | // GetPeerServerPorts returns server ports for peer 129 | func GetPeerServerPorts(target string) *[]crd.Server { 130 | server1 := crd.Server{Port: crd.Port{Name: "https-ext-listen-endpoint", Protocol: crd.ProtocolHTTPS, Number: extPort}, Hosts: []string{target}, TLS: &crd.TLSOptions{Mode: crd.TLSModePassThrough, SubjectAltNames: []string{}}} 131 | server2 := crd.Server{Port: crd.Port{Name: "https-chaincode-listen", Protocol: crd.ProtocolHTTPS, Number: ccPort}, Hosts: []string{target}, TLS: &crd.TLSOptions{Mode: crd.TLSModePassThrough, SubjectAltNames: []string{}}} 132 | return &[]crd.Server{server1, server2} 133 | } 134 | 135 | // GetOrdererServerPorts returns server ports for orderer 136 | func GetOrdererServerPorts(target string) *[]crd.Server { 137 | server := crd.Server{Port: crd.Port{Name: "https-orderer", Protocol: crd.ProtocolHTTPS, Number: ordererPort}, Hosts: []string{target}, TLS: &crd.TLSOptions{Mode: crd.TLSModePassThrough, SubjectAltNames: []string{}}} 138 | return &[]crd.Server{server} 139 | } 140 | -------------------------------------------------------------------------------- /pkg/resources/vault.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019 KompiTech GmbH 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package resources 18 | 19 | import ( 20 | "strings" 21 | 22 | corev1 "k8s.io/api/core/v1" 23 | ) 24 | 25 | // VaultInit vault init container configuration 26 | type VaultInit struct { 27 | Organization string 28 | CommonName string 29 | VaultAddress string 30 | TLSPath string 31 | MSPPath string 32 | Cluster string 33 | NodeType string 34 | } 35 | 36 | // GetInitContainer returns Vault init container spec 37 | func GetInitContainer(vault VaultInit) []corev1.Container { 38 | 39 | // VaultAddress := "https://10.27.247.45:8200" 40 | // TLSPath := "/etc/hyperledger/fabric/tls" 41 | // MSPPath := "/etc/hyperledger/fabric/msp" 42 | 43 | cmd := `if [ ! 
-f "$MSP_PATH"/keystore/peer.key ]; then 44 | export KUBE_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token); 45 | mkdir "$MSP_PATH"/signcerts; 46 | mkdir "$MSP_PATH"/keystore; 47 | mkdir "$MSP_PATH"/cacerts; 48 | mkdir /etc/vault; 49 | curl --request POST -k --data '{"jwt": "'"$KUBE_TOKEN"'", "role": "hyperledger"}' -k "$VAULT_ADDRESS"/v1/auth/kubernetes-"$REGION_NAME"/login | jq -j '.auth.client_token' > /etc/vault/token; 50 | export X_VAULT_TOKEN=$(cat /etc/vault/token); 51 | curl -XPOST -k -H "X-Vault-Token: $X_VAULT_TOKEN" -d '{"common_name": "'$COMMON_NAME'"}' "$VAULT_ADDRESS"/v1/"$ORG_ID"/issue/` + strings.Title(vault.NodeType) + ` > /tmp/response; 52 | cat /tmp/response | jq -r -j .data.certificate > "$MSP_PATH"/signcerts/peer.crt; 53 | cat /tmp/response | jq -r -j .data.private_key > "$MSP_PATH"/keystore/peer.key; 54 | cat /tmp/response | jq -r -j .data.issuing_ca > "$MSP_PATH"/cacerts/ca.crt; 55 | fi;` 56 | 57 | if vault.NodeType == "orderer" { 58 | // s := `if [ ! -f "$TLS_PATH"/kafka_cert.key ]; then 59 | // curl -XGET -k -H "X-Vault-Token: $X_VAULT_TOKEN" "$VAULT_ADDRESS"/v1/secret/kafka-cluster-int-kv > /tmp/tlsresponse; 60 | // cat /tmp/tlsresponse | jq -r -j .data.ca_cert > "$TLS_PATH"/kafka_ca.crt; 61 | // curl -XPOST -k -H "X-Vault-Token: $X_VAULT_TOKEN" -d '{"common_name": "'$COMMON_NAME'.kompitech.com"}' "$VAULT_ADDRESS"/v1/kafka-client-int/issue/TLS > /tmp/tlsresponse; 62 | // cat /tmp/tlsresponse | jq -r -j .data.certificate > "$TLS_PATH"/kafka_cert.crt; 63 | // cat /tmp/tlsresponse | jq -r -j .data.private_key > "$TLS_PATH"/kafka_cert.key; 64 | // fi` 65 | s := `if [ ! -f "$TLS_PATH"/cert.key ]; then 66 | curl -k -H "X-Vault-Token: $X_VAULT_TOKEN" "$VAULT_ADDRESS"/v1/"$ORG_ID"-kv/"$CORE_PEER_NAME"/tls > /tmp/tlsresponse; 67 | cat /tmp/tlsresponse | jq -r -j .data.cert > "$TLS_PATH"/cert.crt; 68 | cat /tmp/tlsresponse | jq -r -j .data.key > "$TLS_PATH"/cert.key; 69 | cat /tmp/tlsresponse | jq -r -j .data.ca > "$TLS_PATH"/ca.crt; 70 | fi` 71 | cmd = cmd + s 72 | } else { 73 | s := `if [ ! 
-f "$TLS_PATH"/cert.key ]; then 74 | curl -XPOST -k -H "X-Vault-Token: $X_VAULT_TOKEN" -d '{"common_name": "'$COMMON_NAME'"}' "$VAULT_ADDRESS"/v1/"$ORG_ID"/issue/TLS > /tmp/tlsresponse; 75 | cat /tmp/tlsresponse | jq -r -j .data.certificate > "$TLS_PATH"/cert.crt; 76 | cat /tmp/tlsresponse | jq -r -j .data.private_key > "$TLS_PATH"/cert.key; 77 | cat /tmp/tlsresponse | jq -r -j .data.issuing_ca > "$TLS_PATH"/ca.crt; 78 | fi` 79 | cmd = cmd + s 80 | } 81 | 82 | return []corev1.Container{ 83 | { 84 | Name: "vault-init", 85 | Image: "everpeace/curl-jq", 86 | Command: []string{ 87 | "/bin/sh", 88 | "-c", 89 | cmd, 90 | }, 91 | VolumeMounts: []corev1.VolumeMount{ 92 | { 93 | Name: "certificate", 94 | MountPath: vault.MSPPath, 95 | SubPath: "data/msp", 96 | }, 97 | { 98 | Name: "certificate", 99 | MountPath: vault.TLSPath, 100 | SubPath: "data/tls", 101 | }, 102 | }, 103 | Env: []corev1.EnvVar{ 104 | { 105 | Name: "CORE_PEER_ID", 106 | ValueFrom: &corev1.EnvVarSource{ 107 | FieldRef: &corev1.ObjectFieldSelector{ 108 | FieldPath: "metadata.name", 109 | }, 110 | }, 111 | }, 112 | { 113 | Name: "CORE_PEER_NAME", 114 | ValueFrom: &corev1.EnvVarSource{ 115 | FieldRef: &corev1.ObjectFieldSelector{ 116 | FieldPath: "metadata.labels['name']", 117 | }, 118 | }, 119 | }, 120 | { 121 | Name: "REGION_NAME", 122 | Value: vault.Cluster, 123 | }, 124 | { 125 | Name: "COMMON_NAME", 126 | Value: vault.CommonName, 127 | }, 128 | { 129 | Name: "ORG_ID", 130 | Value: vault.Organization, 131 | }, 132 | { 133 | Name: "TLS_PATH", 134 | Value: vault.TLSPath, 135 | }, 136 | { 137 | Name: "MSP_PATH", 138 | Value: vault.MSPPath, 139 | }, 140 | { 141 | Name: "VAULT_ADDRESS", 142 | Value: vault.VaultAddress, 143 | }, 144 | }, 145 | }, 146 | } 147 | } 148 | -------------------------------------------------------------------------------- /version/version.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019 KompiTech GmbH 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package version 18 | 19 | var ( 20 | Version = "0.0.1" 21 | ) 22 | --------------------------------------------------------------------------------
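The DNS helpers in pkg/resources/dns.go above keep SkyDNS records in etcd under a key built by reversing the FQDN's labels, joining them with "/" and prefixing "/skydns/", with a JSON value of the form {"host":"<ClusterIP>"}. The standalone Go sketch below mirrors that key/value construction (CheckDNS/getSkyDNSEntry and updateDNSRecord) so the layout can be sanity-checked without an etcd cluster; it is not part of the repository, and the package name, helper names and the example FQDN/IP are illustrative only.

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// skyDNSKey mirrors what CheckDNS builds via getSkyDNSEntry: split the FQDN on ".",
// reverse the labels, join them with "/" and prefix "/skydns/".
func skyDNSKey(fqdn string) string {
	labels := strings.Split(fqdn, ".")
	reversed := make([]string, 0, len(labels))
	for i := len(labels) - 1; i >= 0; i-- {
		reversed = append(reversed, labels[i])
	}
	return "/skydns/" + strings.Join(reversed, "/")
}

// skyDNSValue mirrors the JSON payload written by updateDNSRecord.
func skyDNSValue(clusterIP string) string {
	b, _ := json.Marshal(map[string]string{"host": clusterIP})
	return string(b)
}

func main() {
	// Illustrative values only: in the operator the FQDN comes from the Service's
	// "fqdn" annotation (set from Spec.CommonName) and the IP from the Service's ClusterIP.
	fmt.Println(skyDNSKey("peer0.org1.example.com")) // /skydns/com/example/org1/peer0
	fmt.Println(skyDNSValue("10.96.12.34"))          // {"host":"10.96.12.34"}
}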