├── .bingo ├── .gitignore ├── README.md ├── Variables.mk ├── bingo.mod ├── bingo.sum ├── controller-gen.mod ├── controller-gen.sum ├── go-junit-report.mod ├── go-junit-report.sum ├── go.mod ├── gofumports.mod ├── gofumports.sum ├── golangci-lint.mod ├── golangci-lint.sum ├── junitmerge.mod ├── junitmerge.sum ├── junitreport.mod ├── junitreport.sum ├── kustomize.mod ├── kustomize.sum ├── operator-sdk.mod ├── operator-sdk.sum ├── opm.mod ├── opm.sum ├── promtool.mod ├── promtool.sum └── variables.env ├── .github ├── ISSUE_TEMPLATE │ └── bug_report.md └── pull_request_template.md ├── .gitignore ├── .output └── .keep ├── Dockerfile ├── Dockerfile.dev ├── Dockerfile.in ├── Dockerfile.src ├── LICENSE ├── Makefile ├── OWNERS ├── PROJECT ├── README.md ├── apis └── logging │ └── v1 │ ├── elasticsearch_types.go │ ├── groupversion_info.go │ ├── index_management_types.go │ ├── kibana_types.go │ └── zz_generated.deepcopy.go ├── build └── Dockerfile ├── bundle.Dockerfile ├── bundle ├── manifests │ ├── elasticsearch-operator-metrics-monitor_monitoring.coreos.com_v1_servicemonitor.yaml │ ├── elasticsearch-operator-metrics_v1_service.yaml │ ├── elasticsearch-operator.clusterserviceversion.yaml │ ├── leader-election-role_rbac.authorization.k8s.io_v1_role.yaml │ ├── leader-election-rolebinding_rbac.authorization.k8s.io_v1_rolebinding.yaml │ ├── logging.openshift.io_elasticsearches.yaml │ ├── logging.openshift.io_kibanas.yaml │ ├── metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml │ ├── metrics-reader_rbac.authorization.k8s.io_v1beta1_clusterrole.yaml │ ├── prometheus_rbac.authorization.k8s.io_v1_role.yaml │ ├── prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml │ ├── proxy-role_rbac.authorization.k8s.io_v1_clusterrole.yaml │ └── proxy-rolebinding_rbac.authorization.k8s.io_v1_clusterrolebinding.yaml ├── metadata │ ├── annotations.yaml │ └── properties.yaml └── tests │ └── scorecard │ └── config.yaml ├── config ├── config.yaml ├── crd │ ├── bases │ │ ├── logging.openshift.io_elasticsearches.yaml │ │ └── logging.openshift.io_kibanas.yaml │ ├── kustomization.yaml │ └── kustomizeconfig.yaml ├── default │ ├── kustomization.yaml │ └── manager_auth_proxy_patch.yaml ├── manager │ ├── kustomization.yaml │ └── manager.yaml ├── manifests │ ├── bases │ │ └── elasticsearch-operator.clusterserviceversion.yaml │ └── kustomization.yaml ├── prometheus │ ├── kustomization.yaml │ └── monitor.yaml ├── rbac │ ├── auth_proxy_client_clusterrole.yaml │ ├── auth_proxy_role.yaml │ ├── auth_proxy_role_binding.yaml │ ├── auth_proxy_service.yaml │ ├── elasticsearch_editor_role.yaml │ ├── elasticsearch_viewer_role.yaml │ ├── kibana_editor_role.yaml │ ├── kibana_viewer_role.yaml │ ├── kustomization.yaml │ ├── leader_election_role.yaml │ ├── leader_election_role_binding.yaml │ ├── prometheus_role.yaml │ ├── prometheus_role_binding.yaml │ ├── role.yaml │ └── role_binding.yaml └── samples │ ├── kustomization.yaml │ ├── logging_v1_elasticsearch.yaml │ └── logging_v1_kibana.yaml ├── controllers └── logging │ ├── elasticsearch_controller.go │ ├── kibana_controller.go │ └── secret_controller.go ├── dev-meta.yaml ├── docs ├── HACKING.md ├── REVIEW.md ├── access-control.md ├── alerts.md ├── images │ ├── access-control.svg │ └── troubleshooting │ │ ├── ES_Metrics_Dashboard.png │ │ ├── ES_Metrics_forcemerge.png │ │ ├── areachart_visualization.png │ │ ├── barchart_visualization.png │ │ └── dashboard_logs_by_namespace.png ├── pvc-recovery.md ├── troubleshooting.md └── troubleshooting_resources │ ├── query_1.json │ └── query_2.json ├── 
files ├── dashboards │ └── logging-dashboard-elasticsearch.json ├── prometheus_alerts.yml └── prometheus_recording_rules.yml ├── go.mod ├── go.sum ├── golangci.yaml ├── hack ├── build-image.sh ├── cert_generation.sh ├── common ├── cr.yaml ├── cvo-unmanage-console.yaml ├── deploy-example-secrets.sh ├── deploy-image.sh ├── deploy-setup.sh ├── generate-dockerfile-from-midstream ├── image-stream-build-config-template.yaml ├── kibana-cr.yaml ├── lib │ ├── build │ │ ├── archive.sh │ │ ├── binaries.sh │ │ ├── constants.sh │ │ ├── environment.sh │ │ ├── images.sh │ │ ├── release.sh │ │ ├── rpm.sh │ │ └── version.sh │ ├── cleanup.sh │ ├── cmd.sh │ ├── compress.awk │ ├── init.sh │ ├── log │ │ ├── output.sh │ │ ├── stacktrace.sh │ │ └── system.sh │ ├── start.sh │ ├── test │ │ └── junit.sh │ └── util │ │ ├── docs.sh │ │ ├── ensure.sh │ │ ├── environment.sh │ │ ├── find.sh │ │ ├── golang.sh │ │ ├── logs.sh │ │ ├── misc.sh │ │ ├── text.sh │ │ └── trap.sh ├── lint-dockerfile ├── prometheus-operator-crd-cluster-roles.yaml ├── test-e2e.sh ├── testing-olm-upgrade │ ├── pre-upgrade-commands.sh │ ├── resources │ │ └── cr.yaml │ ├── test-upgrade-n-1-n.sh │ └── upgrade-common └── testing-olm │ ├── assertions │ ├── test-001-operator-sdk-e2e.sh │ ├── test-200-verify-es-metrics-access.sh │ ├── test-657-im-block-autocreate-for-write-suffix.sh │ └── utils ├── internal ├── constants │ └── constants.go ├── elasticsearch │ ├── certificates.go │ ├── cluster.go │ ├── cluster_test.go │ ├── clusterrestart.go │ ├── clusterrestart_test.go │ ├── common.go │ ├── common_test.go │ ├── configmaps.go │ ├── configmaps_test.go │ ├── configuration_tmpl.go │ ├── cr.go │ ├── dashboards.go │ ├── defaults.go │ ├── defaults_test.go │ ├── deployment.go │ ├── deployment_test.go │ ├── elasticsearch.go │ ├── elasticsearch_suite_test.go │ ├── elasticsearch_test.go │ ├── esclient │ │ ├── client.go │ │ ├── client_test.go │ │ ├── cluster.go │ │ ├── cluster_test.go │ │ ├── health.go │ │ ├── helper.go │ │ ├── indices.go │ │ ├── indices_test.go │ │ ├── nodes.go │ │ ├── replicas.go │ │ ├── shards.go │ │ ├── templates.go │ │ └── templates_test.go │ ├── nodetypefactory.go │ ├── prometheus_rule.go │ ├── prometheus_rule_test.go │ ├── rbac.go │ ├── reconciler.go │ ├── recovery.go │ ├── secret.go │ ├── securitycontextconstraints.go │ ├── service.go │ ├── service_monitor.go │ ├── service_monitor_test.go │ ├── service_test.go │ ├── serviceaccount.go │ ├── statefulset.go │ ├── status.go │ ├── status_test.go │ ├── util.go │ └── util_test.go ├── indexmanagement │ ├── converters.go │ ├── converters_test.go │ ├── expectations_test.go │ ├── index_management_test.go │ ├── indexmanagement_suite_test.go │ ├── reconcile.go │ ├── reconcile_test.go │ ├── scripts.go │ ├── validations.go │ ├── validations_mappings_test.go │ ├── validations_policies_test.go │ └── validations_test.go ├── kibana │ ├── defaults.go │ ├── deployment.go │ ├── helpers_test.go │ ├── kibana_suite_test.go │ ├── kibana_test.go │ ├── kibanarequest.go │ ├── reconciler.go │ ├── reconciler_test.go │ ├── route.go │ ├── secret.go │ ├── serviceaccount.go │ ├── status.go │ └── trustedcabundle.go ├── manifests │ ├── configmap │ │ ├── build.go │ │ └── configmap.go │ ├── console │ │ ├── build.go │ │ ├── consoleexternalloglink.go │ │ └── consolelink.go │ ├── cronjob │ │ ├── build.go │ │ └── cronjob.go │ ├── deployment │ │ ├── build.go │ │ └── deployment.go │ ├── persistentvolume │ │ ├── build.go │ │ └── persistentvolumeclaim.go │ ├── pod │ │ ├── build.go │ │ ├── build_test.go │ │ ├── compare.go │ │ ├── 
compare_test.go │ │ └── pod.go │ ├── prometheusrule │ │ ├── build.go │ │ └── prometheusrule.go │ ├── rbac │ │ ├── build.go │ │ ├── clusterrole.go │ │ ├── clusterrolebinding.go │ │ ├── role.go │ │ └── rolebinding.go │ ├── route │ │ ├── build.go │ │ └── route.go │ ├── secret │ │ ├── build.go │ │ └── secret.go │ ├── securitycontextconstraints │ │ ├── build.go │ │ └── securitycontextconstraints.go │ ├── service │ │ ├── build.go │ │ └── service.go │ ├── serviceaccount │ │ ├── build.go │ │ └── serviceaccount.go │ ├── servicemonitor │ │ ├── build.go │ │ └── servicemonitor.go │ └── statefulset │ │ ├── build.go │ │ └── statefulset.go ├── metrics │ └── metrics.go ├── types │ └── elasticsearch │ │ └── types.go └── utils │ ├── comparators │ ├── envvars.go │ ├── envvars_test.go │ ├── maps.go │ ├── resources.go │ ├── selectors.go │ ├── tolerations.go │ ├── versions.go │ ├── versions_test.go │ └── volume_mounts.go │ ├── resources.go │ ├── utils.go │ └── utils_test.go ├── main.go ├── olm_deploy ├── operatorregistry │ ├── Dockerfile │ ├── catalog-source.yaml │ ├── elasticsearch-operator.package.yaml │ ├── registry-deployment.yaml │ └── service.yaml ├── scripts │ ├── catalog-build.sh │ ├── catalog-deploy.sh │ ├── catalog-uninstall.sh │ ├── env.sh │ ├── operator-install.sh │ ├── operator-uninstall.sh │ ├── registry-init.sh │ └── wait_for_deployment.sh └── subscription │ ├── operator-group.yaml │ └── subscription.yaml ├── origin-meta.yaml ├── test ├── e2e │ ├── elasticsearch_metric_test.go │ ├── elasticsearch_test.go │ ├── elasticsearch_write_test.go │ ├── kibana_test.go │ ├── main_test.go │ └── utils.go ├── files │ ├── dummycrd.yaml │ ├── emptyToken │ ├── prometheus-unit-tests │ │ └── test.yml │ └── testToken ├── helpers │ ├── elasticsearch.go │ ├── envvars.go │ ├── json.go │ ├── runtime │ │ └── client.go │ └── yaml.go └── utils │ └── utils.go └── version └── version.go /.bingo/.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Ignore everything 3 | * 4 | 5 | # But not these files: 6 | !.gitignore 7 | !*.mod 8 | !*.sum 9 | !README.md 10 | !Variables.mk 11 | !variables.env 12 | 13 | *tmp.mod 14 | -------------------------------------------------------------------------------- /.bingo/README.md: -------------------------------------------------------------------------------- 1 | # Project Development Dependencies. 2 | 3 | This directory stores Go modules with a pinned, buildable package for each tool used within this repository, managed by https://github.com/bwplotka/bingo. 4 | 5 | * Run `bingo get` to install all tools that have their own module file in this directory. 6 | * Run `bingo get <tool>` to install the `<tool>` that has its own module file in this directory. 7 | * For Makefile: Make sure to put `include .bingo/Variables.mk` in your Makefile, then use the $(<TOOL>) variable, where <tool> is the name of the .bingo/<tool>.mod file. 8 | * For shell: Run `source .bingo/variables.env` to source an environment variable for each tool. 9 | * For Go: Import `.bingo/variables.go` for the variable names. 10 | * See https://github.com/bwplotka/bingo or run `bingo -h` for how to add, remove or change binary dependencies. 11 | 12 | ## Requirements 13 | 14 | * Go 1.14+ 15 | -------------------------------------------------------------------------------- /.bingo/bingo.mod: -------------------------------------------------------------------------------- 1 | module _ // Auto generated by https://github.com/bwplotka/bingo.
DO NOT EDIT 2 | 3 | go 1.20 4 | 5 | require github.com/bwplotka/bingo v0.8.0 6 | -------------------------------------------------------------------------------- /.bingo/controller-gen.mod: -------------------------------------------------------------------------------- 1 | module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT 2 | 3 | go 1.20 4 | 5 | require sigs.k8s.io/controller-tools v0.11.3 // cmd/controller-gen 6 | -------------------------------------------------------------------------------- /.bingo/go-junit-report.mod: -------------------------------------------------------------------------------- 1 | module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT 2 | 3 | go 1.18 4 | 5 | require github.com/jstemmer/go-junit-report v0.9.1 6 | -------------------------------------------------------------------------------- /.bingo/go-junit-report.sum: -------------------------------------------------------------------------------- 1 | github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= 2 | github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= 3 | -------------------------------------------------------------------------------- /.bingo/go.mod: -------------------------------------------------------------------------------- 1 | module _ // Fake go.mod auto-created by 'bingo' for go -moddir compatibility with non-Go projects. Commit this file, together with other .mod files. -------------------------------------------------------------------------------- /.bingo/gofumports.mod: -------------------------------------------------------------------------------- 1 | module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT 2 | 3 | go 1.18 4 | 5 | require mvdan.cc/gofumpt v0.0.0-20201027171050-85d5401eb0f6 // gofumports 6 | -------------------------------------------------------------------------------- /.bingo/golangci-lint.mod: -------------------------------------------------------------------------------- 1 | module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT 2 | 3 | go 1.20 4 | 5 | require github.com/golangci/golangci-lint v1.51.2 // cmd/golangci-lint 6 | -------------------------------------------------------------------------------- /.bingo/junitmerge.mod: -------------------------------------------------------------------------------- 1 | module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT 2 | 3 | go 1.18 4 | 5 | require github.com/openshift/release v0.0.0-20201103150245-a5287ef1495b // tools/junitmerge 6 | -------------------------------------------------------------------------------- /.bingo/junitmerge.sum: -------------------------------------------------------------------------------- 1 | github.com/openshift/release v0.0.0-20201103150245-a5287ef1495b h1:KNChnzZbuEQNUytnkWn7E36pfYz5Y7X1niWfvf71FAU= 2 | github.com/openshift/release v0.0.0-20201103150245-a5287ef1495b/go.mod h1:BbszAfAbRsw+lZvDi5xxQlQCgzF3/DyRmocnobVHAak= 3 | -------------------------------------------------------------------------------- /.bingo/junitreport.mod: -------------------------------------------------------------------------------- 1 | module _ // Auto generated by https://github.com/bwplotka/bingo. 
DO NOT EDIT 2 | 3 | go 1.18 4 | 5 | require github.com/openshift/release v0.0.0-20201103082000-d8009dcf7503 // tools/junitreport 6 | -------------------------------------------------------------------------------- /.bingo/junitreport.sum: -------------------------------------------------------------------------------- 1 | github.com/openshift/origin v4.1.0+incompatible h1:sdf+1/92CKR24IqOR/o4IBsJCku8/ddu/bAesnnh1Nw= 2 | github.com/openshift/origin v4.1.0+incompatible/go.mod h1:0Rox5r9C8aQn6j1oAOQ0c1uC86mYbUFObzjBRvUKHII= 3 | github.com/openshift/release v0.0.0-20201103082000-d8009dcf7503 h1:wytv3mOyyCTxms4EbKJzi2pGU84pbmKsd4ARhrdQ9yo= 4 | github.com/openshift/release v0.0.0-20201103082000-d8009dcf7503/go.mod h1:BbszAfAbRsw+lZvDi5xxQlQCgzF3/DyRmocnobVHAak= 5 | -------------------------------------------------------------------------------- /.bingo/kustomize.mod: -------------------------------------------------------------------------------- 1 | module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT 2 | 3 | go 1.18 4 | 5 | require sigs.k8s.io/kustomize/kustomize/v4 v4.5.7 6 | -------------------------------------------------------------------------------- /.bingo/operator-sdk.mod: -------------------------------------------------------------------------------- 1 | module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT 2 | 3 | go 1.20 4 | 5 | replace github.com/containerd/containerd => github.com/containerd/containerd v1.4.11 6 | 7 | replace github.com/docker/distribution => github.com/docker/distribution v0.0.0-20191216044856-a8371794149d 8 | 9 | replace github.com/mattn/go-sqlite3 => github.com/mattn/go-sqlite3 v1.10.0 10 | 11 | replace go.opentelemetry.io/otel => go.opentelemetry.io/otel v0.20.0 12 | 13 | replace go.opentelemetry.io/otel/sdk => go.opentelemetry.io/otel/sdk v0.20.0 14 | 15 | replace go.opentelemetry.io/otel/trace => go.opentelemetry.io/otel/trace v0.20.0 16 | 17 | replace go.opentelemetry.io/proto/otlp => go.opentelemetry.io/proto/otlp v0.7.0 18 | 19 | require github.com/operator-framework/operator-sdk v1.27.0 // cmd/operator-sdk 20 | -------------------------------------------------------------------------------- /.bingo/opm.mod: -------------------------------------------------------------------------------- 1 | module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT 2 | 3 | go 1.20 4 | 5 | replace github.com/docker/distribution => github.com/docker/distribution v0.0.0-20191216044856-a8371794149d 6 | 7 | require github.com/operator-framework/operator-registry v1.26.4 // cmd/opm 8 | -------------------------------------------------------------------------------- /.bingo/promtool.mod: -------------------------------------------------------------------------------- 1 | module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT 2 | 3 | go 1.18 4 | 5 | replace k8s.io/klog => github.com/simonpasquier/klog-gokit v0.1.0 6 | 7 | require github.com/prometheus/prometheus v1.8.2-0.20200522113006-f4dd45609a05 // cmd/promtool 8 | -------------------------------------------------------------------------------- /.bingo/variables.env: -------------------------------------------------------------------------------- 1 | # Auto generated binary variables helper managed by https://github.com/bwplotka/bingo v0.8. DO NOT EDIT. 2 | # All tools are designed to be build inside $GOBIN. 3 | # Those variables will work only until 'bingo get' was invoked, or if tools were installed via Makefile's Variables.mk. 
4 | GOBIN=${GOBIN:=$(go env GOBIN)} 5 | 6 | if [ -z "$GOBIN" ]; then 7 | GOBIN="$(go env GOPATH)/bin" 8 | fi 9 | 10 | 11 | BINGO="${GOBIN}/bingo-v0.8.0" 12 | 13 | CONTROLLER_GEN="${GOBIN}/controller-gen-v0.11.3" 14 | 15 | GO_JUNIT_REPORT="${GOBIN}/go-junit-report-v0.9.1" 16 | 17 | GOFUMPORTS="${GOBIN}/gofumports-v0.0.0-20201027171050-85d5401eb0f6" 18 | 19 | GOLANGCI_LINT="${GOBIN}/golangci-lint-v1.51.2" 20 | 21 | JUNITMERGE="${GOBIN}/junitmerge-v0.0.0-20201103150245-a5287ef1495b" 22 | 23 | JUNITREPORT="${GOBIN}/junitreport-v0.0.0-20201103082000-d8009dcf7503" 24 | 25 | KUSTOMIZE="${GOBIN}/kustomize-v4.5.7" 26 | 27 | OPERATOR_SDK="${GOBIN}/operator-sdk-v1.27.0" 28 | 29 | OPM="${GOBIN}/opm-v1.26.4" 30 | 31 | PROMTOOL="${GOBIN}/promtool-v1.8.2-0.20200522113006-f4dd45609a05" 32 | 33 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | 21 | 22 | **Describe the bug** 23 | A clear and concise description of what the bug is. 24 | 25 | **Environment** 26 | - Versions of OpenShift, Cluster Logging and any other relevant components 27 | - ClusterLogging instance 28 | 29 | **Logs** 30 | Capture relevant logs, post them to http://gist.github.com/ and post the links in the issue. 31 | 32 | **Expected behavior** 33 | A clear and concise description of what you expected to happen. 34 | 35 | **Actual behavior** 36 | A clear and concise description of what actually happened. 37 | 38 | **To Reproduce** 39 | Steps to reproduce the behavior: 40 | 1. 41 | 2. 42 | 3. 43 | 44 | **Additional context** 45 | Add any other context about the problem here. 
46 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ### Description 2 | 3 | 4 | /cc 5 | /assign 6 | 7 | /cherry-pick 8 | 9 | ### Links 10 | 11 | - Depending on PR(s): 12 | - Bugzilla: 13 | - Github issue: 14 | - JIRA: 15 | - Enhancement proposal: 16 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Bin output 2 | bin/* 3 | 4 | # Temporary Build Files 5 | tmp/_output 6 | tmp/_test 7 | _output 8 | pf.log 9 | 10 | # GoLand 11 | .idea 12 | 13 | # Created by https://www.gitignore.io/api/go,vim,emacs,visualstudiocode 14 | 15 | ### Emacs ### 16 | # -*- mode: gitignore; -*- 17 | *~ 18 | \#*\# 19 | /.emacs.desktop 20 | /.emacs.desktop.lock 21 | *.elc 22 | auto-save-list 23 | tramp 24 | .\#* 25 | 26 | # Org-mode 27 | .org-id-locations 28 | *_archive 29 | 30 | # flymake-mode 31 | *_flymake.* 32 | 33 | # eshell files 34 | /eshell/history 35 | /eshell/lastdir 36 | 37 | # elpa packages 38 | /elpa/ 39 | 40 | # reftex files 41 | *.rel 42 | 43 | # AUCTeX auto folder 44 | /auto/ 45 | 46 | # cask packages 47 | .cask/ 48 | dist/ 49 | 50 | # Flycheck 51 | flycheck_*.el 52 | 53 | # server auth directory 54 | /server/ 55 | 56 | # projectiles files 57 | .projectile 58 | projectile-bookmarks.eld 59 | 60 | # directory configuration 61 | .dir-locals.el 62 | 63 | # saveplace 64 | places 65 | 66 | # url cache 67 | url/cache/ 68 | 69 | # cedet 70 | ede-projects.el 71 | 72 | # smex 73 | smex-items 74 | 75 | # company-statistics 76 | company-statistics-cache.el 77 | 78 | # anaconda-mode 79 | anaconda-mode/ 80 | 81 | ### Go ### 82 | # Binaries for programs and plugins 83 | *.exe 84 | *.exe~ 85 | *.dll 86 | *.so 87 | *.dylib 88 | 89 | # Test binary, build with 'go test -c' 90 | *.test 91 | 92 | # Output of the go coverage tool, specifically when used with LiteIDE 93 | *.out 94 | 95 | ### Vim ### 96 | # swap 97 | .sw[a-p] 98 | .*.sw[a-p] 99 | # session 100 | Session.vim 101 | # temporary 102 | .netrwhist 103 | # auto-generated tag files 104 | tags 105 | 106 | ### VisualStudioCode ### 107 | .vscode/* 108 | !.vscode/settings.json 109 | !.vscode/tasks.json 110 | !.vscode/launch.json 111 | !.vscode/extensions.json 112 | .history 113 | 114 | 115 | # End of https://www.gitignore.io/api/go,vim,emacs,visualstudiocode 116 | /.zz_generate_timestamp 117 | /tmp 118 | .output/ 119 | .cache/ -------------------------------------------------------------------------------- /.output/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openshift/elasticsearch-operator/81cd6e70c15eea7c947e5f2aee3a9a2ba3b0806e/.output/.keep -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ### This is a generated file from Dockerfile.in ### 2 | #@follow_tag(registry-proxy.engineering.redhat.com/rh-osbs/openshift-golang-builder:rhel_9_golang_1.20) 3 | FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.20-openshift-4.14 AS builder 4 | 5 | ENV BUILD_VERSION=${CI_CONTAINER_VERSION} 6 | ENV OS_GIT_MAJOR=${CI_X_VERSION} 7 | ENV OS_GIT_MINOR=${CI_Y_VERSION} 8 | ENV OS_GIT_PATCH=${CI_Z_VERSION} 9 | ENV SOURCE_GIT_COMMIT=${CI_ELASTICSEARCH_OPERATOR_UPSTREAM_COMMIT} 10 | ENV 
SOURCE_GIT_URL=${CI_ELASTICSEARCH_OPERATOR_UPSTREAM_URL} 11 | 12 | 13 | WORKDIR /go/src/github.com/openshift/elasticsearch-operator 14 | 15 | COPY ${REMOTE_SOURCE}/apis apis 16 | COPY ${REMOTE_SOURCE}/controllers controllers 17 | COPY ${REMOTE_SOURCE}/files files 18 | COPY ${REMOTE_SOURCE}/internal internal 19 | COPY ${REMOTE_SOURCE}/bundle bundle 20 | COPY ${REMOTE_SOURCE}/version version 21 | COPY ${REMOTE_SOURCE}/.bingo ./.bingo 22 | ADD ${REMOTE_SOURCE}/Makefile ${REMOTE_SOURCE}/main.go ${REMOTE_SOURCE}/go.mod ${REMOTE_SOURCE}/go.sum ./ 23 | 24 | RUN make build 25 | 26 | #@follow_tag(registry.redhat.io/ubi9:latest) 27 | FROM registry.redhat.io/ubi9:latest 28 | LABEL \ 29 | io.k8s.display-name="OpenShift elasticsearch-operator" \ 30 | io.k8s.description="This is the component that manages an Elasticsearch cluster on a kubernetes based platform" \ 31 | io.openshift.tags="openshift,logging,elasticsearch" \ 32 | com.redhat.delivery.appregistry="false" \ 33 | License="Apache-2.0" \ 34 | maintainer="AOS Logging " \ 35 | name="openshift-logging/elasticsearch-rhel8-operator" \ 36 | com.redhat.component="elasticsearch-operator-container" \ 37 | io.openshift.maintainer.product="OpenShift Container Platform" \ 38 | io.openshift.build.commit.id=${CI_ELASTICSEARCH_OPERATOR_UPSTREAM_COMMIT} \ 39 | io.openshift.build.source-location=${CI_ELASTICSEARCH_OPERATOR_UPSTREAM_URL} \ 40 | io.openshift.build.commit.url=${CI_ELASTICSEARCH_OPERATOR_UPSTREAM_URL}/commit/${CI_ELASTICSEARCH_OPERATOR_UPSTREAM_COMMIT} \ 41 | version=${CI_CONTAINER_VERSION} 42 | 43 | ENV ALERTS_FILE_PATH="/etc/elasticsearch-operator/files/prometheus_alerts.yml" 44 | ENV RULES_FILE_PATH="/etc/elasticsearch-operator/files/prometheus_recording_rules.yml" 45 | ENV ES_DASHBOARD_FILE="/etc/elasticsearch-operator/files/dashboards/logging-dashboard-elasticsearch.json" 46 | ENV RUNBOOK_BASE_URL="https://github.com/openshift/elasticsearch-operator/blob/master/docs/alerts.md" 47 | 48 | COPY --from=builder /go/src/github.com/openshift/elasticsearch-operator/bin/elasticsearch-operator /usr/bin/ 49 | COPY --from=builder /go/src/github.com/openshift/elasticsearch-operator/files/ /etc/elasticsearch-operator/files/ 50 | COPY --from=builder /go/src/github.com/openshift/elasticsearch-operator/bundle /bundle 51 | RUN mkdir /tmp/ocp-eo && \ 52 | chmod og+w /tmp/ocp-eo 53 | 54 | WORKDIR /usr/bin 55 | ENTRYPOINT ["elasticsearch-operator"] 56 | 57 | -------------------------------------------------------------------------------- /Dockerfile.dev: -------------------------------------------------------------------------------- 1 | ### This is a generated file from Dockerfile.in ### 2 | #@follow_tag(registry-proxy.engineering.redhat.com/rh-osbs/openshift-golang-builder:rhel_9_golang_1.20) 3 | FROM registry.ci.openshift.org/openshift/release:rhel-9-release-golang-1.20-openshift-4.14 AS builder 4 | 5 | ENV BUILD_VERSION=${CI_CONTAINER_VERSION} 6 | ENV OS_GIT_MAJOR=${CI_X_VERSION} 7 | ENV OS_GIT_MINOR=${CI_Y_VERSION} 8 | ENV OS_GIT_PATCH=${CI_Z_VERSION} 9 | ENV SOURCE_GIT_COMMIT=${CI_ELASTICSEARCH_OPERATOR_UPSTREAM_COMMIT} 10 | ENV SOURCE_GIT_URL=${CI_ELASTICSEARCH_OPERATOR_UPSTREAM_URL} 11 | 12 | 13 | WORKDIR /go/src/github.com/openshift/elasticsearch-operator 14 | 15 | COPY ${REMOTE_SOURCE}/apis apis 16 | COPY ${REMOTE_SOURCE}/controllers controllers 17 | COPY ${REMOTE_SOURCE}/files files 18 | COPY ${REMOTE_SOURCE}/internal internal 19 | COPY ${REMOTE_SOURCE}/bundle bundle 20 | COPY ${REMOTE_SOURCE}/version version 21 | COPY ${REMOTE_SOURCE}/.bingo ./.bingo 22 
| ADD ${REMOTE_SOURCE}/Makefile ${REMOTE_SOURCE}/main.go ${REMOTE_SOURCE}/go.mod ${REMOTE_SOURCE}/go.sum ./ 23 | 24 | RUN make build 25 | 26 | #@follow_tag(registry.redhat.io/ubi9:latest) 27 | FROM registry.redhat.io/ubi9:latest 28 | LABEL \ 29 | io.k8s.display-name="OpenShift elasticsearch-operator" \ 30 | io.k8s.description="This is the component that manages an Elasticsearch cluster on a kubernetes based platform" \ 31 | io.openshift.tags="openshift,logging,elasticsearch" \ 32 | com.redhat.delivery.appregistry="false" \ 33 | License="Apache-2.0" \ 34 | maintainer="AOS Logging " \ 35 | name="openshift-logging/elasticsearch-rhel8-operator" \ 36 | com.redhat.component="elasticsearch-operator-container" \ 37 | io.openshift.maintainer.product="OpenShift Container Platform" \ 38 | io.openshift.build.commit.id=${CI_ELASTICSEARCH_OPERATOR_UPSTREAM_COMMIT} \ 39 | io.openshift.build.source-location=${CI_ELASTICSEARCH_OPERATOR_UPSTREAM_URL} \ 40 | io.openshift.build.commit.url=${CI_ELASTICSEARCH_OPERATOR_UPSTREAM_URL}/commit/${CI_ELASTICSEARCH_OPERATOR_UPSTREAM_COMMIT} \ 41 | version=${CI_CONTAINER_VERSION} 42 | 43 | ENV ALERTS_FILE_PATH="/etc/elasticsearch-operator/files/prometheus_alerts.yml" 44 | ENV RULES_FILE_PATH="/etc/elasticsearch-operator/files/prometheus_recording_rules.yml" 45 | ENV ES_DASHBOARD_FILE="/etc/elasticsearch-operator/files/dashboards/logging-dashboard-elasticsearch.json" 46 | ENV RUNBOOK_BASE_URL="https://github.com/openshift/elasticsearch-operator/blob/master/docs/alerts.md" 47 | 48 | COPY --from=builder /go/src/github.com/openshift/elasticsearch-operator/bin/elasticsearch-operator /usr/bin/ 49 | COPY --from=builder /go/src/github.com/openshift/elasticsearch-operator/files/ /etc/elasticsearch-operator/files/ 50 | COPY --from=builder /go/src/github.com/openshift/elasticsearch-operator/bundle /bundle 51 | RUN mkdir /tmp/ocp-eo && \ 52 | chmod og+w /tmp/ocp-eo 53 | 54 | WORKDIR /usr/bin 55 | ENTRYPOINT ["elasticsearch-operator"] 56 | 57 | -------------------------------------------------------------------------------- /Dockerfile.src: -------------------------------------------------------------------------------- 1 | # This dockerfile is needed for api.ci to build+promote the source 2 | # image for consumption from other repo jobs 3 | FROM src 4 | -------------------------------------------------------------------------------- /OWNERS: -------------------------------------------------------------------------------- 1 | # See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md 2 | filters: 3 | ".*": 4 | approvers: 5 | - alanconway 6 | - jcantrill 7 | - JoaoBraveCoding 8 | - xperimental 9 | reviewers: 10 | - alanconway 11 | - btaani 12 | - jcantrill 13 | - JoaoBraveCoding 14 | - shwetaap 15 | - xperimental 16 | "Dockerfile(?:\\.in)?$": # matches Dockerfile, Dockerfile.in 17 | labels: 18 | - midstream/Dockerfile 19 | component: "Logging" 20 | -------------------------------------------------------------------------------- /PROJECT: -------------------------------------------------------------------------------- 1 | domain: openshift.io 2 | layout: 3 | - go.kubebuilder.io/v3 4 | multigroup: true 5 | plugins: 6 | manifests.sdk.operatorframework.io/v2: {} 7 | scorecard.sdk.operatorframework.io/v2: {} 8 | projectName: elasticsearch-operator 9 | repo: github.com/openshift/elasticsearch-operator 10 | resources: 11 | - api: 12 | crdVersion: v1 13 | namespaced: true 14 | controller: true 15 | domain: openshift.io 16 | group: logging 17 | kind: Elasticsearch 18 | 
path: github.com/openshift/elasticsearch-operator/apis/logging/v1 19 | version: v1 20 | - api: 21 | crdVersion: v1 22 | namespaced: true 23 | controller: true 24 | domain: openshift.io 25 | group: logging 26 | kind: Kibana 27 | path: github.com/openshift/elasticsearch-operator/apis/logging/v1 28 | version: v1 29 | version: "3" 30 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # elasticsearch-operator 2 | 3 | An Elasticsearch operator to run an Elasticsearch cluster on top of OpenShift and Kubernetes. 4 | The operator uses the [Operator Framework SDK](https://github.com/operator-framework/operator-sdk). 5 | 6 | ## Why Use An Operator? 7 | 8 | The operator is designed to provide self-service management of Elasticsearch cluster operations; see [Operator Capability Levels](https://sdk.operatorframework.io/docs/overview/#operator-capability-level). 9 | 10 | - The Elasticsearch operator ensures a proper layout of the pods 11 | - The Elasticsearch operator enables proper rolling cluster restarts 12 | - The Elasticsearch operator provides a kubectl interface to manage your Elasticsearch cluster 13 | - The Elasticsearch operator provides a kubectl interface to monitor your Elasticsearch cluster 14 | 15 | To experiment with or contribute to the development of elasticsearch-operator, see [HACKING.md](./docs/HACKING.md) and [REVIEW.md](./docs/REVIEW.md). 16 | -------------------------------------------------------------------------------- /apis/logging/v1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | // Package v1 contains API Schema definitions for the logging v1 API group 2 | // +kubebuilder:object:generate=true 3 | // +groupName=logging.openshift.io 4 | package v1 5 | 6 | import ( 7 | "k8s.io/apimachinery/pkg/runtime/schema" 8 | "sigs.k8s.io/controller-runtime/pkg/scheme" 9 | ) 10 | 11 | var ( 12 | // GroupVersion is group version used to register these objects 13 | GroupVersion = schema.GroupVersion{Group: "logging.openshift.io", Version: "v1"} 14 | 15 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 16 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 17 | 18 | // AddToScheme adds the types in this group-version to the given scheme.
19 | AddToScheme = SchemeBuilder.AddToScheme 20 | ) 21 | 22 | // +kubebuilder:rbac:groups=console.openshift.io,resources=consolelinks;consoleexternalloglinks,verbs=get;create;update;delete 23 | // +kubebuilder:rbac:groups=logging.openshift.io,resources=*,verbs=* 24 | // +kubebuilder:rbac:groups=core,resources=pods;pods/exec;services;endpoints;persistentvolumeclaims;events;configmaps;secrets;serviceaccounts;services/finalizers,verbs=* 25 | // +kubebuilder:rbac:groups=route.openshift.io,resources=routes;routes/custom-host,verbs="*" 26 | // +kubebuilder:rbac:groups=apps,resources=deployments;daemonsets;replicasets;statefulsets,verbs=* 27 | // +kubebuilder:rbac:groups=batch,resources=cronjobs,verbs=* 28 | // +kubebuilder:rbac:groups=monitoring.coreos.com,resources=prometheusrules;servicemonitors,verbs=* 29 | // +kubebuilder:rbac:groups=oauth.openshift.io,resources=oauthclients,verbs=* 30 | // +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterroles;clusterrolebindings,verbs=* 31 | // +kubebuilder:rbac:urls=/metrics,verbs=get 32 | // +kubebuilder:rbac:groups=authentication.k8s.io,resources=tokenreviews;subjectaccessreviews,verbs=create 33 | // +kubebuilder:rbac:groups=authorization.k8s.io,resources=subjectaccessreviews,verbs=create 34 | // +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles;rolebindings,verbs=* 35 | // +kubebuilder:rbac:groups=config.openshift.io,resources=proxies;oauths,verbs=get;list;watch 36 | // +kubebuilder:rbac:groups=networking.k8s.io,resources=networkpolicies,verbs=create;delete 37 | // +kubebuilder:rbac:groups=apps,resourceNames=elasticsearch-operator,resources=deployments/finalizers,verbs=update 38 | // +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;create;update 39 | // +kubebuilder:rbac:groups=security.openshift.io,resources=securitycontextconstraints,verbs=get;list;watch;create;update 40 | // +kubebuilder:rbac:groups=image.openshift.io,resources=imagestreams,verbs=get;list;watch 41 | -------------------------------------------------------------------------------- /build/Dockerfile: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openshift/elasticsearch-operator/81cd6e70c15eea7c947e5f2aee3a9a2ba3b0806e/build/Dockerfile -------------------------------------------------------------------------------- /bundle.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM scratch 2 | 3 | # Core bundle labels. 4 | LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1 5 | LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/ 6 | LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/ 7 | LABEL operators.operatorframework.io.bundle.package.v1=elasticsearch-operator 8 | LABEL operators.operatorframework.io.bundle.channels.v1=stable,stable-5.8 9 | LABEL operators.operatorframework.io.bundle.channel.default.v1=stable 10 | LABEL operators.operatorframework.io.metrics.builder=operator-sdk-unknown 11 | LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1 12 | LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v3 13 | 14 | # Labels for testing. 15 | LABEL operators.operatorframework.io.test.mediatype.v1=scorecard+v1 16 | LABEL operators.operatorframework.io.test.config.v1=tests/scorecard/ 17 | 18 | # Copy files to locations specified by labels. 
19 | COPY bundle/manifests /manifests/ 20 | COPY bundle/metadata /metadata/ 21 | COPY bundle/tests/scorecard /tests/scorecard/ 22 | -------------------------------------------------------------------------------- /bundle/manifests/elasticsearch-operator-metrics-monitor_monitoring.coreos.com_v1_servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | name: elasticsearch-operator 6 | name: elasticsearch-operator-metrics-monitor 7 | spec: 8 | endpoints: 9 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 10 | interval: 30s 11 | path: /metrics 12 | scheme: https 13 | scrapeTimeout: 10s 14 | targetPort: 8443 15 | tlsConfig: 16 | caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt 17 | serverName: elasticsearch-operator-metrics.openshift-operators-redhat.svc 18 | selector: 19 | matchLabels: 20 | name: elasticsearch-operator 21 | -------------------------------------------------------------------------------- /bundle/manifests/elasticsearch-operator-metrics_v1_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | service.beta.openshift.io/serving-cert-secret-name: elasticsearch-operator-metrics 6 | creationTimestamp: null 7 | labels: 8 | name: elasticsearch-operator 9 | name: elasticsearch-operator-metrics 10 | spec: 11 | ports: 12 | - name: https 13 | port: 8443 14 | protocol: TCP 15 | targetPort: https 16 | selector: 17 | name: elasticsearch-operator 18 | status: 19 | loadBalancer: {} 20 | -------------------------------------------------------------------------------- /bundle/manifests/leader-election-role_rbac.authorization.k8s.io_v1_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | name: elasticsearch-operator 7 | name: leader-election-role 8 | rules: 9 | - apiGroups: 10 | - "" 11 | resources: 12 | - configmaps 13 | verbs: 14 | - get 15 | - list 16 | - watch 17 | - create 18 | - update 19 | - patch 20 | - delete 21 | - apiGroups: 22 | - "" 23 | resources: 24 | - configmaps/status 25 | verbs: 26 | - get 27 | - update 28 | - patch 29 | - apiGroups: 30 | - "" 31 | resources: 32 | - events 33 | verbs: 34 | - create 35 | - patch 36 | -------------------------------------------------------------------------------- /bundle/manifests/leader-election-rolebinding_rbac.authorization.k8s.io_v1_rolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | name: elasticsearch-operator 7 | name: leader-election-rolebinding 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: Role 11 | name: leader-election-role 12 | subjects: 13 | - kind: ServiceAccount 14 | name: default 15 | namespace: openshift-operators-redhat 16 | -------------------------------------------------------------------------------- /bundle/manifests/metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | name: 
elasticsearch-operator 7 | name: metrics-reader 8 | rules: 9 | - nonResourceURLs: 10 | - /metrics 11 | verbs: 12 | - get 13 | -------------------------------------------------------------------------------- /bundle/manifests/metrics-reader_rbac.authorization.k8s.io_v1beta1_clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | name: elasticsearch-operator 7 | name: metrics-reader 8 | rules: 9 | - nonResourceURLs: 10 | - /metrics 11 | verbs: 12 | - get 13 | -------------------------------------------------------------------------------- /bundle/manifests/prometheus_rbac.authorization.k8s.io_v1_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | annotations: 5 | include.release.openshift.io/self-managed-high-availability: "true" 6 | include.release.openshift.io/single-node-developer: "true" 7 | creationTimestamp: null 8 | labels: 9 | name: elasticsearch-operator 10 | name: prometheus 11 | rules: 12 | - apiGroups: 13 | - "" 14 | resources: 15 | - services 16 | - endpoints 17 | - pods 18 | verbs: 19 | - get 20 | - list 21 | - watch 22 | -------------------------------------------------------------------------------- /bundle/manifests/prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | annotations: 5 | include.release.openshift.io/self-managed-high-availability: "true" 6 | include.release.openshift.io/single-node-developer: "true" 7 | creationTimestamp: null 8 | labels: 9 | name: elasticsearch-operator 10 | name: prometheus 11 | roleRef: 12 | apiGroup: rbac.authorization.k8s.io 13 | kind: Role 14 | name: prometheus 15 | subjects: 16 | - kind: ServiceAccount 17 | name: prometheus-k8s 18 | namespace: openshift-monitoring 19 | -------------------------------------------------------------------------------- /bundle/manifests/proxy-role_rbac.authorization.k8s.io_v1_clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | name: elasticsearch-operator 7 | name: proxy-role 8 | rules: 9 | - apiGroups: 10 | - authentication.k8s.io 11 | resources: 12 | - tokenreviews 13 | verbs: 14 | - create 15 | - apiGroups: 16 | - authorization.k8s.io 17 | resources: 18 | - subjectaccessreviews 19 | verbs: 20 | - create 21 | -------------------------------------------------------------------------------- /bundle/manifests/proxy-rolebinding_rbac.authorization.k8s.io_v1_clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | name: elasticsearch-operator 7 | name: proxy-rolebinding 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: ClusterRole 11 | name: proxy-role 12 | subjects: 13 | - kind: ServiceAccount 14 | name: default 15 | namespace: openshift-operators-redhat 16 | -------------------------------------------------------------------------------- /bundle/metadata/annotations.yaml: 
-------------------------------------------------------------------------------- 1 | annotations: 2 | # Core bundle annotations. 3 | operators.operatorframework.io.bundle.mediatype.v1: registry+v1 4 | operators.operatorframework.io.bundle.manifests.v1: manifests/ 5 | operators.operatorframework.io.bundle.metadata.v1: metadata/ 6 | operators.operatorframework.io.bundle.package.v1: elasticsearch-operator 7 | operators.operatorframework.io.bundle.channels.v1: stable,stable-5.8 8 | operators.operatorframework.io.bundle.channel.default.v1: stable 9 | operators.operatorframework.io.metrics.builder: operator-sdk-unknown 10 | operators.operatorframework.io.metrics.mediatype.v1: metrics+v1 11 | operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v3 12 | 13 | # Annotations for testing. 14 | operators.operatorframework.io.test.mediatype.v1: scorecard+v1 15 | operators.operatorframework.io.test.config.v1: tests/scorecard/ 16 | -------------------------------------------------------------------------------- /bundle/metadata/properties.yaml: -------------------------------------------------------------------------------- 1 | properties: 2 | - type: olm.maxOpenShiftVersion 3 | value: 4.15 4 | -------------------------------------------------------------------------------- /bundle/tests/scorecard/config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: scorecard.operatorframework.io/v1alpha3 2 | kind: Configuration 3 | metadata: 4 | name: config 5 | stages: 6 | - parallel: true 7 | tests: [] 8 | storage: 9 | spec: 10 | mountPath: {} 11 | -------------------------------------------------------------------------------- /config/config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: logging.openshift.io/v1 2 | kind: Elasticsearch 3 | projectName: openshift-logging 4 | -------------------------------------------------------------------------------- /config/crd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # This kustomization.yaml is not intended to be run by itself, 2 | # since it depends on service name and namespace that are out of this kustomize package. 3 | # It should be run by config/default 4 | resources: 5 | - bases/logging.openshift.io_elasticsearches.yaml 6 | - bases/logging.openshift.io_kibanas.yaml 7 | # +kubebuilder:scaffold:crdkustomizeresource 8 | 9 | patchesStrategicMerge: 10 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 11 | # patches here are for enabling the conversion webhook for each CRD 12 | #- patches/webhook_in_elasticsearches.yaml 13 | #- patches/webhook_in_kibanas.yaml 14 | # +kubebuilder:scaffold:crdkustomizewebhookpatch 15 | 16 | # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. 17 | # patches here are for enabling the CA injection for each CRD 18 | #- patches/cainjection_in_elasticsearches.yaml 19 | #- patches/cainjection_in_kibanas.yaml 20 | # +kubebuilder:scaffold:crdkustomizecainjectionpatch 21 | 22 | # the following config is for teaching kustomize how to do kustomization for CRDs. 
23 | configurations: 24 | - kustomizeconfig.yaml 25 | -------------------------------------------------------------------------------- /config/crd/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This file is for teaching kustomize how to substitute name and namespace reference in CRD 2 | nameReference: 3 | - kind: Service 4 | version: v1 5 | fieldSpecs: 6 | - kind: CustomResourceDefinition 7 | group: apiextensions.k8s.io 8 | path: spec/conversion/webhookClientConfig/service/name 9 | 10 | namespace: 11 | - kind: CustomResourceDefinition 12 | version: v1 13 | group: apiextensions.k8s.io 14 | path: spec/conversion/webhookClientConfig/service/namespace 15 | create: false 16 | 17 | varReference: 18 | - path: metadata/annotations 19 | -------------------------------------------------------------------------------- /config/default/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Adds namespace to all resources. 2 | namespace: openshift-operators-redhat 3 | 4 | # Value of this field is prepended to the 5 | # names of all resources, e.g. a deployment named 6 | # "wordpress" becomes "alices-wordpress". 7 | # Note that it should also match with the prefix (text before '-') of the namespace 8 | # field above. 9 | # namePrefix: elasticsearch-operator- 10 | 11 | # Labels to add to all resources and selectors. 12 | commonLabels: 13 | name: elasticsearch-operator 14 | 15 | bases: 16 | - ../crd 17 | - ../rbac 18 | - ../manager 19 | - ../prometheus 20 | 21 | patchesStrategicMerge: 22 | - manager_auth_proxy_patch.yaml 23 | 24 | # the following config is for teaching kustomize how to do var substitution 25 | vars: 26 | 27 | -------------------------------------------------------------------------------- /config/default/manager_auth_proxy_patch.yaml: -------------------------------------------------------------------------------- 1 | # This patch inject a sidecar container which is a HTTP proxy for the 2 | # controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. 
3 | apiVersion: apps/v1 4 | kind: Deployment 5 | metadata: 6 | name: elasticsearch-operator 7 | labels: 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: kube-rbac-proxy 13 | image: quay.io/openshift/origin-kube-rbac-proxy:latest 14 | args: 15 | - "--secure-listen-address=0.0.0.0:8443" 16 | - "--upstream=http://127.0.0.1:8080/" 17 | - "--logtostderr=true" 18 | - "--tls-cert-file=/var/run/secrets/serving-cert/tls.crt" 19 | - "--tls-private-key-file=/var/run/secrets/serving-cert/tls.key" 20 | - "--v=2" 21 | ports: 22 | - containerPort: 8443 23 | protocol: TCP 24 | name: https 25 | volumeMounts: 26 | - mountPath: /var/run/secrets/serving-cert 27 | name: elasticsearch-operator-metrics-cert 28 | securityContext: 29 | allowPrivilegeEscalation: false 30 | capabilities: 31 | drop: 32 | - ALL 33 | - name: elasticsearch-operator 34 | args: 35 | - "--health-probe-bind-address=:8081" 36 | - "--metrics-bind-address=127.0.0.1:8080" 37 | volumes: 38 | - name: elasticsearch-operator-metrics-cert 39 | secret: 40 | defaultMode: 420 41 | optional: true 42 | secretName: elasticsearch-operator-metrics 43 | -------------------------------------------------------------------------------- /config/manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manager.yaml 3 | -------------------------------------------------------------------------------- /config/manager/manager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: elasticsearch-operator 5 | labels: 6 | spec: 7 | selector: 8 | matchLabels: 9 | replicas: 1 10 | template: 11 | metadata: 12 | annotations: 13 | kubectl.kubernetes.io/default-container: elasticsearch-operator 14 | labels: 15 | spec: 16 | nodeSelector: 17 | kubernetes.io/os: linux 18 | serviceAccountName: elasticsearch-operator 19 | containers: 20 | - command: 21 | - elasticsearch-operator 22 | ports: 23 | - containerPort: 8080 24 | name: http 25 | image: quay.io/openshift-logging/elasticsearch-operator:latest 26 | name: elasticsearch-operator 27 | imagePullPolicy: IfNotPresent 28 | resources: {} 29 | securityContext: 30 | allowPrivilegeEscalation: false 31 | capabilities: 32 | drop: 33 | - ALL 34 | livenessProbe: 35 | httpGet: 36 | path: /healthz 37 | port: 8081 38 | initialDelaySeconds: 15 39 | periodSeconds: 20 40 | readinessProbe: 41 | httpGet: 42 | path: /readyz 43 | port: 8081 44 | initialDelaySeconds: 5 45 | periodSeconds: 10 46 | env: 47 | - name: WATCH_NAMESPACE 48 | valueFrom: 49 | fieldRef: 50 | fieldPath: metadata.annotations['olm.targetNamespaces'] 51 | - name: POD_NAME 52 | valueFrom: 53 | fieldRef: 54 | fieldPath: metadata.name 55 | - name: OPERATOR_NAME 56 | value: "elasticsearch-operator" 57 | - name: RELATED_IMAGE_ELASTICSEARCH_PROXY 58 | value: "quay.io/openshift-logging/elasticsearch-proxy:1.0" 59 | - name: RELATED_IMAGE_ELASTICSEARCH 60 | value: "quay.io/openshift-logging/elasticsearch6:6.8.1" 61 | - name: RELATED_IMAGE_KIBANA 62 | value: "quay.io/openshift-logging/kibana6:6.8.1" 63 | - name: RELATED_IMAGE_CURATOR 64 | value: "quay.io/openshift-logging/curator5:5.8.1" 65 | securityContext: 66 | runAsNonRoot: true 67 | -------------------------------------------------------------------------------- /config/manifests/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - ../default 3 | - ../samples 4 | 
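The kustomize layers above compose into the operator's full manifest set: `config/default` applies the `openshift-operators-redhat` namespace and the common `name: elasticsearch-operator` label to the CRD, RBAC, manager, and Prometheus bases, while `config/manifests` stacks the sample CRs on top. A minimal sketch of rendering these bases locally, assuming a `kustomize` binary on the PATH (for example the one pinned via `.bingo/kustomize.mod`); the repository's Makefile may wrap this differently:

```sh
# Render the complete operator deployment (CRDs, RBAC, manager Deployment,
# ServiceMonitor) with the namespace and labels applied by config/default.
kustomize build config/default > elasticsearch-operator.yaml

# Render the base typically fed into OLM bundle generation
# (config/default plus the sample resources under config/samples).
kustomize build config/manifests > bundle-base.yaml
```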
-------------------------------------------------------------------------------- /config/prometheus/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - monitor.yaml 3 | -------------------------------------------------------------------------------- /config/prometheus/monitor.yaml: -------------------------------------------------------------------------------- 1 | 2 | # Prometheus Monitor Service (Metrics) 3 | apiVersion: monitoring.coreos.com/v1 4 | kind: ServiceMonitor 5 | metadata: 6 | labels: 7 | name: elasticsearch-operator 8 | name: elasticsearch-operator-metrics-monitor 9 | spec: 10 | endpoints: 11 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 12 | path: /metrics 13 | targetPort: 8443 14 | scheme: https 15 | interval: 30s 16 | scrapeTimeout: 10s 17 | tlsConfig: 18 | caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt 19 | serverName: elasticsearch-operator-metrics.openshift-operators-redhat.svc 20 | selector: 21 | matchLabels: 22 | name: elasticsearch-operator 23 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_client_clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: metrics-reader 5 | rules: 6 | - nonResourceURLs: ["/metrics"] 7 | verbs: ["get"] 8 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: proxy-role 5 | rules: 6 | - apiGroups: ["authentication.k8s.io"] 7 | resources: 8 | - tokenreviews 9 | verbs: ["create"] 10 | - apiGroups: ["authorization.k8s.io"] 11 | resources: 12 | - subjectaccessreviews 13 | verbs: ["create"] 14 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: proxy-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: proxy-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: default 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | service.beta.openshift.io/serving-cert-secret-name: elasticsearch-operator-metrics 6 | labels: 7 | name: elasticsearch-operator 8 | name: elasticsearch-operator-metrics 9 | spec: 10 | ports: 11 | - name: https 12 | port: 8443 13 | protocol: TCP 14 | targetPort: https 15 | selector: 16 | name: elasticsearch-operator 17 | -------------------------------------------------------------------------------- /config/rbac/elasticsearch_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit elasticsearches. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: elasticsearch-editor-role 6 | rules: 7 | - apiGroups: 8 | - logging.openshift.io 9 | resources: 10 | - elasticsearches 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - logging.openshift.io 21 | resources: 22 | - elasticsearches/status 23 | verbs: 24 | - get 25 | -------------------------------------------------------------------------------- /config/rbac/elasticsearch_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view elasticsearches. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: elasticsearch-viewer-role 6 | rules: 7 | - apiGroups: 8 | - logging.openshift.io 9 | resources: 10 | - elasticsearches 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - logging.openshift.io 17 | resources: 18 | - elasticsearches/status 19 | verbs: 20 | - get 21 | -------------------------------------------------------------------------------- /config/rbac/kibana_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit kibanas. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: kibana-editor-role 6 | rules: 7 | - apiGroups: 8 | - logging.openshift.io 9 | resources: 10 | - kibanas 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - logging.openshift.io 21 | resources: 22 | - kibanas/status 23 | verbs: 24 | - get 25 | -------------------------------------------------------------------------------- /config/rbac/kibana_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view kibanas. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: kibana-viewer-role 6 | rules: 7 | - apiGroups: 8 | - logging.openshift.io 9 | resources: 10 | - kibanas 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - logging.openshift.io 17 | resources: 18 | - kibanas/status 19 | verbs: 20 | - get 21 | -------------------------------------------------------------------------------- /config/rbac/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - role.yaml 3 | - role_binding.yaml 4 | - leader_election_role.yaml 5 | - leader_election_role_binding.yaml 6 | - auth_proxy_service.yaml 7 | - auth_proxy_role.yaml 8 | - auth_proxy_role_binding.yaml 9 | - auth_proxy_client_clusterrole.yaml 10 | - prometheus_role.yaml 11 | - prometheus_role_binding.yaml 12 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions to do leader election. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: leader-election-role 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - create 16 | - update 17 | - patch 18 | - delete 19 | - apiGroups: 20 | - "" 21 | resources: 22 | - configmaps/status 23 | verbs: 24 | - get 25 | - update 26 | - patch 27 | - apiGroups: 28 | - "" 29 | resources: 30 | - events 31 | verbs: 32 | - create 33 | - patch 34 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: leader-election-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: Role 8 | name: leader-election-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: default 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/prometheus_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | annotations: 5 | include.release.openshift.io/self-managed-high-availability: "true" 6 | include.release.openshift.io/single-node-developer: "true" 7 | name: prometheus 8 | rules: 9 | - apiGroups: 10 | - "" 11 | resources: 12 | - services 13 | - endpoints 14 | - pods 15 | verbs: 16 | - get 17 | - list 18 | - watch 19 | -------------------------------------------------------------------------------- /config/rbac/prometheus_role_binding.yaml: -------------------------------------------------------------------------------- 1 | # Grant cluster-monitoring access to openshift-operators-redhat metrics 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: RoleBinding 4 | metadata: 5 | name: prometheus 6 | annotations: 7 | include.release.openshift.io/self-managed-high-availability: "true" 8 | include.release.openshift.io/single-node-developer: "true" 9 | roleRef: 10 | apiGroup: rbac.authorization.k8s.io 11 | kind: Role 12 | name: prometheus 13 | subjects: 14 | - kind: ServiceAccount 15 | name: prometheus-k8s 16 | namespace: openshift-monitoring 17 | -------------------------------------------------------------------------------- /config/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | creationTimestamp: null 6 | name: elasticsearch-operator 7 | rules: 8 | - nonResourceURLs: 9 | - /metrics 10 | verbs: 11 | - get 12 | - apiGroups: 13 | - apps 14 | resources: 15 | - daemonsets 16 | - deployments 17 | - replicasets 18 | - statefulsets 19 | verbs: 20 | - '*' 21 | - apiGroups: 22 | - apps 23 | resourceNames: 24 | - elasticsearch-operator 25 | resources: 26 | - deployments/finalizers 27 | verbs: 28 | - update 29 | - apiGroups: 30 | - authentication.k8s.io 31 | resources: 32 | - subjectaccessreviews 33 | - tokenreviews 34 | verbs: 35 | - create 36 | - apiGroups: 37 | - authorization.k8s.io 38 | resources: 39 | - subjectaccessreviews 40 | verbs: 41 | - create 42 | - apiGroups: 43 | - batch 44 | resources: 45 | - cronjobs 46 | verbs: 47 | - '*' 48 | - apiGroups: 49 | - config.openshift.io 50 | resources: 51 | - oauths 52 | - proxies 53 | verbs: 54 | - get 55 | - list 56 | - watch 57 | - apiGroups: 58 | 
- console.openshift.io 59 | resources: 60 | - consoleexternalloglinks 61 | - consolelinks 62 | verbs: 63 | - create 64 | - delete 65 | - get 66 | - update 67 | - apiGroups: 68 | - coordination.k8s.io 69 | resources: 70 | - leases 71 | verbs: 72 | - create 73 | - get 74 | - update 75 | - apiGroups: 76 | - "" 77 | resources: 78 | - configmaps 79 | - endpoints 80 | - events 81 | - persistentvolumeclaims 82 | - pods 83 | - pods/exec 84 | - secrets 85 | - serviceaccounts 86 | - services 87 | - services/finalizers 88 | verbs: 89 | - '*' 90 | - apiGroups: 91 | - image.openshift.io 92 | resources: 93 | - imagestreams 94 | verbs: 95 | - get 96 | - list 97 | - watch 98 | - apiGroups: 99 | - logging.openshift.io 100 | resources: 101 | - '*' 102 | verbs: 103 | - '*' 104 | - apiGroups: 105 | - monitoring.coreos.com 106 | resources: 107 | - prometheusrules 108 | - servicemonitors 109 | verbs: 110 | - '*' 111 | - apiGroups: 112 | - networking.k8s.io 113 | resources: 114 | - networkpolicies 115 | verbs: 116 | - create 117 | - delete 118 | - apiGroups: 119 | - oauth.openshift.io 120 | resources: 121 | - oauthclients 122 | verbs: 123 | - '*' 124 | - apiGroups: 125 | - rbac.authorization.k8s.io 126 | resources: 127 | - clusterrolebindings 128 | - clusterroles 129 | verbs: 130 | - '*' 131 | - apiGroups: 132 | - rbac.authorization.k8s.io 133 | resources: 134 | - rolebindings 135 | - roles 136 | verbs: 137 | - '*' 138 | - apiGroups: 139 | - route.openshift.io 140 | resources: 141 | - routes 142 | - routes/custom-host 143 | verbs: 144 | - '*' 145 | - apiGroups: 146 | - security.openshift.io 147 | resources: 148 | - securitycontextconstraints 149 | verbs: 150 | - create 151 | - get 152 | - list 153 | - update 154 | - watch 155 | -------------------------------------------------------------------------------- /config/rbac/role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: manager-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: elasticsearch-operator 9 | subjects: 10 | - kind: ServiceAccount 11 | name: elasticsearch-operator 12 | -------------------------------------------------------------------------------- /config/samples/kustomization.yaml: -------------------------------------------------------------------------------- 1 | ## This file is auto-generated, do not modify ## 2 | resources: 3 | - logging_v1_elasticsearch.yaml 4 | - logging_v1_kibana.yaml 5 | -------------------------------------------------------------------------------- /config/samples/logging_v1_elasticsearch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "logging.openshift.io/v1" 2 | kind: "Elasticsearch" 3 | metadata: 4 | name: "elasticsearch" 5 | spec: 6 | managementState: "Managed" 7 | nodeSpec: 8 | resources: 9 | limits: 10 | memory: 1Gi 11 | requests: 12 | cpu: 100m 13 | memory: 512Mi 14 | nodes: 15 | - nodeCount: 1 16 | roles: ["client", "data", "master"] 17 | storage: 18 | size: 20G 19 | redundancyPolicy: ZeroRedundancy 20 | indexManagement: 21 | policies: 22 | - name: infra-policy 23 | pollInterval: 30m 24 | phases: 25 | hot: 26 | actions: 27 | rollover: 28 | maxAge: 8h 29 | delete: 30 | minAge: 2d 31 | pruneNamespacesInterval: 24h 32 | namespaceSpec: 33 | - namespace: openshift-monitoring 34 | minAge: 5h 35 | mappings: 36 | - name: infra 37 | policyRef: infra-policy 38 | aliases: ["infra", 
"logs.infra"] -------------------------------------------------------------------------------- /config/samples/logging_v1_kibana.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: logging.openshift.io/v1 2 | kind: Kibana 3 | metadata: 4 | name: kibana 5 | spec: 6 | managementState: "Managed" 7 | replicas: 1 8 | nodeSelector: {} 9 | resources: 10 | limits: 11 | memory: 512Mi 12 | requests: 13 | memory: 512Mi -------------------------------------------------------------------------------- /controllers/logging/secret_controller.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/go-logr/logr" 7 | corev1 "k8s.io/api/core/v1" 8 | apierrors "k8s.io/apimachinery/pkg/api/errors" 9 | "k8s.io/apimachinery/pkg/runtime" 10 | "k8s.io/apimachinery/pkg/types" 11 | ctrl "sigs.k8s.io/controller-runtime" 12 | "sigs.k8s.io/controller-runtime/pkg/client" 13 | "sigs.k8s.io/controller-runtime/pkg/event" 14 | "sigs.k8s.io/controller-runtime/pkg/predicate" 15 | 16 | loggingv1 "github.com/openshift/elasticsearch-operator/apis/logging/v1" 17 | "github.com/openshift/elasticsearch-operator/internal/elasticsearch" 18 | ) 19 | 20 | // SecretReconciler reconciles a Secret object 21 | type SecretReconciler struct { 22 | client.Client 23 | Log logr.Logger 24 | Scheme *runtime.Scheme 25 | } 26 | 27 | func (r *SecretReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { 28 | _ = context.Background() 29 | 30 | cluster := &loggingv1.Elasticsearch{} 31 | esName := types.NamespacedName{ 32 | Namespace: req.Namespace, 33 | Name: req.Name, 34 | } 35 | 36 | err := r.Get(ctx, esName, cluster) 37 | if err != nil { 38 | if apierrors.IsNotFound(err) { 39 | return ctrl.Result{}, nil 40 | } 41 | return ctrl.Result{}, err 42 | } 43 | 44 | ok, err := elasticsearch.SecretReconcile(r.Log, cluster, r.Client) 45 | if !ok { 46 | return reconcileResult, err 47 | } 48 | return ctrl.Result{}, err 49 | } 50 | 51 | func esSecretUpdatePredicate(r client.Client) predicate.Predicate { 52 | return predicate.Funcs{ 53 | UpdateFunc: func(e event.UpdateEvent) bool { 54 | cluster := &loggingv1.Elasticsearch{} 55 | esName := types.NamespacedName{ 56 | Namespace: e.ObjectNew.GetNamespace(), 57 | Name: e.ObjectNew.GetName(), 58 | } 59 | err := r.Get(context.TODO(), esName, cluster) 60 | if err != nil { 61 | return false 62 | } 63 | return true 64 | }, 65 | CreateFunc: func(e event.CreateEvent) bool { 66 | return true 67 | }, 68 | DeleteFunc: func(e event.DeleteEvent) bool { 69 | return false 70 | }, 71 | GenericFunc: func(e event.GenericEvent) bool { 72 | return false 73 | }, 74 | } 75 | } 76 | 77 | func (r *SecretReconciler) SetupWithManager(mgr ctrl.Manager) error { 78 | return ctrl.NewControllerManagedBy(mgr). 79 | For(&corev1.Secret{}). 80 | WithEventFilter(esSecretUpdatePredicate(r.Client)). 
81 | Complete(r) 82 | } 83 | -------------------------------------------------------------------------------- /dev-meta.yaml: -------------------------------------------------------------------------------- 1 | from: 2 | - source: registry-proxy.engineering.redhat.com/rh-osbs/openshift-golang-builder\:v(?:[\.0-9\-]*).* 3 | target: registry.ci.openshift.org/openshift/release:rhel-9-release-golang-1.20-openshift-4.14 AS builder 4 | - source: registry.redhat.io/ubi9:9.(\d)-([\.0-9])* 5 | target: docker.io/centos:9 AS centos 6 | env: 7 | - source: RUNBOOK_BASE_URL=.* 8 | target: RUNBOOK_BASE_URL="https://github.com/openshift/elasticsearch-operator/blob/master/docs/alerts.md" 9 | -------------------------------------------------------------------------------- /docs/access-control.md: -------------------------------------------------------------------------------- 1 | # Access Control 2 | Authentication and authorization are provided by a combination of the [Open Distro for Elasticsearch](https://opendistro.github.io/for-elasticsearch/) plugin and the [elasticsearch-proxy](https://github.com/openshift/elasticsearch-proxy). The plugin relies upon [statically defined OpenDistro roles](https://github.com/openshift/origin-aggregated-logging/tree/master/elasticsearch/sgconfig) and a user's OpenShift Roles and projects to determine access to logs. The following is a brief explanation of how roles and role mappings are evaluated in an Elasticsearch deployment for OpenShift logging to control access to log records. Please see the official [Open Distro security documentation](https://opendistro.github.io/for-elasticsearch-docs/docs/security/) for additional information. 3 | 4 | ## Role Definitions and Permissions 5 | Permissions are statically defined and seeded when the Elasticsearch cluster starts. They are grouped into two distinct roles: project user and administrator. Figure 1 diagrams the workflow used to evaluate a user's access. The calls to the OpenShift API server are cached by the elasticsearch-proxy and periodically expire so they stay in sync with OpenShift permissions. 6 | 7 | **Figure 1** 8 | 9 | ![Authentication and Authorization Workflow](./images/access-control.svg) 10 | 11 | ### Project user 12 | Project users are able to access logs from any namespace to which they have access, as determined by the equivalent call: 13 | ``` 14 | oc get projects --token $USERTOKEN 15 | ``` 16 | 17 | ### Admin user 18 | An admin user is able to access all collected logs (i.e. 
app, infra, audit) if they have an admin role as determined by: 19 | ``` 20 | oc -n default auth can-i get pods/logs 21 | ``` 22 | -------------------------------------------------------------------------------- /docs/images/troubleshooting/ES_Metrics_Dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openshift/elasticsearch-operator/81cd6e70c15eea7c947e5f2aee3a9a2ba3b0806e/docs/images/troubleshooting/ES_Metrics_Dashboard.png -------------------------------------------------------------------------------- /docs/images/troubleshooting/ES_Metrics_forcemerge.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openshift/elasticsearch-operator/81cd6e70c15eea7c947e5f2aee3a9a2ba3b0806e/docs/images/troubleshooting/ES_Metrics_forcemerge.png -------------------------------------------------------------------------------- /docs/images/troubleshooting/areachart_visualization.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openshift/elasticsearch-operator/81cd6e70c15eea7c947e5f2aee3a9a2ba3b0806e/docs/images/troubleshooting/areachart_visualization.png -------------------------------------------------------------------------------- /docs/images/troubleshooting/barchart_visualization.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openshift/elasticsearch-operator/81cd6e70c15eea7c947e5f2aee3a9a2ba3b0806e/docs/images/troubleshooting/barchart_visualization.png -------------------------------------------------------------------------------- /docs/images/troubleshooting/dashboard_logs_by_namespace.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openshift/elasticsearch-operator/81cd6e70c15eea7c947e5f2aee3a9a2ba3b0806e/docs/images/troubleshooting/dashboard_logs_by_namespace.png -------------------------------------------------------------------------------- /docs/troubleshooting_resources/query_1.json: -------------------------------------------------------------------------------- 1 | { 2 | "size": 0, 3 | "query": { 4 | "range": { 5 | "@timestamp": { 6 | "gte": "now-3h", 7 | "lt": "now" 8 | } 9 | } 10 | }, 11 | "aggs": { 12 | "Histogram": { 13 | "date_histogram": { 14 | "field": "@timestamp", 15 | "interval": "hour" 16 | }, 17 | "aggs": { 18 | "top_namespaces": { 19 | "terms": { 20 | "size": 10, 21 | "order" : { "_count" : "desc"}, 22 | "field": "kubernetes.namespace_name" 23 | } 24 | } 25 | } 26 | } 27 | } 28 | } -------------------------------------------------------------------------------- /docs/troubleshooting_resources/query_2.json: -------------------------------------------------------------------------------- 1 | { 2 | "query": { 3 | "bool": { 4 | "must": [ 5 | { 6 | "terms": { 7 | "kubernetes.namespace_name": [ 8 | "openshift-cluster-version", 9 | "openshift-kube-apiserver" 10 | ] 11 | } 12 | } 13 | ], 14 | "filter": [ 15 | { 16 | "range": { 17 | "@timestamp": { "lt": "now-24h" } 18 | } 19 | } 20 | ] 21 | } 22 | } 23 | } -------------------------------------------------------------------------------- /files/prometheus_recording_rules.yml: -------------------------------------------------------------------------------- 1 | --- 2 | "groups": 3 | - "name": "logging_elasticsearch.rules" 4 | "rules": 5 | - "expr": | 6 | 
rate(es_threadpool_threads_count{name="write", type="rejected"}[2m]) 7 | "record": "writing:rejected_requests:rate2m" 8 | - "expr": | 9 | rate(es_threadpool_threads_count{name="write", type="completed"}[2m]) 10 | "record": "writing:completed_requests:rate2m" 11 | - "expr": | 12 | sum by (cluster, instance, node) (writing:rejected_requests:rate2m) / on (cluster, instance, node) (writing:completed_requests:rate2m) 13 | "record": "writing:reject_ratio:rate2m" 14 | - "name": "logging_elasticsearch_telemetry.rules" 15 | "rules": 16 | - "expr": | 17 | max by(cluster)(es_cluster_datanodes_number) 18 | "record": "cluster:eo_es_datanodes_total:max" 19 | - "expr": | 20 | sum by(cluster)(es_indices_doc_number) 21 | "record": "cluster:eo_es_documents_created_total:sum" 22 | - "expr": | 23 | sum by(cluster)(es_indices_doc_deleted_number) 24 | "record": "cluster:eo_es_documents_deleted_total:sum" 25 | - "expr": | 26 | max(sum by(pod)(es_cluster_shards_number{type!="active_primary"})) 27 | "record": "pod:eo_es_shards_total:max" 28 | -------------------------------------------------------------------------------- /golangci.yaml: -------------------------------------------------------------------------------- 1 | linters: 2 | disable: 3 | - megacheck 4 | enable: 5 | - bodyclose 6 | - gomodguard 7 | - gosec 8 | - gosimple 9 | - staticcheck 10 | - stylecheck 11 | - unused 12 | linters-settings: 13 | depguard: 14 | list-type: blacklist 15 | include-go-root: false 16 | packages-with-error-message: 17 | # specify an error message to output when a blacklisted package is used 18 | - github.com/sirupsen/logrus: "Only the internal package pkg/log is allowed" 19 | - sigs.k8s.io/controller-runtime/pkg/log: "Only the internal package pkg/log is allowed" 20 | - sigs.k8s.io/controller-runtime/pkg/log/zap: "Only the internal package pkg/log is allowed" 21 | 22 | issues: 23 | exclude: 24 | - "weak cryptographic primitive" 25 | - "Blacklisted import `crypto/md5`" 26 | - "G101: Potential hardcoded credentials" 27 | - "G306: Expect WriteFile permissions to be 0600 or less" 28 | # Excluding configuration per-path, per-linter, per-text and per-source 29 | exclude-rules: 30 | # since we are using ginkgo we must use dot imports 31 | - linters: ["stylecheck"] 32 | text: "ST1001: should not use dot imports" 33 | path: test/ 34 | - text: ".*" 35 | path: pkg/elasticsearch/client.go 36 | - text: ".*" 37 | path: pkg/elasticsearch/client_test.go 38 | -------------------------------------------------------------------------------- /hack/build-image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ "${DEBUG:-}" = "true" ]; then 4 | set -x 5 | fi 6 | set -euo pipefail 7 | 8 | source "$(dirname $0)/common" 9 | 10 | IMAGE_TAG=$1 11 | IMAGE_BUILDER=${2:-imagebuilder} 12 | IMAGE_BUILDER_OPTS=${3:-} 13 | 14 | workdir=${WORKDIR:-$( mktemp --tmpdir -d elasticsearch-operator-build-XXXXXXXXXX )} 15 | if [ -z "${WORKDIR:-}" ] ; then 16 | trap "rm -rf $workdir" EXIT 17 | fi 18 | 19 | if image_is_ubi Dockerfile ; then 20 | pull_ubi_if_needed 21 | fi 22 | 23 | if image_needs_private_repo Dockerfile ; then 24 | repodir=$( get_private_repo_dir $workdir ) 25 | mountarg="-mount $repodir:/etc/yum.repos.d/" 26 | else 27 | mountarg="" 28 | fi 29 | 30 | echo building image $IMAGE_TAG - this may take a few minutes until you see any output . . . 31 | $IMAGE_BUILDER $IMAGE_BUILDER_OPTS $mountarg -t $IMAGE_TAG . 
32 | -------------------------------------------------------------------------------- /hack/cr.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: "logging.openshift.io/v1" 3 | kind: "Elasticsearch" 4 | metadata: 5 | name: "elasticsearch" 6 | annotations: 7 | elasticsearch.openshift.io/loglevel: trace 8 | logging.openshift.io/elasticsearch-cert-management: "true" 9 | logging.openshift.io/elasticsearch-cert.fluentd: "system.logging.fluentd" 10 | spec: 11 | managementState: "Managed" 12 | nodeSpec: 13 | resources: 14 | limits: 15 | memory: 1Gi 16 | requests: 17 | cpu: 100m 18 | memory: 1Gi 19 | nodes: 20 | - nodeCount: 1 21 | roles: 22 | - client 23 | - data 24 | - master 25 | storage: {} 26 | redundancyPolicy: ZeroRedundancy 27 | indexManagement: 28 | policies: 29 | - name: infra-policy 30 | pollInterval: 1m 31 | phases: 32 | hot: 33 | actions: 34 | rollover: 35 | maxAge: 2m 36 | delete: 37 | minAge: 5m 38 | pruneNamespacesInterval: 15m 39 | namespaceSpec: 40 | - namespace: openshift- #note: prefix-query is supported 41 | minAge: 10m 42 | mappings: 43 | - name: infra 44 | policyRef: infra-policy 45 | aliases: 46 | - infra 47 | - logs.infra 48 | -------------------------------------------------------------------------------- /hack/cvo-unmanage-console.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: config.openshift.io/v1 2 | kind: ClusterVersion 3 | metadata: 4 | namespace: openshift-cluster-version 5 | name: version 6 | spec: 7 | overrides: 8 | - kind: Deployment 9 | name: console-operator 10 | namespace: openshift-console-operator 11 | unmanaged: true 12 | group: apps 13 | - kind: ClusterRole 14 | name: console-operator 15 | namespace: "" 16 | unmanaged: true 17 | group: rbac.authorization.k8s.io 18 | - kind: CustomResourceDefinition 19 | name: consoleclidownloads.console.openshift.io 20 | namespace: "" 21 | unmanaged: true 22 | group: apiextensions.k8s.io 23 | - kind: CustomResourceDefinition 24 | name: consoleexternalloglinks.console.openshift.io 25 | namespace: "" 26 | unmanaged: true 27 | group: apiextensions.k8s.io 28 | - kind: CustomResourceDefinition 29 | name: consolelinks.console.openshift.io 30 | namespace: "" 31 | unmanaged: true 32 | group: apiextensions.k8s.io 33 | - kind: CustomResourceDefinition 34 | name: consolenotifications.console.openshift.io 35 | namespace: "" 36 | unmanaged: true 37 | group: apiextensions.k8s.io 38 | - kind: CustomResourceDefinition 39 | name: consoleplugins.console.openshift.io 40 | namespace: "" 41 | unmanaged: true 42 | group: apiextensions.k8s.io 43 | - kind: CustomResourceDefinition 44 | name: consolequickstarts.console.openshift.io 45 | namespace: "" 46 | unmanaged: true 47 | group: apiextensions.k8s.io 48 | - kind: CustomResourceDefinition 49 | name: consoles.config.openshift.io 50 | namespace: "" 51 | unmanaged: true 52 | group: apiextensions.k8s.io 53 | - kind: CustomResourceDefinition 54 | name: consoles.operator.openshift.io 55 | namespace: "" 56 | unmanaged: true 57 | group: apiextensions.k8s.io 58 | - kind: CustomResourceDefinition 59 | name: consoleyamlsamples.console.openshift.io 60 | namespace: "" 61 | unmanaged: true 62 | group: apiextensions.k8s.io 63 | -------------------------------------------------------------------------------- /hack/deploy-example-secrets.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | namespace=$1 6 | 7 | oc -n 
$namespace delete secret elasticsearch ||: 8 | oc -n $namespace create secret generic elasticsearch \ 9 | --from-file=admin-key=/tmp/example-secrets/system.admin.key \ 10 | --from-file=admin-cert=/tmp/example-secrets/system.admin.crt \ 11 | --from-file=admin-ca=/tmp/example-secrets/ca.crt \ 12 | --from-file=/tmp/example-secrets/elasticsearch.key \ 13 | --from-file=/tmp/example-secrets/elasticsearch.crt \ 14 | --from-file=/tmp/example-secrets/logging-es.key \ 15 | --from-file=/tmp/example-secrets/logging-es.crt 16 | 17 | oc -n $namespace delete secret kibana ||: 18 | oc -n $namespace create secret generic kibana \ 19 | --from-file=ca=/tmp/example-secrets/ca.crt \ 20 | --from-file=key=/tmp/example-secrets/system.logging.kibana.key \ 21 | --from-file=cert=/tmp/example-secrets/system.logging.kibana.crt 22 | 23 | oc -n $namespace delete secret kibana-proxy ||: 24 | oc -n $namespace create secret generic kibana-proxy \ 25 | --from-literal=session-secret=abcdefghijklmnopqrstuvwxyz123456 \ 26 | --from-file=server-key=/tmp/example-secrets/kibana-internal.key \ 27 | --from-file=server-cert=/tmp/example-secrets/kibana-internal.crt 28 | -------------------------------------------------------------------------------- /hack/deploy-image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ "${DEBUG:-}" = "true" ]; then 4 | set -x 5 | fi 6 | set -euo pipefail 7 | 8 | echo "Setting up port-forwarding to remote registry ..." 9 | coproc oc -n openshift-image-registry port-forward service/image-registry 5000:5000 10 | trap "kill -15 $COPROC_PID" EXIT 11 | read PORT_FORWARD_STDOUT <&"${COPROC[0]}" 12 | if [[ "$PORT_FORWARD_STDOUT" =~ ^Forwarding.*5000$ ]] ; then 13 | user=$(oc whoami | sed s/://) 14 | echo "Login to registry..." 15 | podman login --tls-verify=false -u ${user} -p $(oc whoami -t) 127.0.0.1:5000 16 | 17 | echo "Pushing image ${IMAGE_TAG} ..." 18 | if podman push --tls-verify=false ${IMAGE_TAG} ; then 19 | oc -n openshift get imagestreams | grep elasticsearch-operator 20 | fi 21 | else 22 | echo "Unexpected message from oc port-forward: $PORT_FORWARD_STDOUT" 23 | fi 24 | -------------------------------------------------------------------------------- /hack/deploy-setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script inits an elasticsearch-operator 3 | # to deploy an Elasticsearch cluster. 
It assumes it is capable of login as a 4 | # user who has the cluster-admin role 5 | 6 | if [ "${DEBUG:-}" = "true" ]; then 7 | set -x 8 | fi 9 | set -euo pipefail 10 | 11 | source "$(dirname $0)/common" 12 | 13 | if [[ -z `oc get project ${NAMESPACE} 2> /dev/null` ]] ; then 14 | cat < 1: 10 | dockerfileInFile = sys.argv[1] 11 | if len(sys.argv) > 2: 12 | metaFile = sys.argv[2] 13 | 14 | with open(dockerfileInFile, 'r') as f: 15 | dockerfileIn = f.read() 16 | 17 | metaFile = os.path.join(os.path.dirname(dockerfileInFile), metaFile) 18 | with open(metaFile, 'r') as f: 19 | metaYaml = yaml.safe_load(f) 20 | 21 | froms = metaYaml['from'] 22 | if froms and len(froms) > 0: 23 | for base in froms: 24 | dockerfileIn = re.sub("FROM " + base['source'],"FROM " + base['target'],dockerfileIn) 25 | 26 | envs = metaYaml['env'] 27 | if envs and len(envs) > 0: 28 | for base in envs: 29 | dockerfileIn = re.sub("ENV " + base['source'],"ENV " + base['target'],dockerfileIn) 30 | 31 | #Remove aliases if only one is defined otherwise it will fail 32 | aliases = [] 33 | froms = 0 34 | for l in dockerfileIn.split("\n"): 35 | if l.startswith("FROM"): 36 | froms = froms + 1 37 | index = l.rfind("AS") 38 | if index > -1: 39 | aliases.append(l[index + 3:]) 40 | if len(aliases) == 1 and froms == 1: 41 | dockerfileIn = re.sub("--from=" + aliases[0],"",dockerfileIn) 42 | 43 | exclude = False 44 | print("### This is a generated file from Dockerfile.in ###") 45 | for l in dockerfileIn.split('\n'): 46 | if l == "## EXCLUDE BEGIN ##": 47 | exclude = True 48 | continue 49 | if l == "## EXCLUDE END ##": 50 | exclude = False 51 | continue 52 | if not exclude: 53 | print(l) 54 | -------------------------------------------------------------------------------- /hack/image-stream-build-config-template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: "Template" 3 | metadata: 4 | name: elasticsearch-dev-build-template 5 | annotations: 6 | description: "Template for creating local builds of logging components from source." 
7 | tags: "infrastructure" 8 | labels: 9 | logging-infra: development 10 | provider: openshift 11 | component: development 12 | objects: 13 | - apiVersion: v1 14 | kind: ImageStream 15 | metadata: 16 | labels: 17 | build: origin-elasticsearch-operator 18 | name: origin-elasticsearch-operator 19 | spec: {} 20 | - apiVersion: v1 21 | kind: BuildConfig 22 | metadata: 23 | labels: 24 | app: elasticsearch-operator 25 | name: elasticsearch-operator 26 | spec: 27 | output: 28 | to: 29 | kind: ImageStreamTag 30 | name: origin-elasticsearch-operator:latest 31 | resources: {} 32 | source: 33 | git: 34 | uri: ${ES_OP_GITHUB_URL} 35 | ref: ${ES_OP_GITHUB_BRANCH} 36 | type: Git 37 | strategy: 38 | dockerStrategy: 39 | dockerfilePath: Dockerfile 40 | type: Docker 41 | parameters: 42 | - 43 | description: 'URL for elasticsearch-operator fork' 44 | name: ES_OP_GITHUB_URL 45 | value: https://github.com/openshift/elasticsearch-operator 46 | - 47 | description: 'branch for elasticsearch-operator fork' 48 | name: ES_OP_GITHUB_BRANCH 49 | value: master 50 | -------------------------------------------------------------------------------- /hack/kibana-cr.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: logging.openshift.io/v1 3 | kind: Kibana 4 | metadata: 5 | name: kibana 6 | spec: 7 | managementState: Managed 8 | replicas: 1 9 | resources: 10 | limits: 11 | memory: 736Mi 12 | requests: 13 | cpu: 100m 14 | memory: 736Mi 15 | -------------------------------------------------------------------------------- /hack/lib/build/release.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This library holds utility functions for building releases. 4 | 5 | # os::build::release::check_for_rpms checks that an RPM release has been built 6 | function os::build::release::check_for_rpms() { 7 | if [[ ! -d "${OS_OUTPUT_RPMPATH}" || ! -d "${OS_OUTPUT_RPMPATH}/repodata" ]]; then 8 | relative_release_path="$( os::util::repository_relative_path "${OS_OUTPUT_RELEASEPATH}" )" 9 | relative_bin_path="$( os::util::repository_relative_path "${OS_OUTPUT_BINPATH}" )" 10 | os::log::fatal "No release RPMs have been built! RPMs are necessary to build container images. 11 | Build them with: 12 | $ OS_BUILD_ENV_PRESERVE=${relative_bin_path}:${relative_release_path} hack/env make build-rpms" 13 | fi 14 | } 15 | -------------------------------------------------------------------------------- /hack/lib/compress.awk: -------------------------------------------------------------------------------- 1 | # Helper functions 2 | function trim(s) { 3 | gsub(/^[ \t\r\n]+|[ \t\r\n]+$/, "", s); 4 | return s; 5 | } 6 | 7 | function printRecordAndCount(record, count) { 8 | print record; 9 | if (count > 1) { 10 | printf("... 
repeated %d times\n", count) 11 | } 12 | } 13 | 14 | BEGIN { 15 | # Before processing, set the record separator to the ASCII record separator character \x1e 16 | RS = "\x1e"; 17 | } 18 | 19 | # This action is executed for each record 20 | { 21 | # Build our current var from the trimmed record 22 | current = trim($0); 23 | 24 | # Bump the count of times we have seen it 25 | seen[current]++; 26 | 27 | # Print the previous record and its count (if it is not identical to the current record) 28 | if (previous && previous != current) { 29 | printRecordAndCount(previous, seen[previous]); 30 | } 31 | 32 | # Store the current record as the previous record 33 | previous = current; 34 | } 35 | 36 | END { 37 | # After processing, print the last record and count if it is non-empty 38 | if (previous) { 39 | printRecordAndCount(previous, seen[previous]); 40 | } 41 | } -------------------------------------------------------------------------------- /hack/lib/init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is meant to be the entrypoint for OpenShift Bash scripts to import all of the support 4 | # libraries at once in order to make Bash script preambles as minimal as possible. This script recur- 5 | # sively `source`s *.sh files in this directory tree. As such, no files should be `source`ed outside 6 | # of this script to ensure that we do not attempt to overwrite read-only variables. 7 | 8 | set -o errexit 9 | set -o nounset 10 | set -o pipefail 11 | 12 | OS_SCRIPT_START_TIME="$( date +%s )"; export OS_SCRIPT_START_TIME 13 | 14 | # os::util::absolute_path returns the absolute path to the directory provided 15 | function os::util::absolute_path() { 16 | local relative_path="$1" 17 | local absolute_path 18 | 19 | pushd "${relative_path}" >/dev/null 20 | relative_path="$( pwd )" 21 | if [[ -h "${relative_path}" ]]; then 22 | absolute_path="$( readlink "${relative_path}" )" 23 | else 24 | absolute_path="${relative_path}" 25 | fi 26 | popd >/dev/null 27 | 28 | echo "${absolute_path}" 29 | } 30 | readonly -f os::util::absolute_path 31 | 32 | # find the absolute path to the root of the Origin source tree 33 | init_source="$( dirname "${BASH_SOURCE}" )/../.." 34 | OS_ROOT="$( os::util::absolute_path "${init_source}" )" 35 | export OS_ROOT 36 | OS_O_A_L_DIR="${OS_ROOT}" 37 | export OS_O_A_L_DIR 38 | cd "${OS_ROOT}" 39 | 40 | for library_file in $( find "${OS_ROOT}/hack/lib" -type f -name '*.sh' -not -path '*/hack/lib/init.sh' ); do 41 | source "${library_file}" 42 | done 43 | 44 | unset library_files library_file init_source 45 | 46 | # all of our Bash scripts need to have the stacktrace 47 | # handler installed to deal with errors 48 | os::log::stacktrace::install 49 | 50 | # All of our Bash scripts need to have access to the 51 | # binaries that we build so we don't have to find 52 | # them before every invocation. 
53 | os::util::environment::update_path_var 54 | 55 | if [[ -z "${OS_TMP_ENV_SET-}" ]]; then 56 | os::util::environment::setup_tmpdir_vars "$( basename "$0" ".sh" )" 57 | fi 58 | 59 | # Allow setting $JUNIT_REPORT to toggle output behavior 60 | if [[ -n "${JUNIT_REPORT:-}" ]]; then 61 | export JUNIT_REPORT_OUTPUT="${LOG_DIR}/raw_test_output.log" 62 | fi -------------------------------------------------------------------------------- /hack/lib/util/find.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script contains helper functions for finding components 4 | # in the Origin repository or on the host machine running scripts. 5 | 6 | # os::util::find::system_binary determines the absolute path to a 7 | # system binary, if it exists. 8 | # 9 | # Globals: 10 | # None 11 | # Arguments: 12 | # - 1: binary name 13 | # Returns: 14 | # - location of the binary 15 | function os::util::find::system_binary() { 16 | local binary_name="$1" 17 | 18 | command -v "${binary_name}" 19 | } 20 | readonly -f os::util::find::system_binary 21 | 22 | # os::util::find::built_binary determines the absolute path to a 23 | # built binary for the current platform, if it exists. 24 | # 25 | # Globals: 26 | # - OS_OUTPUT_BINPATH 27 | # Arguments: 28 | # - 1: binary name 29 | # Returns: 30 | # - location of the binary 31 | function os::util::find::built_binary() { 32 | local binary_name="$1" 33 | 34 | local binary_path; binary_path="${OS_OUTPUT_BINPATH}/$( os::build::host_platform )/${binary_name}" 35 | # we need to check that the path leads to a file 36 | # as directories also have the executable bit set 37 | if [[ -f "${binary_path}" && -x "${binary_path}" ]]; then 38 | echo "${binary_path}" 39 | return 0 40 | else 41 | return 1 42 | fi 43 | } 44 | readonly -f os::util::find::built_binary 45 | 46 | # os::util::find::gopath_binary determines the absolute path to a 47 | # binary installed through the go toolchain, if it exists. 48 | # 49 | # Globals: 50 | # - GOPATH 51 | # Arguments: 52 | # - 1: binary name 53 | # Returns: 54 | # - location of the binary 55 | function os::util::find::gopath_binary() { 56 | local binary_name="$1" 57 | 58 | local old_ifs="${IFS}" 59 | IFS=":" 60 | for part in ${GOPATH}; do 61 | local binary_path="${part}/bin/${binary_name}" 62 | # we need to check that the path leads to a file 63 | # as directories also have the executable bit set 64 | if [[ -f "${binary_path}" && -x "${binary_path}" ]]; then 65 | echo "${binary_path}" 66 | IFS="${old_ifs}" 67 | return 0 68 | fi 69 | done 70 | IFS="${old_ifs}" 71 | return 1 72 | } 73 | readonly -f os::util::find::gopath_binary -------------------------------------------------------------------------------- /hack/lib/util/golang.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This library holds golang related utility functions. 4 | 5 | # os::golang::verify_go_version ensure the go tool exists and is a viable version. 6 | function os::golang::verify_go_version() { 7 | os::util::ensure::system_binary_exists 'go' 8 | 9 | local go_version 10 | go_version=($(go version)) 11 | if [[ "${go_version[2]}" != go1.8* ]]; then 12 | os::log::info "Detected go version: ${go_version[*]}." 13 | if [[ -z "${PERMISSIVE_GO:-}" ]]; then 14 | os::log::fatal "Please install Go version 1.8 or use PERMISSIVE_GO=y to bypass this check." 15 | else 16 | os::log::warning "Detected golang version doesn't match preferred Go version for Origin." 
17 | os::log::warning "This version mismatch could lead to differences in execution between this run and the Origin CI systems." 18 | return 0 19 | fi 20 | fi 21 | } 22 | readonly -f os::golang::verify_go_version 23 | -------------------------------------------------------------------------------- /hack/lib/util/logs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | get_logging_pod_logs() { 4 | local node 5 | for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') ; do 6 | for logfile in $(oc adm node-logs "$node" --path=fluentd/); do 7 | oc adm node-logs "$node" --path="fluentd/$logfile" > $ARTIFACT_DIR/$node-$logfile 8 | done 9 | done 10 | } 11 | -------------------------------------------------------------------------------- /hack/lint-dockerfile: -------------------------------------------------------------------------------- 1 | #!/usr/bin/sh 2 | 3 | set -euo pipefail 4 | 5 | declare -A dockerfiles=(["Dockerfile"]="origin-meta.yaml" ["Dockerfile.dev"]="dev-meta.yaml") 6 | # check dockerfile changes 7 | for d in "${!dockerfiles[@]}"; do 8 | change=$(./hack/generate-dockerfile-from-midstream Dockerfile.in "${dockerfiles[$d]}" | md5sum | cut -d ' ' -f1) 9 | if [ "$change" != "$(md5sum $d | cut -d ' ' -f1)" ] ; then 10 | echo "A change was found in CI file $d that was not sourced from the midstream file Dockerfile.in (or vice versa)." 11 | echo "Please reset the CI file (e.g. $d), update Dockerfile.in, run make gen-dockerfiles and commit the results" 12 | exit 1 13 | fi 14 | done 15 | -------------------------------------------------------------------------------- /hack/prometheus-operator-crd-cluster-roles.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: prometheus-crd-view 5 | labels: 6 | rbac.authorization.k8s.io/aggregate-to-view: "true" 7 | rules: 8 | - apiGroups: ["monitoring.coreos.com"] 9 | resources: ["alertmanagers", "prometheuses", "prometheusrules", "servicemonitors"] 10 | verbs: ["get", "list", "watch"] 11 | --- 12 | kind: ClusterRole 13 | apiVersion: rbac.authorization.k8s.io/v1 14 | metadata: 15 | name: prometheus-crd-edit 16 | labels: 17 | rbac.authorization.k8s.io/aggregate-to-edit: "true" 18 | rbac.authorization.k8s.io/aggregate-to-admin: "true" 19 | rules: 20 | - apiGroups: ["monitoring.coreos.com"] 21 | resources: ["alertmanagers", "prometheuses", "prometheusrules", "servicemonitors"] 22 | verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] 23 | -------------------------------------------------------------------------------- /hack/test-e2e.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | current_dir=$(dirname "${BASH_SOURCE[0]}" ) 6 | source "${current_dir}/lib/init.sh" 7 | 8 | source "${current_dir}/../.bingo/variables.env" 9 | 10 | export DO_SETUP="${DO_SETUP:-true}" 11 | export GO_JUNIT_REPORT="${GO_JUNIT_REPORT:-go-junit-report}" 12 | export JUNITREPORT="${JUNITREPORT:-junitreport}" 13 | export JUNIT_REPORT_OUTPUT="${JUNIT_REPORT_OUTPUT_DIR:-/tmp/artifacts/junit}/junit.out" 14 | 15 | EXCLUDES=" " 16 | for test in $( find "${current_dir}/testing-olm" -type f -name 'test-*.sh' | sort); do 17 | if [[ ${test} =~ .*${EXCLUDES}.* ]] ; then 18 | os::log::info "===============================================================" 19 | os::log::info "skipping e2e that was excluded 
$test " 20 | os::log::info "===============================================================" 21 | continue 22 | fi 23 | os::log::info "===============================================================" 24 | os::log::info "running e2e $test " 25 | os::log::info "===============================================================" 26 | if "${test}" ; then 27 | os::log::info "===========================================================" 28 | os::log::info "e2e $test succeeded at $( date )" 29 | os::log::info "===========================================================" 30 | else 31 | 32 | os::log::error "============= FAILED FAILED ============= " 33 | os::log::error "e2e $test failed at $( date )" 34 | os::log::error "============= FAILED FAILED ============= " 35 | failed="true" 36 | fi 37 | done 38 | 39 | get_logging_pod_logs 40 | 41 | ARTIFACT_DIR="$JUNIT_REPORT_OUTPUT_DIR" os::test::junit::generate_report 42 | 43 | if [[ -n "${failed:-}" ]]; then 44 | exit 1 45 | fi 46 | -------------------------------------------------------------------------------- /hack/testing-olm-upgrade/pre-upgrade-commands.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | repo_dir="$( cd "$(dirname "$0")/../.." ; pwd -P )" 4 | source "$repo_dir/hack/testing-olm-upgrade/upgrade-common" 5 | 6 | # deploy elasticsearch CR 7 | log::info "Deploying ES CR..." 8 | oc -n "openshift-operators-redhat" create -f ${repo_dir}/hack/testing-olm-upgrade/resources/cr.yaml 9 | 10 | check_for_es_pods 11 | 12 | # get a list of the aliases and make sure that we have them based on the expected aliases 13 | try_func_until_result_is_not_empty get_es_aliases_names ${ES_POD_TIMEOUT} 14 | get_es_aliases_names > "${E2E_ARTIFACT_DIR}/old-aliases" 15 | 16 | try_func_until_result_is_not_empty get_es_indices_names ${ES_POD_TIMEOUT} 17 | get_es_indices_names > "${E2E_ARTIFACT_DIR}/old-indices" 18 | 19 | get_current_pvcs > "${E2E_ARTIFACT_DIR}/old-pvcs" 20 | -------------------------------------------------------------------------------- /hack/testing-olm-upgrade/resources/cr.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: "logging.openshift.io/v1" 3 | kind: "Elasticsearch" 4 | metadata: 5 | name: "elasticsearch" 6 | annotations: 7 | elasticsearch.openshift.io/loglevel: trace 8 | spec: 9 | managementState: "Managed" 10 | nodeSpec: 11 | resources: 12 | limits: 13 | memory: 1Gi 14 | requests: 15 | cpu: 100m 16 | memory: 1Gi 17 | nodes: 18 | - nodeCount: 3 19 | roles: 20 | - client 21 | - data 22 | - master 23 | storage: 24 | size: 10G 25 | redundancyPolicy: SingleRedundancy 26 | indexManagement: 27 | policies: 28 | - name: infra-policy 29 | pollInterval: 30m 30 | phases: 31 | hot: 32 | actions: 33 | rollover: 34 | maxAge: 10m 35 | delete: 36 | minAge: 20m 37 | mappings: 38 | - name: infra 39 | policyRef: infra-policy 40 | aliases: 41 | - infra 42 | - logs.infra 43 | -------------------------------------------------------------------------------- /hack/testing-olm-upgrade/test-upgrade-n-1-n.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Given an OLM manifest, verify a green field deployment 3 | # of elasticsearch by asserting EO creates and upgrades 4 | # the resources that beget the operands that make up logging. 5 | 6 | repo_dir="$( cd "$(dirname "$0")/../.." 
; pwd -P )" 7 | source "$repo_dir/hack/testing-olm-upgrade/upgrade-common" 8 | 9 | DO_SETUP=${DO_SETUP:-"true"} 10 | trap cleanup exit 11 | 12 | if [ "${DO_SETUP}" == "true" ] ; then 13 | discover_versions 14 | log::info "Running upgrade test for: $previous_version -> $version" 15 | 16 | deploy_previous_version $previous_version 17 | deploy_es_secret 18 | 19 | "${repo_dir}"/hack/testing-olm-upgrade/pre-upgrade-commands.sh 20 | 21 | log::info "Deploying the ES operator from the catalog..." 22 | # deploy cluster logging catalog from local code 23 | "${repo_dir}"/olm_deploy/scripts/catalog-deploy.sh 24 | 25 | patch_subscription 26 | patch_minkube_version 27 | fi 28 | 29 | #verify deployment is rolled out 30 | check_deployment_rolled_out 31 | 32 | check_for_es_pods 33 | 34 | # wait here until we get indices expected based on the index management spec 35 | expected_aliases="$(get_expected_aliases)" 36 | 37 | # get a list of the aliases and make sure that we have them based on the expected aliases 38 | try_func_until_result_is_not_empty get_es_aliases_names ${ES_POD_TIMEOUT} 39 | new_aliases="$(get_es_aliases_names)" 40 | 41 | # read new 4.5 indices and map them by their names 42 | log::info "Reading new ES indices" 43 | try_func_until_result_is_not_empty get_es_indices_names ${ES_POD_TIMEOUT} 44 | new_indices=$(get_es_indices_names) 45 | 46 | log::info "Validating indices match" 47 | old_indices=$(cat "${E2E_ARTIFACT_DIR}"/old-indices) 48 | check_list_contained_in "$old_indices" "$new_indices" 49 | 50 | log::info "Validating expected aliases exist" 51 | check_list_contained_in "$expected_aliases" "$new_aliases" 52 | 53 | # check to make sure new_aliases is contained in expected_aliases 54 | log::info "Validating aliases match" 55 | old_aliases=$(cat "${E2E_ARTIFACT_DIR}"/old-aliases) 56 | check_list_contained_in "$old_aliases" "$new_aliases" 57 | 58 | current_pvcs="$(get_current_pvcs)" 59 | 60 | log::info "Validating PVCs haven't changed" 61 | # check to make sure the current list of pvcs is contained in (same as) old pvcs 62 | old_pvcs=$(cat "${E2E_ARTIFACT_DIR}"/old-pvcs) 63 | check_list_contained_in "$current_pvcs" "$old_pvcs" 64 | 65 | log::info "Test passed" 66 | -------------------------------------------------------------------------------- /hack/testing-olm/assertions: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/utils" 3 | 4 | assert_eo_exist(){ 5 | # verify ER 6 | os::cmd::try_until_success "oc -n $NAMESPACE get elasticsearch elasticsearch" ${TIMEOUT_MIN} 7 | } 8 | -------------------------------------------------------------------------------- /hack/testing-olm/test-001-operator-sdk-e2e.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | 4 | KUBECONFIG=${KUBECONFIG:-$HOME/.kube/config} 5 | 6 | repo_dir="$(dirname $0)/../.." 7 | source "${repo_dir}/hack/lib/init.sh" 8 | source "${repo_dir}/hack/testing-olm/utils" 9 | 10 | test_name="test-001-operator-sdk" 11 | 12 | test_artifact_dir=$ARTIFACT_DIR/$test_name 13 | if [ ! -d $test_artifact_dir ] ; then 14 | mkdir -p $test_artifact_dir 15 | fi 16 | 17 | TEST_NAMESPACE="${TEST_NAMESPACE:-e2e-test-${RANDOM}}" 18 | 19 | start_seconds=$(date +%s) 20 | cleanup(){ 21 | local return_code="$?" 
22 | 23 | set +e 24 | os::log::info "Running cleanup" 25 | end_seconds=$(date +%s) 26 | runtime="$(($end_seconds - $start_seconds))s" 27 | 28 | if [ "$return_code" != "0" ] ; then 29 | gather_logging_resources ${TEST_NAMESPACE} $test_artifact_dir 30 | fi 31 | 32 | if [ "${SKIP_CLEANUP:-false}" == "false" ] ; then 33 | ${repo_dir}/olm_deploy/scripts/catalog-uninstall.sh 34 | ${repo_dir}/olm_deploy/scripts/operator-uninstall.sh 35 | oc delete ns/${TEST_NAMESPACE} --wait=true --ignore-not-found --force --grace-period=0 36 | fi 37 | 38 | set -e 39 | exit ${return_code} 40 | } 41 | trap cleanup exit 42 | 43 | if oc get namespace ${TEST_NAMESPACE} > /dev/null 2>&1 ; then 44 | echo using existing project ${TEST_NAMESPACE} 45 | else 46 | oc create namespace ${TEST_NAMESPACE} 47 | fi 48 | 49 | if [ "${DO_SETUP:-true}" == "true" ] ; then 50 | # install the catalog containing the elasticsearch operator csv 51 | export ELASTICSEARCH_OPERATOR_NAMESPACE=${TEST_NAMESPACE} 52 | deploy_elasticsearch_operator 53 | fi 54 | 55 | TEST_OPERATOR_NAMESPACE=${TEST_NAMESPACE} \ 56 | TEST_WATCH_NAMESPACE=${TEST_NAMESPACE} \ 57 | go test ./test/e2e/... -kubeconfig=${KUBECONFIG} -parallel=1 -timeout 1500s 2>&1 -run "TestKibana|TestElasticsearchCluster" | \ 58 | $GO_JUNIT_REPORT | awk '//,/<\/properties>/ {next} {print}' > "$JUNIT_REPORT_OUTPUT_DIR/$test_name.xml" 59 | -------------------------------------------------------------------------------- /hack/testing-olm/test-200-verify-es-metrics-access.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This test verifies only serviceaccounts with the desired rolebindings are 3 | # allowed to retrieve metrices from elasticsearch 4 | set -euo pipefail 5 | 6 | KUBECONFIG=${KUBECONFIG:-$HOME/.kube/config} 7 | 8 | repo_dir="$(dirname $0)/../.." 9 | source "${repo_dir}/hack/lib/init.sh" 10 | source "${repo_dir}/hack/testing-olm/utils" 11 | 12 | test_name="test-200-verify-es-metrics-access" 13 | 14 | test_artifact_dir=$ARTIFACT_DIR/$(basename ${BASH_SOURCE[0]}) 15 | if [ ! -d $test_artifact_dir ] ; then 16 | mkdir -p $test_artifact_dir 17 | fi 18 | 19 | os::test::junit::declare_suite_start "[Elasticsearch] Verify Metrics Access" 20 | 21 | suffix=$RANDOM 22 | TEST_NAMESPACE="${TEST_NAMESPACE:-e2e-test-${suffix}}" 23 | UNAUTHORIZED_SA="unauthorized-sa-${suffix}" 24 | AUTHORIZED_SA="authorized-sa-${suffix}" 25 | CLUSTERROLE="prometheus-k8s-${suffix}" 26 | 27 | start_seconds=$(date +%s) 28 | cleanup(){ 29 | local return_code="$?" 
30 | 31 | os::test::junit::declare_suite_end 32 | 33 | set +e 34 | os::log::info "Running cleanup" 35 | end_seconds=$(date +%s) 36 | runtime="$(($end_seconds - $start_seconds))s" 37 | 38 | if [ "$return_code" != "0" ] ; then 39 | gather_logging_resources ${TEST_NAMESPACE} $test_artifact_dir 40 | fi 41 | 42 | if [ "${SKIP_CLEANUP:-false}" == "false" ] ; then 43 | for item in "ns/${TEST_NAMESPACE}" "ns/openshift-operators-redhat"; do 44 | oc delete $item --wait=true --ignore-not-found --force --grace-period=0 45 | done 46 | oc delete clusterrole ${CLUSTERROLE} >> $test_artifact_dir/cleanup.log 2>&1 ||: 47 | oc delete clusterrolebinding ${CLUSTERROLE} >> $test_artifact_dir/cleanup.log 2>&1 ||: 48 | oc delete clusterrolebinding view-${CLUSTERROLE} >> $test_artifact_dir/cleanup.log 2>&1 ||: 49 | oc delete clusterrolebinding view-${CLUSTERROLE}-unauth >> $test_artifact_dir/cleanup.log 2>&1 ||: 50 | fi 51 | 52 | set -e 53 | exit ${return_code} 54 | } 55 | trap cleanup exit 56 | 57 | for item in "${TEST_NAMESPACE}" "openshift-operators-redhat" ; do 58 | if oc get project ${item} > /dev/null 2>&1 ; then 59 | echo using existing project ${item} 60 | else 61 | oc create namespace ${item} 62 | fi 63 | done 64 | 65 | if [ "${DO_SETUP:-true}" == "true" ] ; then 66 | export ELASTICSEARCH_OPERATOR_NAMESPACE=${TEST_NAMESPACE} 67 | deploy_elasticsearch_operator 68 | fi 69 | 70 | CLUSTERROLE=${CLUSTERROLE} AUTHORIZED_SA=${AUTHORIZED_SA} UNAUTHORIZED_SA=${UNAUTHORIZED_SA} \ 71 | TEST_OPERATOR_NAMESPACE=${TEST_NAMESPACE} \ 72 | TEST_WATCH_NAMESPACE=${TEST_NAMESPACE} \ 73 | go test ./test/e2e/... -kubeconfig=${KUBECONFIG} -parallel=1 -timeout 1500s -run TestElasticsearchOperatorMetrics | \ 74 | $GO_JUNIT_REPORT | awk '//,/<\/properties>/ {next} {print}' > "$JUNIT_REPORT_OUTPUT_DIR/$test_name.xml" 75 | -------------------------------------------------------------------------------- /hack/testing-olm/test-657-im-block-autocreate-for-write-suffix.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | KUBECONFIG=${KUBECONFIG:-$HOME/.kube/config} 6 | 7 | repo_dir="$(dirname $0)/../.." 8 | source "${repo_dir}/hack/lib/init.sh" 9 | source "${repo_dir}/hack/testing-olm/utils" 10 | 11 | test_name="test-657-im-block-autocreate-for-write-suffix" 12 | 13 | test_artifact_dir=$ARTIFACT_DIR/$(basename ${BASH_SOURCE[0]}) 14 | if [ ! -d $test_artifact_dir ] ; then 15 | mkdir -p $test_artifact_dir 16 | fi 17 | 18 | os::test::junit::declare_suite_start "[Elasticsearch] Index Management Block Auto-Create For Write Suffix" 19 | 20 | TEST_NAMESPACE="${TEST_NAMESPACE:-e2e-test-${RANDOM}}" 21 | 22 | start_seconds=$(date +%s) 23 | cleanup(){ 24 | local return_code="$?" 
25 | 26 | os::test::junit::declare_suite_end 27 | 28 | set +e 29 | os::log::info "Running cleanup" 30 | end_seconds=$(date +%s) 31 | runtime="$(($end_seconds - $start_seconds))s" 32 | 33 | if [ "$return_code" != "0" ] ; then 34 | gather_logging_resources ${TEST_NAMESPACE} $test_artifact_dir 35 | fi 36 | 37 | if [ "${SKIP_CLEANUP:-false}" == "false" ] ; then 38 | for item in "ns/${TEST_NAMESPACE}" "ns/openshift-operators-redhat"; do 39 | oc delete $item --wait=true --ignore-not-found --force --grace-period=0 40 | done 41 | fi 42 | 43 | set -e 44 | exit ${return_code} 45 | } 46 | trap cleanup exit 47 | 48 | for item in "${TEST_NAMESPACE}" "openshift-operators-redhat" ; do 49 | if oc get project ${item} > /dev/null 2>&1 ; then 50 | echo using existing project ${item} 51 | else 52 | oc create namespace ${item} 53 | fi 54 | done 55 | 56 | if [ "${DO_SETUP:-true}" == "true" ] ; then 57 | export ELASTICSEARCH_OPERATOR_NAMESPACE=${TEST_NAMESPACE} 58 | deploy_elasticsearch_operator 59 | fi 60 | 61 | TEST_WATCH_NAMESPACE=${TEST_NAMESPACE} TEST_OPERATOR_NAMESPACE=${TEST_NAMESPACE} \ 62 | go test ./test/e2e/... -kubeconfig=${KUBECONFIG} -parallel=1 -timeout 1500s -run TestElasticsearchWrite | \ 63 | $GO_JUNIT_REPORT | awk '//,/<\/properties>/ {next} {print}' > "$JUNIT_REPORT_OUTPUT_DIR/$test_name.xml" 64 | -------------------------------------------------------------------------------- /internal/constants/constants.go: -------------------------------------------------------------------------------- 1 | package constants 2 | 3 | import "github.com/openshift/elasticsearch-operator/internal/utils" 4 | 5 | const ( 6 | ProxyName = "cluster" 7 | OAuthName = "cluster" 8 | TrustedCABundleKey = "ca-bundle.crt" 9 | TrustedCABundleMountDir = "/etc/pki/ca-trust/extracted/pem/" 10 | TrustedCABundleMountFile = "tls-ca-bundle.pem" 11 | InjectTrustedCABundleLabel = "config.openshift.io/inject-trusted-cabundle" 12 | TrustedCABundleHashName = "logging.openshift.io/hash" 13 | KibanaTrustedCAName = "kibana-trusted-ca-bundle" 14 | SecretHashPrefix = "logging.openshift.io/" 15 | ElasticsearchDefaultImage = "quay.io/openshift-logging/elasticsearch6:6.8.1" 16 | ProxyDefaultImage = "quay.io/openshift-logging/elasticsearch-proxy:1.0" 17 | CuratorDefaultImage = "quay.io/openshift-logging/curator5:5.8.1" 18 | TheoreticalShardMaxSizeInMB = 40960 19 | 20 | // OcpTemplatePrefix is the prefix all operator generated templates 21 | OcpTemplatePrefix = "ocp-gen" 22 | 23 | SecurityIndex = ".security" 24 | 25 | EOCertManagementLabel = "logging.openshift.io/elasticsearch-cert-management" 26 | EOComponentCertPrefix = "logging.openshift.io/elasticsearch-cert." 
27 | 28 | ConsoleDashboardLabel = "console.openshift.io/dashboard" 29 | LoggingHashLabel = "logging.openshift.io/hash" 30 | ElasticsearchDashboardFileName = "openshift-elasticsearch.json" 31 | ) 32 | 33 | var ( 34 | ReconcileForGlobalProxyList = []string{KibanaTrustedCAName} 35 | packagedCuratorImage = utils.LookupEnvWithDefault("RELATED_IMAGE_CURATOR", CuratorDefaultImage) 36 | ExpectedSecretKeys = []string{ 37 | "admin-ca", 38 | "admin-cert", 39 | "admin-key", 40 | "elasticsearch.crt", 41 | "elasticsearch.key", 42 | "logging-es.crt", 43 | "logging-es.key", 44 | } 45 | ) 46 | 47 | func PackagedCuratorImage() string { 48 | return packagedCuratorImage 49 | } 50 | -------------------------------------------------------------------------------- /internal/elasticsearch/cluster_test.go: -------------------------------------------------------------------------------- 1 | package elasticsearch 2 | 3 | import ( 4 | "testing" 5 | 6 | elasticsearchv1 "github.com/openshift/elasticsearch-operator/apis/logging/v1" 7 | "github.com/openshift/elasticsearch-operator/internal/elasticsearch/esclient" 8 | "github.com/openshift/elasticsearch-operator/test/helpers" 9 | appsv1 "k8s.io/api/apps/v1" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | "k8s.io/apimachinery/pkg/runtime" 12 | "sigs.k8s.io/controller-runtime/pkg/client/fake" 13 | ) 14 | 15 | func TestDiskUtilizationBelowFloodWatermark(t *testing.T) { 16 | nodes = map[string][]NodeTypeInterface{} 17 | var ( 18 | chatter *helpers.FakeElasticsearchChatter 19 | client esclient.Client 20 | k8sClient = fake.NewFakeClient() 21 | cluster = &elasticsearchv1.Elasticsearch{ 22 | ObjectMeta: metav1.ObjectMeta{ 23 | Name: "elasticsearch", 24 | Namespace: "openshift-logging", 25 | }, 26 | } 27 | ) 28 | 29 | const ( 30 | esCluster = "elasticsearch" 31 | esNamespace = "openshift-logging" 32 | ) 33 | 34 | chatter = helpers.NewFakeElasticsearchChatter(map[string]helpers.FakeElasticsearchResponses{ 35 | "_nodes/stats/fs": { 36 | { 37 | StatusCode: 200, 38 | Body: `{"nodes": {"7EN-Wa_EQC6LoANvWcoyHQ": {"name": "elasticsearch-cdm-1-deadbeef", "fs": {"total": {"total_in_bytes": 32737570816, "free_in_bytes": 16315211776, "available_in_bytes": 16315211776}}}}}`, 39 | }, 40 | }, 41 | }) 42 | client = helpers.NewFakeElasticsearchClient(esCluster, esNamespace, k8sClient, chatter) 43 | 44 | er := ElasticsearchRequest{ 45 | cluster: cluster, 46 | client: k8sClient, 47 | esClient: client, 48 | } 49 | 50 | // Populate nodes in operator memory 51 | key := nodeMapKey(esCluster, esNamespace) 52 | nodes[key] = populateSingleNode(esCluster) 53 | 54 | if isDiskUtilizationBelow := er.isDiskUtilizationBelowFloodWatermark(); isDiskUtilizationBelow != true { 55 | t.Errorf("Expected threshold value to be below 95 percent but got more.") 56 | } 57 | } 58 | 59 | func populateSingleNode(clusterName string) []NodeTypeInterface { 60 | nodes := []NodeTypeInterface{} 61 | deployments := []runtime.Object{ 62 | &appsv1.Deployment{ 63 | ObjectMeta: metav1.ObjectMeta{ 64 | Name: "elasticsearch-cdm-1-deadbeef", 65 | Namespace: "openshift-logging", 66 | }, 67 | }, 68 | } 69 | for _, dpl := range deployments { 70 | dpl := dpl.(*appsv1.Deployment) 71 | node := &deploymentNode{ 72 | clusterName: clusterName, 73 | self: *dpl, 74 | } 75 | nodes = append(nodes, node) 76 | } 77 | return nodes 78 | } 79 | -------------------------------------------------------------------------------- /internal/elasticsearch/cr.go: -------------------------------------------------------------------------------- 1 | package 
elasticsearch 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/ViaQ/logerr/v2/kverrors" 7 | loggingv1 "github.com/openshift/elasticsearch-operator/apis/logging/v1" 8 | apierrors "k8s.io/apimachinery/pkg/api/errors" 9 | "k8s.io/apimachinery/pkg/runtime/schema" 10 | "sigs.k8s.io/controller-runtime/pkg/client" 11 | ) 12 | 13 | func GetElasticsearchCR(c client.Client, ns string) (*loggingv1.Elasticsearch, error) { 14 | esl := &loggingv1.ElasticsearchList{} 15 | opts := &client.ListOptions{Namespace: ns} 16 | 17 | if err := c.List(context.TODO(), esl, opts); err != nil { 18 | if apierrors.IsNotFound(err) { 19 | return nil, err 20 | } 21 | 22 | return nil, kverrors.Wrap(err, "unable to get elasticsearch instance", 23 | "namespace", ns, 24 | ) 25 | } 26 | 27 | if len(esl.Items) == 0 { 28 | gr := schema.GroupResource{ 29 | Group: loggingv1.GroupVersion.Group, 30 | Resource: "Elasticsearch", 31 | } 32 | return nil, apierrors.NewNotFound(gr, "elasticsearch") 33 | } 34 | 35 | return &esl.Items[0], nil 36 | } 37 | -------------------------------------------------------------------------------- /internal/elasticsearch/defaults.go: -------------------------------------------------------------------------------- 1 | package elasticsearch 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/ViaQ/logerr/v2/kverrors" 7 | api "github.com/openshift/elasticsearch-operator/apis/logging/v1" 8 | ) 9 | 10 | const ( 11 | modeUnique = "unique" 12 | modeSharedOps = "shared_ops" 13 | 14 | defaultMode = modeSharedOps 15 | // ES 16 | defaultESCpuRequest = "100m" 17 | defaultESMemoryLimit = "4Gi" 18 | defaultESMemoryRequest = "1Gi" 19 | // ESProxy 20 | defaultESProxyCPURequest = "100m" 21 | defaultESProxyMemoryLimit = "256Mi" 22 | defaultESProxyMemoryRequest = "256Mi" 23 | 24 | maxMasterCount = 3 25 | maxPrimaryShardCount = 5 26 | 27 | elasticsearchCertsPath = "/etc/openshift/elasticsearch/secret" 28 | elasticsearchConfigPath = "/usr/share/java/elasticsearch/config" 29 | heapDumpLocation = "/elasticsearch/persistent/heapdump.hprof" 30 | 31 | yellowClusterState = "yellow" 32 | greenClusterState = "green" 33 | ) 34 | 35 | var desiredClusterStates = []string{yellowClusterState, greenClusterState} 36 | 37 | func kibanaIndexMode(mode string) (string, error) { 38 | if mode == "" { 39 | return defaultMode, nil 40 | } 41 | if mode == modeUnique || mode == modeSharedOps { 42 | return mode, nil 43 | } 44 | return "", kverrors.New("invalid kibana index mode provided", 45 | "mode", mode) 46 | } 47 | 48 | func esUnicastHost(clusterName, namespace string) string { 49 | return fmt.Sprintf("%v-cluster.%v.svc", clusterName, namespace) 50 | } 51 | 52 | func CalculatePrimaryCount(dpl *api.Elasticsearch) int { 53 | dataNodeCount := int(GetDataCount(dpl)) 54 | if dataNodeCount > maxPrimaryShardCount { 55 | return maxPrimaryShardCount 56 | } 57 | 58 | // we can just return this without error checking because we validate 59 | // we have at least one data node in the cluster 60 | return dataNodeCount 61 | } 62 | 63 | func CalculateReplicaCount(dpl *api.Elasticsearch) int { 64 | dataNodeCount := int(GetDataCount(dpl)) 65 | repType := dpl.Spec.RedundancyPolicy 66 | switch repType { 67 | case api.FullRedundancy: 68 | return dataNodeCount - 1 69 | case api.MultipleRedundancy: 70 | return (dataNodeCount - 1) / 2 71 | case api.SingleRedundancy: 72 | return 1 73 | case api.ZeroRedundancy: 74 | return 0 75 | default: 76 | if dataNodeCount == 1 { 77 | return 0 78 | } 79 | return 1 80 | } 81 | } 82 | 
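// exampleShardLayout is an editor's sketch, not part of the upstream file: it shows
// how the helpers above combine for a hypothetical five-data-node cluster using
// MultipleRedundancy. Primaries are capped at maxPrimaryShardCount (5) and replicas
// work out to (dataNodeCount - 1) / 2 = 2.
func exampleShardLayout() {
	dpl := &api.Elasticsearch{
		Spec: api.ElasticsearchSpec{
			RedundancyPolicy: api.MultipleRedundancy,
			Nodes: []api.ElasticsearchNode{
				{
					Roles:     []api.ElasticsearchNodeRole{api.ElasticsearchRoleData},
					NodeCount: 5,
				},
			},
		},
	}

	// With 5 data nodes: CalculatePrimaryCount -> 5, CalculateReplicaCount -> 2.
	fmt.Printf("primaries=%d replicas=%d\n", CalculatePrimaryCount(dpl), CalculateReplicaCount(dpl))
}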
-------------------------------------------------------------------------------- /internal/elasticsearch/defaults_test.go: -------------------------------------------------------------------------------- 1 | package elasticsearch 2 | 3 | import ( 4 | . "github.com/onsi/ginkgo" 5 | . "github.com/onsi/gomega" 6 | api "github.com/openshift/elasticsearch-operator/apis/logging/v1" 7 | ) 8 | 9 | var ( 10 | dpl *api.Elasticsearch 11 | dataNodeCount int 12 | dataNode api.ElasticsearchNode 13 | ) 14 | 15 | var _ = Describe("defaults", func() { 16 | defer GinkgoRecover() 17 | 18 | BeforeEach(func() { 19 | dataNode = api.ElasticsearchNode{ 20 | Roles: []api.ElasticsearchNodeRole{ 21 | api.ElasticsearchRoleClient, 22 | api.ElasticsearchRoleData, 23 | }, 24 | } 25 | }) 26 | 27 | Describe("#getPrimaryShardCount with excess data nodes", func() { 28 | JustBeforeEach(func() { 29 | dataNodeCount = 20 30 | dataNode.NodeCount = int32(dataNodeCount) 31 | 32 | dpl = &api.Elasticsearch{ 33 | Spec: api.ElasticsearchSpec{ 34 | Nodes: []api.ElasticsearchNode{ 35 | dataNode, 36 | }, 37 | }, 38 | } 39 | }) 40 | It("should return maxPrimaryShardCount", func() { 41 | Expect(CalculatePrimaryCount(dpl)).To(Equal(maxPrimaryShardCount)) 42 | }) 43 | }) 44 | 45 | Describe("#getPrimaryShardCount with 3 data nodes", func() { 46 | JustBeforeEach(func() { 47 | dataNodeCount = 3 48 | dataNode.NodeCount = int32(dataNodeCount) 49 | 50 | dpl = &api.Elasticsearch{ 51 | Spec: api.ElasticsearchSpec{ 52 | Nodes: []api.ElasticsearchNode{ 53 | dataNode, 54 | }, 55 | }, 56 | } 57 | }) 58 | It("should return data node count", func() { 59 | Expect(CalculatePrimaryCount(dpl)).To(Equal(dataNodeCount)) 60 | }) 61 | }) 62 | }) 63 | -------------------------------------------------------------------------------- /internal/elasticsearch/elasticsearch_suite_test.go: -------------------------------------------------------------------------------- 1 | package elasticsearch_test 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/onsi/ginkgo" 7 | . 
"github.com/onsi/gomega" 8 | ) 9 | 10 | func TestElasticsearchSuite(t *testing.T) { 11 | RegisterFailHandler(Fail) 12 | RunSpecs(t, "Elasticsearch Suite") 13 | } 14 | -------------------------------------------------------------------------------- /internal/elasticsearch/esclient/client_test.go: -------------------------------------------------------------------------------- 1 | package esclient 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/ViaQ/logerr/v2/log" 7 | ) 8 | 9 | func TestHeaderGenEmptyToken(t *testing.T) { 10 | logger := log.NewLogger("client-testing") 11 | tokenFile := "../../../test/files/emptyToken" 12 | 13 | _, ok := readSAToken(logger, tokenFile) 14 | 15 | if ok { 16 | t.Errorf("Expected to be unable to read file [%s] due to empty file but could", tokenFile) 17 | } 18 | } 19 | 20 | func TestHeaderGenWithToken(t *testing.T) { 21 | logger := log.NewLogger("client-testing") 22 | tokenFile := "../../../test/files/testToken" 23 | 24 | expected := "test\n" 25 | 26 | actual, ok := readSAToken(logger, tokenFile) 27 | 28 | if !ok { 29 | t.Errorf("Expected to be able to read file [%s] but couldn't", tokenFile) 30 | } else { 31 | if actual != expected { 32 | t.Errorf("Expected %q but got %q", expected, actual) 33 | } 34 | } 35 | } 36 | 37 | func TestHeaderGenWithNoToken(t *testing.T) { 38 | logger := log.NewLogger("client-testing") 39 | tokenFile := "../../../test/files/errorToken" 40 | 41 | _, ok := readSAToken(logger, tokenFile) 42 | 43 | if ok { 44 | t.Errorf("Expected to be unable to read file [%s]", tokenFile) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /internal/elasticsearch/esclient/health.go: -------------------------------------------------------------------------------- 1 | package esclient 2 | 3 | import ( 4 | "net/http" 5 | 6 | api "github.com/openshift/elasticsearch-operator/apis/logging/v1" 7 | ) 8 | 9 | func (ec *esClient) GetClusterHealth() (api.ClusterHealth, error) { 10 | clusterHealth := api.ClusterHealth{} 11 | 12 | payload := &EsRequest{ 13 | Method: http.MethodGet, 14 | URI: "_cluster/health", 15 | } 16 | 17 | ec.fnSendEsRequest(ec.log, ec.cluster, ec.namespace, payload, ec.k8sClient) 18 | 19 | if payload.Error != nil { 20 | return clusterHealth, payload.Error 21 | } 22 | 23 | clusterHealth.Status = parseString("status", payload.ResponseBody) 24 | clusterHealth.NumNodes = parseInt32("number_of_nodes", payload.ResponseBody) 25 | clusterHealth.NumDataNodes = parseInt32("number_of_data_nodes", payload.ResponseBody) 26 | clusterHealth.ActivePrimaryShards = parseInt32("active_primary_shards", payload.ResponseBody) 27 | clusterHealth.ActiveShards = parseInt32("active_shards", payload.ResponseBody) 28 | clusterHealth.RelocatingShards = parseInt32("relocating_shards", payload.ResponseBody) 29 | clusterHealth.InitializingShards = parseInt32("initializing_shards", payload.ResponseBody) 30 | clusterHealth.UnassignedShards = parseInt32("unassigned_shards", payload.ResponseBody) 31 | clusterHealth.PendingTasks = parseInt32("number_of_pending_tasks", payload.ResponseBody) 32 | 33 | return clusterHealth, nil 34 | } 35 | 36 | func (ec *esClient) GetClusterHealthStatus() (string, error) { 37 | payload := &EsRequest{ 38 | Method: http.MethodGet, 39 | URI: "_cluster/health", 40 | } 41 | 42 | ec.fnSendEsRequest(ec.log, ec.cluster, ec.namespace, payload, ec.k8sClient) 43 | 44 | status := "" 45 | if payload.ResponseBody["status"] != nil { 46 | if statusString, ok := payload.ResponseBody["status"].(string); ok { 47 | status 
= statusString 48 | } 49 | } 50 | 51 | return status, payload.Error 52 | } 53 | 54 | func (ec *esClient) GetClusterNodeCount() (int32, error) { 55 | payload := &EsRequest{ 56 | Method: http.MethodGet, 57 | URI: "_cluster/health", 58 | } 59 | 60 | ec.fnSendEsRequest(ec.log, ec.cluster, ec.namespace, payload, ec.k8sClient) 61 | 62 | nodeCount := int32(0) 63 | if nodeCountFloat, ok := payload.ResponseBody["number_of_nodes"].(float64); ok { 64 | // we expect at most double digit numbers here, eg cluster with 15 nodes 65 | nodeCount = int32(nodeCountFloat) 66 | } 67 | 68 | return nodeCount, payload.Error 69 | } 70 | -------------------------------------------------------------------------------- /internal/elasticsearch/esclient/helper.go: -------------------------------------------------------------------------------- 1 | package esclient 2 | 3 | import ( 4 | "strings" 5 | ) 6 | 7 | func parseBool(path string, interfaceMap map[string]interface{}) bool { 8 | value := walkInterfaceMap(path, interfaceMap) 9 | 10 | if parsedBool, ok := value.(bool); ok { 11 | return parsedBool 12 | } else { 13 | return false 14 | } 15 | } 16 | 17 | func parseString(path string, interfaceMap map[string]interface{}) string { 18 | value := walkInterfaceMap(path, interfaceMap) 19 | 20 | if parsedString, ok := value.(string); ok { 21 | return parsedString 22 | } else { 23 | return "" 24 | } 25 | } 26 | 27 | func parseInt32(path string, interfaceMap map[string]interface{}) int32 { 28 | return int32(parseFloat64(path, interfaceMap)) 29 | } 30 | 31 | func parseFloat64(path string, interfaceMap map[string]interface{}) float64 { 32 | value := walkInterfaceMap(path, interfaceMap) 33 | 34 | if parsedFloat, ok := value.(float64); ok { 35 | return parsedFloat 36 | } else { 37 | return float64(-1) 38 | } 39 | } 40 | 41 | func walkInterfaceMap(path string, interfaceMap map[string]interface{}) interface{} { 42 | current := interfaceMap 43 | keys := strings.Split(path, ".") 44 | keyCount := len(keys) 45 | 46 | for index, key := range keys { 47 | if current[key] != nil { 48 | if index+1 < keyCount { 49 | current = current[key].(map[string]interface{}) 50 | } else { 51 | return current[key] 52 | } 53 | } else { 54 | return nil 55 | } 56 | } 57 | 58 | return nil 59 | } 60 | -------------------------------------------------------------------------------- /internal/elasticsearch/esclient/nodes.go: -------------------------------------------------------------------------------- 1 | package esclient 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "strings" 7 | 8 | "github.com/inhies/go-bytesize" 9 | ) 10 | 11 | func (ec *esClient) GetNodeDiskUsage(nodeName string) (string, float64, error) { 12 | payload := &EsRequest{ 13 | Method: http.MethodGet, 14 | URI: "_nodes/stats/fs", 15 | } 16 | 17 | ec.fnSendEsRequest(ec.log, ec.cluster, ec.namespace, payload, ec.k8sClient) 18 | 19 | usage := "" 20 | percentUsage := float64(-1) 21 | 22 | if payload, ok := payload.ResponseBody["nodes"].(map[string]interface{}); ok { 23 | for _, stats := range payload { 24 | // ignore the key name here, it is the node UUID 25 | if parseString("name", stats.(map[string]interface{})) == nodeName { 26 | total := parseFloat64("fs.total.total_in_bytes", stats.(map[string]interface{})) 27 | available := parseFloat64("fs.total.available_in_bytes", stats.(map[string]interface{})) 28 | 29 | percentUsage = (total - available) / total * 100.00 30 | usage = strings.TrimSuffix(fmt.Sprintf("%s", bytesize.New(total)-bytesize.New(available)), "B") 31 | 32 | break 33 | } 34 | } 35 | } 
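	// Worked example (editor's note): with the node stats used in cluster_test.go above
	// (total 32,737,570,816 bytes, available 16,315,211,776 bytes), percentUsage comes to
	// roughly 50.2 and usage to roughly "15.29G", comfortably below the 95 percent flood watermark.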
36 | 37 | return usage, percentUsage, payload.Error 38 | } 39 | -------------------------------------------------------------------------------- /internal/elasticsearch/esclient/shards.go: -------------------------------------------------------------------------------- 1 | package esclient 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | 7 | api "github.com/openshift/elasticsearch-operator/apis/logging/v1" 8 | ) 9 | 10 | func (ec *esClient) ClearTransientShardAllocation() (bool, error) { 11 | payload := &EsRequest{ 12 | Method: http.MethodPut, 13 | URI: "_cluster/settings", 14 | RequestBody: fmt.Sprintf("{%q:{%q:null}}", "transient", "cluster.routing.allocation.enable"), 15 | } 16 | 17 | ec.fnSendEsRequest(ec.log, ec.cluster, ec.namespace, payload, ec.k8sClient) 18 | 19 | acknowledged := false 20 | if acknowledgedBool, ok := payload.ResponseBody["acknowledged"].(bool); ok { 21 | acknowledged = acknowledgedBool 22 | } 23 | return payload.StatusCode == 200 && acknowledged, ec.errorCtx().Wrap(payload.Error, "failed to clear shard allocation", 24 | "response", payload.RawResponseBody) 25 | } 26 | 27 | func (ec *esClient) SetShardAllocation(state api.ShardAllocationState) (bool, error) { 28 | payload := &EsRequest{ 29 | Method: http.MethodPut, 30 | URI: "_cluster/settings", 31 | RequestBody: fmt.Sprintf("{%q:{%q:%q}}", "persistent", "cluster.routing.allocation.enable", state), 32 | } 33 | 34 | ec.fnSendEsRequest(ec.log, ec.cluster, ec.namespace, payload, ec.k8sClient) 35 | 36 | acknowledged := false 37 | if acknowledgedBool, ok := payload.ResponseBody["acknowledged"].(bool); ok { 38 | acknowledged = acknowledgedBool 39 | } 40 | return payload.StatusCode == 200 && acknowledged, ec.errorCtx().Wrap(payload.Error, "failed to set shard allocation") 41 | } 42 | 43 | func (ec *esClient) GetShardAllocation() (string, error) { 44 | payload := &EsRequest{ 45 | Method: http.MethodGet, 46 | URI: "_cluster/settings?include_defaults=true", 47 | } 48 | 49 | ec.fnSendEsRequest(ec.log, ec.cluster, ec.namespace, payload, ec.k8sClient) 50 | 51 | var allocation interface{} 52 | 53 | if value := walkInterfaceMap( 54 | "defaults.cluster.routing.allocation.enable", 55 | payload.ResponseBody); value != nil { 56 | allocation = value 57 | } 58 | 59 | if value := walkInterfaceMap( 60 | "persistent.cluster.routing.allocation.enable", 61 | payload.ResponseBody); value != nil { 62 | allocation = value 63 | } 64 | 65 | if value := walkInterfaceMap( 66 | "transient.cluster.routing.allocation.enable", 67 | payload.ResponseBody); value != nil { 68 | allocation = value 69 | } 70 | 71 | allocationString, ok := allocation.(string) 72 | if !ok { 73 | allocationString = "" 74 | } 75 | 76 | return allocationString, payload.Error 77 | } 78 | -------------------------------------------------------------------------------- /internal/elasticsearch/esclient/templates_test.go: -------------------------------------------------------------------------------- 1 | package esclient_test 2 | 3 | import ( 4 | "net/http" 5 | "testing" 6 | 7 | "github.com/ViaQ/logerr/v2/kverrors" 8 | estypes "github.com/openshift/elasticsearch-operator/internal/types/elasticsearch" 9 | testhelpers "github.com/openshift/elasticsearch-operator/test/helpers" 10 | "sigs.k8s.io/controller-runtime/pkg/client/fake" 11 | ) 12 | 13 | var ( 14 | cluster = "elasticsearch" 15 | namespace = "openshift-logging" 16 | k8sClient = fake.NewFakeClient() 17 | indexTemplate = estypes.NewIndexTemplate("abc-**", []string{"foo"}, 1, 0) 18 | ) 19 | 20 | func TestCreateIndexTemplateWhenError(t 
*testing.T) { 21 | chatter := testhelpers.NewFakeElasticsearchChatter( 22 | map[string]testhelpers.FakeElasticsearchResponses{ 23 | "_template/foo": { 24 | { 25 | Error: kverrors.New("test error", "test_name", t.Name()), 26 | StatusCode: http.StatusInternalServerError, 27 | Body: "{}", 28 | }, 29 | }, 30 | }) 31 | esClient := testhelpers.NewFakeElasticsearchClient(cluster, namespace, k8sClient, chatter) 32 | 33 | if esClient.CreateIndexTemplate("foo", indexTemplate) == nil { 34 | t.Error("Exp. to return an error but did not") 35 | } 36 | } 37 | 38 | func TestCreateIndexTemplateWhenResponseNot200(t *testing.T) { 39 | chatter := testhelpers.NewFakeElasticsearchChatter( 40 | map[string]testhelpers.FakeElasticsearchResponses{ 41 | "_template/foo": { 42 | { 43 | Error: nil, 44 | StatusCode: 500, 45 | Body: "{}", 46 | }, 47 | }, 48 | }) 49 | esClient := testhelpers.NewFakeElasticsearchClient(cluster, namespace, k8sClient, chatter) 50 | 51 | if esClient.CreateIndexTemplate("foo", indexTemplate) == nil { 52 | t.Error("Exp. to return an error but did not") 53 | } 54 | } 55 | 56 | func TestCreateIndexTemplateWhenResponse200(t *testing.T) { 57 | chatter := testhelpers.NewFakeElasticsearchChatter( 58 | map[string]testhelpers.FakeElasticsearchResponses{ 59 | "_template/foo": { 60 | { 61 | Error: nil, 62 | StatusCode: 200, 63 | Body: "{}", 64 | }, 65 | }, 66 | }) 67 | esClient := testhelpers.NewFakeElasticsearchClient(cluster, namespace, k8sClient, chatter) 68 | 69 | if err := esClient.CreateIndexTemplate("foo", indexTemplate); err != nil { 70 | t.Errorf("Exp. to not return an error %v", err) 71 | } 72 | } 73 | 74 | func TestCreateIndexTemplateWhenResponse201(t *testing.T) { 75 | chatter := testhelpers.NewFakeElasticsearchChatter( 76 | map[string]testhelpers.FakeElasticsearchResponses{ 77 | "_template/foo": { 78 | { 79 | Error: nil, 80 | StatusCode: 201, 81 | Body: "{}", 82 | }, 83 | }, 84 | }) 85 | esClient := testhelpers.NewFakeElasticsearchClient(cluster, namespace, k8sClient, chatter) 86 | 87 | if err := esClient.CreateIndexTemplate("foo", indexTemplate); err != nil { 88 | t.Errorf("Exp. to not return an error %v", err) 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /internal/elasticsearch/prometheus_rule_test.go: -------------------------------------------------------------------------------- 1 | package elasticsearch 2 | 3 | import ( 4 | . "github.com/onsi/ginkgo" 5 | . 
"github.com/onsi/gomega" 6 | ) 7 | 8 | var ( 9 | rulePath = "../../files/prometheus_recording_rules.yml" 10 | alertPath = "../../files/prometheus_alerts.yml" 11 | ) 12 | 13 | var _ = Describe("prometheusrules", func() { 14 | defer GinkgoRecover() 15 | 16 | Context("rules", func() { 17 | It("should build without errors", func() { 18 | _, err := ruleSpec("prometheus_recording_rules.yml", rulePath) 19 | 20 | Expect(err).To(BeNil()) 21 | }) 22 | }) 23 | 24 | Context("alerts", func() { 25 | It("should build without errors", func() { 26 | _, err := ruleSpec("prometheus_alerts.yml", alertPath) 27 | 28 | Expect(err).To(BeNil()) 29 | }) 30 | }) 31 | }) 32 | -------------------------------------------------------------------------------- /internal/elasticsearch/secret.go: -------------------------------------------------------------------------------- 1 | package elasticsearch 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strings" 7 | 8 | "github.com/openshift/elasticsearch-operator/internal/constants" 9 | "github.com/openshift/elasticsearch-operator/internal/manifests/secret" 10 | 11 | "github.com/ViaQ/logerr/v2/kverrors" 12 | apierrors "k8s.io/apimachinery/pkg/api/errors" 13 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 14 | "sigs.k8s.io/controller-runtime/pkg/client" 15 | ) 16 | 17 | func CreateOrUpdateSecretWithOwnerRef(secretName, namespace string, data map[string][]byte, client client.Client, ownerRef metav1.OwnerReference) error { 18 | s := secret.New(secretName, namespace, data) 19 | 20 | // add owner ref to secret 21 | s.OwnerReferences = append(s.OwnerReferences, ownerRef) 22 | 23 | err := secret.CreateOrUpdate(context.TODO(), client, s, secret.DataEqual, secret.MutateDataOnly) 24 | if err != nil { 25 | return kverrors.Wrap(err, "failed to create or update elasticsearch secret", 26 | "owner_ref_name", ownerRef.Name, 27 | ) 28 | } 29 | 30 | return nil 31 | } 32 | 33 | // hasRequiredSecrets will check that all secrets that we expect for EO to be able to communicate 34 | // with the ES cluster it manages exist. 35 | // It will return true if all required secrets/keys exist. 36 | // Otherwise, it will return false and the message will be populated with what is missing. 
37 | func (er ElasticsearchRequest) hasRequiredSecrets() (bool, string) { 38 | message := "" 39 | hasRequired := true 40 | 41 | key := client.ObjectKey{Name: er.cluster.Name, Namespace: er.cluster.Namespace} 42 | sec, err := secret.Get(context.TODO(), er.client, key) 43 | 44 | // check that the secret is there 45 | if apierrors.IsNotFound(kverrors.Root(err)) { 46 | return false, fmt.Sprintf("Expected secret %q in namespace %q is missing", er.cluster.Name, er.cluster.Namespace) 47 | } 48 | 49 | var missingCerts []string 50 | var secretKeys []string 51 | 52 | for key, data := range sec.Data { 53 | // check that the fields aren't blank 54 | if string(data) == "" { 55 | missingCerts = append(missingCerts, key) 56 | } 57 | 58 | secretKeys = append(secretKeys, key) 59 | } 60 | 61 | // check the fields are there 62 | for _, key := range constants.ExpectedSecretKeys { 63 | if !sliceContainsString(secretKeys, key) { 64 | missingCerts = append(missingCerts, key) 65 | } 66 | } 67 | 68 | if len(missingCerts) > 0 { 69 | message = fmt.Sprintf("Secret %q fields are either missing or empty: [%s]", er.cluster.Name, strings.Join(missingCerts, ", ")) 70 | hasRequired = false 71 | } 72 | 73 | return hasRequired, message 74 | } 75 | -------------------------------------------------------------------------------- /internal/elasticsearch/securitycontextconstraints.go: -------------------------------------------------------------------------------- 1 | package elasticsearch 2 | 3 | import ( 4 | "context" 5 | 6 | securityv1 "github.com/openshift/api/security/v1" 7 | corev1 "k8s.io/api/core/v1" 8 | 9 | "github.com/ViaQ/logerr/v2/kverrors" 10 | "github.com/openshift/elasticsearch-operator/internal/manifests/securitycontextconstraints" 11 | ) 12 | 13 | // CreateOrUpdateSecurityContextConstraints ensures the existence of the securitycontextconstraints for Elasticsearch cluster 14 | func (er *ElasticsearchRequest) CreateOrUpdateSecurityContextConstraints() error { 15 | dpl := er.cluster 16 | 17 | // This scc prevents a container from running as privileged 18 | // It allows the pod access to the hostPath volume type and marks it as not read-only 19 | builder := securitycontextconstraints.New("elasticsearch-scc", false, true, false) 20 | 21 | // Disasbles all sysctls from being executed in the pod 22 | builder.WithForbiddenSysctls([]string{ 23 | "*", 24 | }) 25 | // Allows the pod to be able to use the requested volume types 26 | builder.WithVolumes([]securityv1.FSType{ 27 | "configMap", 28 | "secret", 29 | "emptyDir", 30 | "persistentVolumeClaim", 31 | }) 32 | // Drops these capabilities and prevents them from being added to the pod 33 | builder.WithRequiredDropCapabilities([]corev1.Capability{ 34 | "CHOWN", 35 | "DAC_OVERRIDE", 36 | "FSETID", 37 | "FOWNER", 38 | "SETGID", 39 | "SETUID", 40 | "SETPCAP", 41 | "NET_BIND_SERVICE", 42 | "KILL", 43 | }) 44 | // Prevents the processes and pod from gaining more privileges than it is allowed 45 | builder.WithAllowPrivilegeEscalation(false) 46 | builder.WithDefaultAllowPrivilegeEscalation(false) 47 | // Does not set a default user or selinuxcontext value 48 | // These values can be added from the pod specification 49 | builder.WithRunAsUserOptions(securityv1.RunAsUserStrategyOptions{ 50 | Type: securityv1.RunAsUserStrategyRunAsAny, 51 | }) 52 | builder.WithSELinuxContextOptions(securityv1.SELinuxContextStrategyOptions{ 53 | Type: securityv1.SELinuxStrategyRunAsAny, 54 | }) 55 | 56 | scc := builder.Build() 57 | 58 | err := securitycontextconstraints.CreateOrUpdate(context.TODO(), 
er.client, scc, securitycontextconstraints.Equal, securitycontextconstraints.Mutate) 59 | if err != nil { 60 | return kverrors.Wrap(err, "failed to create or update elasticsearch securitycontextconstraints", 61 | "cluster", dpl.Name, 62 | ) 63 | } 64 | 65 | return nil 66 | } 67 | -------------------------------------------------------------------------------- /internal/elasticsearch/service_monitor.go: -------------------------------------------------------------------------------- 1 | package elasticsearch 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/ViaQ/logerr/v2/kverrors" 8 | "github.com/openshift/elasticsearch-operator/internal/manifests/servicemonitor" 9 | 10 | monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" 11 | 12 | corev1 "k8s.io/api/core/v1" 13 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 14 | ) 15 | 16 | const ( 17 | prometheusCAFile = "service-ca.crt" 18 | ) 19 | 20 | // CreateOrUpdateServiceMonitors ensures the existence of ServiceMonitors for Elasticsearch cluster 21 | func (er *ElasticsearchRequest) CreateOrUpdateServiceMonitors() error { 22 | dpl := er.cluster 23 | 24 | serviceMonitorName := fmt.Sprintf("monitor-%s-%s", dpl.Name, "cluster") 25 | 26 | labelsWithDefault := appendDefaultLabel(dpl.Name, dpl.Labels) 27 | labelsSelector := appendDefaultLabel(dpl.Name, map[string]string{ 28 | "scrape-metrics": "enabled", 29 | }) 30 | 31 | tlsConfig := monitoringv1.TLSConfig{ 32 | SafeTLSConfig: monitoringv1.SafeTLSConfig{ 33 | CA: monitoringv1.SecretOrConfigMap{ 34 | ConfigMap: &corev1.ConfigMapKeySelector{ 35 | LocalObjectReference: corev1.LocalObjectReference{ 36 | Name: serviceCABundleName(dpl.Name), 37 | }, 38 | Key: prometheusCAFile, 39 | }, 40 | }, 41 | // ServerName can be e.g. elasticsearch-metrics.openshift-logging.svc 42 | ServerName: fmt.Sprintf("%s-%s.%s.svc", dpl.Name, "metrics", dpl.Namespace), 43 | }, 44 | } 45 | 46 | tokenSecret := corev1.SecretKeySelector{ 47 | LocalObjectReference: corev1.LocalObjectReference{ 48 | Name: serviceMonitorServiceAccountTokenName(dpl.Name), 49 | }, 50 | Key: "token", 51 | } 52 | 53 | endpoints := []monitoringv1.Endpoint{ 54 | { 55 | Port: dpl.Name, 56 | Path: "/metrics", 57 | Scheme: "https", 58 | TLSConfig: &tlsConfig, 59 | BearerTokenSecret: tokenSecret, 60 | }, 61 | { 62 | Port: dpl.Name, 63 | Path: "/_prometheus/metrics", 64 | Scheme: "https", 65 | TLSConfig: &tlsConfig, 66 | BearerTokenSecret: tokenSecret, 67 | }, 68 | } 69 | 70 | monitor := servicemonitor.New(serviceMonitorName, dpl.Namespace, labelsWithDefault). 71 | WithJobLabel("monitor-elasticsearch"). 72 | WithSelector(metav1.LabelSelector{ 73 | MatchLabels: labelsSelector, 74 | }). 75 | WithNamespaceSelector(monitoringv1.NamespaceSelector{ 76 | MatchNames: []string{dpl.Namespace}, 77 | }). 78 | WithEndpoints(endpoints...). 
79 | Build() 80 | 81 | dpl.AddOwnerRefTo(monitor) 82 | 83 | err := servicemonitor.CreateOrUpdate(context.TODO(), er.client, monitor, servicemonitor.Equal, servicemonitor.Mutate) 84 | if err != nil { 85 | return kverrors.Wrap(err, "failed to create or update elasticsearch servicemonitor", 86 | "cluster", er.cluster.Name, 87 | "namespace", er.cluster.Namespace, 88 | ) 89 | } 90 | 91 | return nil 92 | } 93 | -------------------------------------------------------------------------------- /internal/indexmanagement/converters.go: -------------------------------------------------------------------------------- 1 | package indexmanagement 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | 7 | "github.com/ViaQ/logerr/v2/kverrors" 8 | apis "github.com/openshift/elasticsearch-operator/apis/logging/v1" 9 | "github.com/openshift/elasticsearch-operator/internal/constants" 10 | ) 11 | 12 | func calculateConditions(policy apis.IndexManagementPolicySpec, primaryShards int32) rolloverConditions { 13 | // 40GB = 40960 1K messages 14 | maxDoc := constants.TheoreticalShardMaxSizeInMB * 1000 * primaryShards 15 | maxSize := defaultShardSize * primaryShards 16 | maxAge := "" 17 | if policy.Phases.Hot != nil && policy.Phases.Hot.Actions.Rollover != nil { 18 | maxAge = string(policy.Phases.Hot.Actions.Rollover.MaxAge) 19 | } 20 | return rolloverConditions{ 21 | MaxSize: fmt.Sprintf("%dgb", maxSize), 22 | MaxDocs: maxDoc, 23 | MaxAge: maxAge, 24 | } 25 | } 26 | 27 | func calculateMillisForTimeUnit(timeunit apis.TimeUnit) (uint64, error) { 28 | match := reTimeUnit.FindStringSubmatch(string(timeunit)) 29 | if match == nil || len(match) < 2 { 30 | return 0, kverrors.New("unable to convert timeunit to millis for invalid timeunit", 31 | "unit", timeunit) 32 | } 33 | n := match[1] 34 | number, err := strconv.ParseUint(n, 10, 0) 35 | if err != nil { 36 | return 0, kverrors.Wrap(err, "unable to parse uint", "value", n) 37 | } 38 | switch match[2] { 39 | case "w": 40 | return number * millisPerWeek, nil 41 | case "d": 42 | return number * millisPerDay, nil 43 | case "h", "H": 44 | return number * millisPerHour, nil 45 | case "m": 46 | return number * millisPerMinute, nil 47 | case "s": 48 | return number * millisPerSecond, nil 49 | } 50 | return 0, kverrors.New("conversion to millis for time unit is unsupported", "timeunit", match[2]) 51 | } 52 | 53 | func crontabScheduleFor(timeunit apis.TimeUnit) (string, error) { 54 | match := reTimeUnit.FindStringSubmatch(string(timeunit)) 55 | if match == nil { 56 | return "", kverrors.New("Unable to create crontab schedule for invalid timeunit", "timeunit", timeunit) 57 | } 58 | switch match[2] { 59 | case "m": 60 | return fmt.Sprintf("*/%s * * * *", match[1]), nil 61 | } 62 | 63 | return "", kverrors.New("crontab schedule for time unit is unsupported", "timeunit", match[2]) 64 | } 65 | -------------------------------------------------------------------------------- /internal/indexmanagement/indexmanagement_suite_test.go: -------------------------------------------------------------------------------- 1 | package indexmanagement_test 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/onsi/ginkgo" 7 | . 
"github.com/onsi/gomega" 8 | ) 9 | 10 | func TestIndexManagement(t *testing.T) { 11 | RegisterFailHandler(Fail) 12 | RunSpecs(t, "IndexManagement Suite") 13 | } 14 | -------------------------------------------------------------------------------- /internal/kibana/defaults.go: -------------------------------------------------------------------------------- 1 | package kibana 2 | 3 | import ( 4 | "k8s.io/apimachinery/pkg/api/resource" 5 | ) 6 | 7 | var ( 8 | defaultKibanaMemory = resource.MustParse("736Mi") 9 | defaultKibanaCPURequest = resource.MustParse("100m") 10 | 11 | defaultKibanaProxyMemory = resource.MustParse("256Mi") 12 | defaultKibanaProxyCPURequest = resource.MustParse("100m") 13 | kibanaDefaultImage = "quay.io/openshift-logging/kibana6:6.8.1" 14 | ) 15 | -------------------------------------------------------------------------------- /internal/kibana/deployment.go: -------------------------------------------------------------------------------- 1 | package kibana 2 | 3 | import ( 4 | "github.com/openshift/elasticsearch-operator/internal/manifests/deployment" 5 | 6 | apps "k8s.io/api/apps/v1" 7 | core "k8s.io/api/core/v1" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | ) 10 | 11 | // NewDeployment stubs an instance of a Deployment 12 | func NewDeployment(deploymentName string, namespace string, loggingComponent string, component string, replicas int32, podSpec core.PodSpec) *apps.Deployment { 13 | labels := map[string]string{ 14 | "provider": "openshift", 15 | "component": "kibana", 16 | "logging-infra": "kibana", 17 | } 18 | 19 | kibanaDeployment := deployment.New("kibana", namespace, labels, replicas). 20 | WithSelector(metav1.LabelSelector{ 21 | MatchLabels: labels, 22 | }). 23 | WithStrategy(apps.RollingUpdateDeploymentStrategyType). 24 | WithTemplate(core.PodTemplateSpec{ 25 | ObjectMeta: metav1.ObjectMeta{ 26 | Name: "kibana", 27 | Labels: labels, 28 | }, 29 | Spec: podSpec, 30 | }). 31 | Build() 32 | 33 | return kibanaDeployment 34 | } 35 | -------------------------------------------------------------------------------- /internal/kibana/helpers_test.go: -------------------------------------------------------------------------------- 1 | package kibana 2 | 3 | import ( 4 | v1 "k8s.io/api/core/v1" 5 | "k8s.io/apimachinery/pkg/api/resource" 6 | ) 7 | 8 | func newResourceRequirements(limitMem string, limitCPU string, requestMem string, requestCPU string) *v1.ResourceRequirements { 9 | resources := v1.ResourceRequirements{ 10 | Limits: v1.ResourceList{}, 11 | Requests: v1.ResourceList{}, 12 | } 13 | if limitMem != "" { 14 | resources.Limits[v1.ResourceMemory] = resource.MustParse(limitMem) 15 | } 16 | if limitCPU != "" { 17 | resources.Limits[v1.ResourceCPU] = resource.MustParse(limitCPU) 18 | } 19 | if requestMem != "" { 20 | resources.Requests[v1.ResourceMemory] = resource.MustParse(requestMem) 21 | } 22 | if requestCPU != "" { 23 | resources.Requests[v1.ResourceCPU] = resource.MustParse(requestCPU) 24 | } 25 | return &resources 26 | } 27 | -------------------------------------------------------------------------------- /internal/kibana/kibana_suite_test.go: -------------------------------------------------------------------------------- 1 | package kibana 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/onsi/ginkgo" 7 | . 
"github.com/onsi/gomega" 8 | ) 9 | 10 | func TestKibanaSuite(t *testing.T) { 11 | RegisterFailHandler(Fail) 12 | RunSpecs(t, "Kibana Suite") 13 | } 14 | -------------------------------------------------------------------------------- /internal/kibana/kibanarequest.go: -------------------------------------------------------------------------------- 1 | package kibana 2 | 3 | import ( 4 | "context" 5 | 6 | kibana "github.com/openshift/elasticsearch-operator/apis/logging/v1" 7 | "github.com/openshift/elasticsearch-operator/internal/elasticsearch/esclient" 8 | 9 | "github.com/go-logr/logr" 10 | "k8s.io/apimachinery/pkg/types" 11 | "k8s.io/client-go/util/retry" 12 | "sigs.k8s.io/controller-runtime/pkg/client" 13 | ) 14 | 15 | type KibanaRequest struct { 16 | log logr.Logger 17 | client client.Client 18 | cluster *kibana.Kibana 19 | esClient esclient.Client 20 | } 21 | 22 | // TODO: determine if this is even necessary 23 | func (clusterRequest *KibanaRequest) isManaged() bool { 24 | return clusterRequest.cluster.Spec.ManagementState == kibana.ManagementStateManaged 25 | } 26 | 27 | func (clusterRequest *KibanaRequest) Create(object client.Object) error { 28 | return clusterRequest.client.Create(context.TODO(), object) 29 | } 30 | 31 | // Update the runtime Object or return error 32 | func (clusterRequest *KibanaRequest) Update(object client.Object) error { 33 | return clusterRequest.client.Update(context.TODO(), object) 34 | } 35 | 36 | func (clusterRequest *KibanaRequest) UpdateStatus() error { 37 | return retry.RetryOnConflict(retry.DefaultRetry, func() error { 38 | kibanaStatus, err := clusterRequest.getKibanaStatus() 39 | if err != nil { 40 | return err 41 | } 42 | 43 | if !compareKibanaStatus(kibanaStatus, clusterRequest.cluster.Status) { 44 | clusterRequest.cluster.Status = kibanaStatus 45 | return clusterRequest.client.Status().Update(context.TODO(), clusterRequest.cluster) 46 | } 47 | 48 | return nil 49 | }) 50 | } 51 | 52 | func (clusterRequest *KibanaRequest) Get(objectName string, object client.Object) error { 53 | namespacedName := types.NamespacedName{Name: objectName, Namespace: clusterRequest.cluster.Namespace} 54 | return clusterRequest.client.Get(context.TODO(), namespacedName, object) 55 | } 56 | 57 | func (clusterRequest *KibanaRequest) GetClusterResource(objectName string, object client.Object) error { 58 | namespacedName := types.NamespacedName{Name: objectName} 59 | err := clusterRequest.client.Get(context.TODO(), namespacedName, object) 60 | return err 61 | } 62 | 63 | func (clusterRequest *KibanaRequest) List(selector map[string]string, object client.ObjectList) error { 64 | listOpts := []client.ListOption{ 65 | client.InNamespace(clusterRequest.cluster.Namespace), 66 | client.MatchingLabels(selector), 67 | } 68 | 69 | return clusterRequest.client.List( 70 | context.TODO(), 71 | object, 72 | listOpts..., 73 | ) 74 | } 75 | 76 | func (clusterRequest *KibanaRequest) Delete(object client.Object) error { 77 | return clusterRequest.client.Delete(context.TODO(), object) 78 | } 79 | -------------------------------------------------------------------------------- /internal/kibana/serviceaccount.go: -------------------------------------------------------------------------------- 1 | package kibana 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/ViaQ/logerr/v2/kverrors" 7 | "github.com/openshift/elasticsearch-operator/internal/manifests/serviceaccount" 8 | "github.com/openshift/elasticsearch-operator/internal/utils" 9 | ) 10 | 11 | // CreateOrUpdateServiceAccount creates or 
updates a ServiceAccount for logging with the given name 12 | func (clusterRequest *KibanaRequest) CreateOrUpdateServiceAccount(name string, annotations map[string]string) error { 13 | sa := serviceaccount.New(name, clusterRequest.cluster.Namespace, annotations) 14 | 15 | utils.AddOwnerRefToObject(sa, getOwnerRef(clusterRequest.cluster)) 16 | 17 | err := serviceaccount.CreateOrUpdate(context.TODO(), clusterRequest.client, sa, serviceaccount.AnnotationsEqual, serviceaccount.MutateAnnotationsOnly) 18 | if err != nil { 19 | return kverrors.Wrap(err, "failed to create or update kibana serviceaccount", 20 | "cluster", clusterRequest.cluster.Name, 21 | "namespace", clusterRequest.cluster.Namespace, 22 | ) 23 | } 24 | 25 | return nil 26 | } 27 | -------------------------------------------------------------------------------- /internal/kibana/trustedcabundle.go: -------------------------------------------------------------------------------- 1 | package kibana 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/ViaQ/logerr/v2/kverrors" 7 | "github.com/openshift/elasticsearch-operator/internal/constants" 8 | "github.com/openshift/elasticsearch-operator/internal/manifests/configmap" 9 | "github.com/openshift/elasticsearch-operator/internal/utils" 10 | corev1 "k8s.io/api/core/v1" 11 | apierrors "k8s.io/apimachinery/pkg/api/errors" 12 | "sigs.k8s.io/controller-runtime/pkg/client" 13 | ) 14 | 15 | /* 16 | * Create or Get Trusted CA Bundle ConfigMap. 17 | * By setting label "config.openshift.io/inject-trusted-cabundle: true", the cert is automatically filled/updated. 18 | * Thus, we need the get the contents once again. 19 | */ 20 | func (clusterRequest *KibanaRequest) createOrGetTrustedCABundleConfigMap(name string) (*corev1.ConfigMap, error) { 21 | configMap := configmap.New( 22 | name, 23 | clusterRequest.cluster.Namespace, 24 | map[string]string{ 25 | constants.InjectTrustedCABundleLabel: "true", 26 | }, 27 | map[string]string{ 28 | constants.TrustedCABundleKey: "", 29 | }, 30 | ) 31 | 32 | utils.AddOwnerRefToObject(configMap, getOwnerRef(clusterRequest.cluster)) 33 | 34 | err := configmap.Create(context.TODO(), clusterRequest.client, configMap) 35 | if err != nil && !apierrors.IsAlreadyExists(kverrors.Root(err)) { 36 | return nil, kverrors.Wrap(err, "failed to create trusted CA bundle config map", 37 | "cluster", clusterRequest.cluster.Name, 38 | ) 39 | } 40 | 41 | // Get the existing config map which may include an injected CA bundle 42 | key := client.ObjectKey{Name: name, Namespace: clusterRequest.cluster.Namespace} 43 | configMap, err = configmap.Get(context.TODO(), clusterRequest.client, key) 44 | if err != nil { 45 | return nil, kverrors.Wrap(err, "failed to get trusted CA bundle config map", 46 | "cluster", clusterRequest.cluster.Name, 47 | ) 48 | } 49 | return configMap, nil 50 | } 51 | 52 | func hasTrustedCABundle(configMap *corev1.ConfigMap) bool { 53 | if configMap == nil { 54 | return false 55 | } 56 | caBundle, ok := configMap.Data[constants.TrustedCABundleKey] 57 | return ok && caBundle != "" 58 | } 59 | 60 | func calcTrustedCAHashValue(configMap *corev1.ConfigMap) (string, error) { 61 | hashValue := "" 62 | var err error 63 | 64 | if configMap == nil { 65 | return hashValue, nil 66 | } 67 | caBundle, ok := configMap.Data[constants.TrustedCABundleKey] 68 | if ok && caBundle != "" { 69 | hashValue, err = utils.CalculateMD5Hash(caBundle) 70 | if err != nil { 71 | return "", kverrors.Wrap(err, "failed to calculate hash") 72 | } 73 | } 74 | 75 | if !ok { 76 | return "", 
kverrors.New("expected key does not exist in configmap", 77 | "key", constants.TrustedCABundleKey, 78 | "configmap", configMap.Name) 79 | } 80 | 81 | return hashValue, nil 82 | } 83 | -------------------------------------------------------------------------------- /internal/manifests/configmap/build.go: -------------------------------------------------------------------------------- 1 | package configmap 2 | 3 | import ( 4 | corev1 "k8s.io/api/core/v1" 5 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 6 | ) 7 | 8 | // New returns a new k8s configmap 9 | func New(name, namespace string, labels map[string]string, data map[string]string) *corev1.ConfigMap { 10 | return &corev1.ConfigMap{ 11 | TypeMeta: metav1.TypeMeta{ 12 | Kind: "ConfigMap", 13 | APIVersion: corev1.SchemeGroupVersion.String(), 14 | }, 15 | ObjectMeta: metav1.ObjectMeta{ 16 | Name: name, 17 | Namespace: namespace, 18 | Labels: labels, 19 | }, 20 | Data: data, 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /internal/manifests/console/build.go: -------------------------------------------------------------------------------- 1 | package console 2 | 3 | import ( 4 | consolev1 "github.com/openshift/api/console/v1" 5 | 6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 7 | ) 8 | 9 | // NewConsoleLink returns a new openshift api console link 10 | func NewConsoleLink(name, href, text, icon, section string) *consolev1.ConsoleLink { 11 | return &consolev1.ConsoleLink{ 12 | ObjectMeta: metav1.ObjectMeta{ 13 | Name: name, 14 | }, 15 | Spec: consolev1.ConsoleLinkSpec{ 16 | Location: consolev1.ApplicationMenu, 17 | Link: consolev1.Link{ 18 | Text: text, 19 | Href: href, 20 | }, 21 | ApplicationMenu: &consolev1.ApplicationMenuSpec{ 22 | ImageURL: icon, 23 | Section: section, 24 | }, 25 | }, 26 | } 27 | } 28 | 29 | // NewConsoleExternalLogLink returns a new opensnfhit api ConsoleExternalLogLink 30 | func NewConsoleExternalLogLink(consoleText, hrefTemplate string, labels map[string]string) *consolev1.ConsoleExternalLogLink { 31 | return &consolev1.ConsoleExternalLogLink{ 32 | TypeMeta: metav1.TypeMeta{ 33 | Kind: "ConsoleExternalLogLink", 34 | APIVersion: consolev1.SchemeGroupVersion.String(), 35 | }, 36 | ObjectMeta: metav1.ObjectMeta{ 37 | Name: ExternalLogLinkName, 38 | Labels: labels, 39 | }, 40 | Spec: consolev1.ConsoleExternalLogLinkSpec{ 41 | Text: consoleText, 42 | HrefTemplate: hrefTemplate, 43 | }, 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /internal/manifests/cronjob/build.go: -------------------------------------------------------------------------------- 1 | package cronjob 2 | 3 | import ( 4 | batchv1 "k8s.io/api/batch/v1" 5 | corev1 "k8s.io/api/core/v1" 6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 7 | ) 8 | 9 | // Builder represents the type to build Cronjob objects 10 | type Builder struct { 11 | cj *batchv1.CronJob 12 | } 13 | 14 | // New returns a new Builder for Cronjob objects 15 | func New(name, namespace string, labels map[string]string) *Builder { 16 | return &Builder{cj: newCronjob(name, namespace, labels)} 17 | } 18 | 19 | func newCronjob(name, namespace string, labels map[string]string) *batchv1.CronJob { 20 | return &batchv1.CronJob{ 21 | TypeMeta: metav1.TypeMeta{ 22 | Kind: "CronJob", 23 | APIVersion: batchv1.SchemeGroupVersion.String(), 24 | }, 25 | ObjectMeta: metav1.ObjectMeta{ 26 | Name: name, 27 | Namespace: namespace, 28 | Labels: labels, 29 | }, 30 | Spec: batchv1.CronJobSpec{ 31 | JobTemplate: 
batchv1.JobTemplateSpec{ 32 | Spec: batchv1.JobSpec{ 33 | Template: corev1.PodTemplateSpec{ 34 | ObjectMeta: metav1.ObjectMeta{ 35 | Namespace: namespace, 36 | Labels: labels, 37 | }, 38 | }, 39 | }, 40 | }, 41 | }, 42 | } 43 | } 44 | 45 | // Build returns the final Cronjob object 46 | func (b *Builder) Build() *batchv1.CronJob { return b.cj } 47 | 48 | // WithConcurrencyPolicy sets the concurrency policy for the cronjob 49 | func (b *Builder) WithConcurrencyPolicy(cp batchv1.ConcurrencyPolicy) *Builder { 50 | b.cj.Spec.ConcurrencyPolicy = cp 51 | return b 52 | } 53 | 54 | // WithSuccessfulJobsHistoryLimit sets the limit for the history of successful jobs 55 | func (b *Builder) WithSuccessfulJobsHistoryLimit(l int32) *Builder { 56 | b.cj.Spec.SuccessfulJobsHistoryLimit = &l 57 | return b 58 | } 59 | 60 | // WithFailedJobsHistoryLimit sets the limit for the history of failed jobs 61 | func (b *Builder) WithFailedJobsHistoryLimit(l int32) *Builder { 62 | b.cj.Spec.FailedJobsHistoryLimit = &l 63 | return b 64 | } 65 | 66 | // WithSchedule sets the cronjob's schedule 67 | func (b *Builder) WithSchedule(s string) *Builder { 68 | b.cj.Spec.Schedule = s 69 | return b 70 | } 71 | 72 | // WithBackoffLimit sets the cronjob's job backoff limit 73 | func (b *Builder) WithBackoffLimit(l int32) *Builder { 74 | b.cj.Spec.JobTemplate.Spec.BackoffLimit = &l 75 | return b 76 | } 77 | 78 | // WithParallelism sets the cronjob's job parallelism limit 79 | func (b *Builder) WithParallelism(p int32) *Builder { 80 | b.cj.Spec.JobTemplate.Spec.Parallelism = &p 81 | return b 82 | } 83 | 84 | // WithSuspend sets the cronjob's suspend state 85 | func (b *Builder) WithSuspend(s bool) *Builder { 86 | b.cj.Spec.Suspend = &s 87 | return b 88 | } 89 | 90 | // WithPodSpec sets the cronjob pod spec and its name 91 | func (b *Builder) WithPodSpec(containerName string, spec *corev1.PodSpec) *Builder { 92 | b.cj.Spec.JobTemplate.Spec.Template.ObjectMeta.Name = containerName 93 | b.cj.Spec.JobTemplate.Spec.Template.Spec = *spec 94 | return b 95 | } 96 | -------------------------------------------------------------------------------- /internal/manifests/deployment/build.go: -------------------------------------------------------------------------------- 1 | package deployment 2 | 3 | import ( 4 | appsv1 "k8s.io/api/apps/v1" 5 | corev1 "k8s.io/api/core/v1" 6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 7 | ) 8 | 9 | // Builder represents the struct to build k8s deployments 10 | type Builder struct { 11 | dpl *appsv1.Deployment 12 | } 13 | 14 | // New returns a new Builder instance with a default initialized deployment. 15 | func New(deploymentName, namespace string, labels map[string]string, replicas int32) *Builder { 16 | return &Builder{dpl: newDeployment(deploymentName, namespace, labels, replicas)} 17 | } 18 | 19 | func newDeployment(deploymentName, namespace string, labels map[string]string, replicas int32) *appsv1.Deployment { 20 | return &appsv1.Deployment{ 21 | TypeMeta: metav1.TypeMeta{ 22 | Kind: "Deployment", 23 | APIVersion: appsv1.SchemeGroupVersion.String(), 24 | }, 25 | ObjectMeta: metav1.ObjectMeta{ 26 | Name: deploymentName, 27 | Namespace: namespace, 28 | Labels: labels, 29 | }, 30 | Spec: appsv1.DeploymentSpec{ 31 | Replicas: &replicas, 32 | }, 33 | } 34 | } 35 | 36 | // Build returns the final deployment. 37 | func (b *Builder) Build() *appsv1.Deployment { return b.dpl } 38 | 39 | // WithSelector sets the deployment pod selector. 
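// A chained-usage sketch (editor's illustration; compare NewDeployment in
// internal/kibana/deployment.go above, which drives this builder the same way --
// podTemplate here is a placeholder):
//
//	dpl := New("kibana", namespace, labels, 1).
//		WithSelector(metav1.LabelSelector{MatchLabels: labels}).
//		WithStrategy(appsv1.RollingUpdateDeploymentStrategyType).
//		WithTemplate(podTemplate).
//		Build()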
40 | func (b *Builder) WithSelector(s metav1.LabelSelector) *Builder { 41 | b.dpl.Spec.Selector = &s 42 | return b 43 | } 44 | 45 | // WithStrategy sets the deployment strategy 46 | func (b *Builder) WithStrategy(s appsv1.DeploymentStrategyType) *Builder { 47 | b.dpl.Spec.Strategy = appsv1.DeploymentStrategy{Type: s} 48 | return b 49 | } 50 | 51 | // WithTemplate sets the deployment pod template spec 52 | func (b *Builder) WithTemplate(t corev1.PodTemplateSpec) *Builder { 53 | b.dpl.Spec.Template = t 54 | return b 55 | } 56 | 57 | // WithPaused sets the deployment spec paused flag 58 | func (b *Builder) WithPaused(p bool) *Builder { 59 | b.dpl.Spec.Paused = p 60 | return b 61 | } 62 | 63 | // WithProgressDeadlineSeconds sets the deployment ProgressDeadlineSeconds 64 | func (b *Builder) WithProgressDeadlineSeconds(pds int32) *Builder { 65 | b.dpl.Spec.ProgressDeadlineSeconds = &pds 66 | return b 67 | } 68 | -------------------------------------------------------------------------------- /internal/manifests/persistentvolume/build.go: -------------------------------------------------------------------------------- 1 | package persistentvolume 2 | 3 | import ( 4 | corev1 "k8s.io/api/core/v1" 5 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 6 | ) 7 | 8 | // NewPVC retuns a new k8s persistentvolumeclaim 9 | func NewPVC(pvcName, namespace string, labels map[string]string) *corev1.PersistentVolumeClaim { 10 | return &corev1.PersistentVolumeClaim{ 11 | TypeMeta: metav1.TypeMeta{ 12 | Kind: "PersistentVolumeClaim", 13 | APIVersion: "v1", 14 | }, 15 | ObjectMeta: metav1.ObjectMeta{ 16 | Name: pvcName, 17 | Namespace: namespace, 18 | Labels: labels, 19 | }, 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /internal/manifests/pod/build.go: -------------------------------------------------------------------------------- 1 | package pod 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/openshift/elasticsearch-operator/internal/utils" 7 | 8 | corev1 "k8s.io/api/core/v1" 9 | ) 10 | 11 | // Builder represents the struct to build k9s podspecs 12 | type Builder struct { 13 | spec *corev1.PodSpec 14 | } 15 | 16 | // NewSpec returns a new Builder instance with a default initialized podspec 17 | func NewSpec(serviceAccountName string, containers []corev1.Container, volumes []corev1.Volume) *Builder { 18 | return &Builder{spec: newPodSpec(serviceAccountName, containers, volumes)} 19 | } 20 | 21 | func newPodSpec(serviceAccountName string, containers []corev1.Container, volumes []corev1.Volume) *corev1.PodSpec { 22 | return &corev1.PodSpec{ 23 | ServiceAccountName: serviceAccountName, 24 | Containers: containers, 25 | Volumes: volumes, 26 | NodeSelector: utils.EnsureLinuxNodeSelector(map[string]string{}), 27 | } 28 | } 29 | 30 | // Build returns the final podspec 31 | func (b *Builder) Build() *corev1.PodSpec { return b.spec } 32 | 33 | // WithNodeSelectors sets the podsec selectors and ensures that the 34 | // default linux node selector is always present. 35 | func (b *Builder) WithNodeSelectors(s map[string]string) *Builder { 36 | b.spec.NodeSelector = utils.EnsureLinuxNodeSelector(s) 37 | return b 38 | } 39 | 40 | // WithTolerations appends tolerations to the podspec 41 | func (b *Builder) WithTolerations(t ...corev1.Toleration) *Builder { 42 | b.spec.Tolerations = append(b.spec.Tolerations, t...) 
43 | return b 44 | } 45 | 46 | // WithAffinity sets the affinity rule for the podspec 47 | func (b *Builder) WithAffinity(a *corev1.Affinity) *Builder { 48 | b.spec.Affinity = a 49 | return b 50 | } 51 | 52 | // WithRestartPolicy sets the restart policy for the podspec 53 | func (b *Builder) WithRestartPolicy(rp corev1.RestartPolicy) *Builder { 54 | b.spec.RestartPolicy = rp 55 | return b 56 | } 57 | 58 | // WithTerminationGracePeriodSeconds sets the termination grace period for the podspec 59 | func (b *Builder) WithTerminationGracePeriodSeconds(p time.Duration) *Builder { 60 | d := int64(p.Seconds()) 61 | b.spec.TerminationGracePeriodSeconds = &d 62 | return b 63 | } 64 | 65 | // WithSecurityContext sets the security context for the podspec 66 | func (b *Builder) WithSecurityContext(sc corev1.PodSecurityContext) *Builder { 67 | b.spec.SecurityContext = &sc 68 | return b 69 | } 70 | -------------------------------------------------------------------------------- /internal/manifests/pod/compare.go: -------------------------------------------------------------------------------- 1 | package pod 2 | 3 | import ( 4 | "reflect" 5 | 6 | "github.com/openshift/elasticsearch-operator/internal/utils/comparators" 7 | 8 | corev1 "k8s.io/api/core/v1" 9 | ) 10 | 11 | // ArePodTemplateSpecEqual compares two corev1.PodTemplateSpec objects 12 | // and returns true only if pod spec are equal and tolerations are strictly the same 13 | func ArePodTemplateSpecEqual(lhs, rhs corev1.PodTemplateSpec) bool { 14 | return ArePodSpecEqual(lhs.Spec, rhs.Spec, true) 15 | } 16 | 17 | // ArePodSpecEqual compares two corev1.PodSpec objects and returns true 18 | // only if they are equal in any of the following: 19 | // - Length of containers slice 20 | // - Node selectors 21 | // - Tolerations, if strict they need to be the same, non-strict for superset check 22 | // - Containers: Name, Image, VolumeMounts, EnvVar, Args, Ports, ResourceRequirements 23 | func ArePodSpecEqual(lhs, rhs corev1.PodSpec, strictTolerations bool) bool { 24 | equal := true 25 | 26 | if len(lhs.Containers) != len(rhs.Containers) { 27 | equal = false 28 | } 29 | 30 | // check nodeselectors 31 | if !comparators.AreSelectorsSame(lhs.NodeSelector, rhs.NodeSelector) { 32 | equal = false 33 | } 34 | 35 | // strictTolerations are for when we compare from the deployments or statefulsets 36 | // if we are seeing if rolled out pods contain changes we don't want strictTolerations 37 | // since k8s may add additional tolerations to pods 38 | if strictTolerations { 39 | // check tolerations 40 | if !comparators.AreTolerationsSame(lhs.Tolerations, rhs.Tolerations) { 41 | equal = false 42 | } 43 | } else { 44 | // check tolerations 45 | if !comparators.ContainsSameTolerations(lhs.Tolerations, rhs.Tolerations) { 46 | equal = false 47 | } 48 | } 49 | 50 | // check container fields 51 | for _, lContainer := range lhs.Containers { 52 | found := false 53 | 54 | for _, rContainer := range rhs.Containers { 55 | // Only compare the images of containers with the same name 56 | if lContainer.Name != rContainer.Name { 57 | continue 58 | } 59 | 60 | found = true 61 | 62 | // can't use reflect.DeepEqual here, due to k8s adding token mounts 63 | // check that rContainer is all found within lContainer and that they match by name 64 | if !comparators.ContainsSameVolumeMounts(lContainer.VolumeMounts, rContainer.VolumeMounts) { 65 | equal = false 66 | } 67 | 68 | if lContainer.Image != rContainer.Image { 69 | equal = false 70 | } 71 | 72 | if 
!comparators.EnvValueEqual(lContainer.Env, rContainer.Env) { 73 | equal = false 74 | } 75 | 76 | if !reflect.DeepEqual(lContainer.Args, rContainer.Args) { 77 | equal = false 78 | } 79 | 80 | if !reflect.DeepEqual(lContainer.Ports, rContainer.Ports) { 81 | equal = false 82 | } 83 | 84 | if !comparators.AreResourceRequementsSame(lContainer.Resources, rContainer.Resources) { 85 | equal = false 86 | } 87 | } 88 | 89 | if !found { 90 | equal = false 91 | } 92 | } 93 | 94 | return equal 95 | } 96 | -------------------------------------------------------------------------------- /internal/manifests/pod/pod.go: -------------------------------------------------------------------------------- 1 | package pod 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/ViaQ/logerr/v2/kverrors" 7 | 8 | corev1 "k8s.io/api/core/v1" 9 | "sigs.k8s.io/controller-runtime/pkg/client" 10 | ) 11 | 12 | // List returns a list of pods in a namespace that match the given selector 13 | func List(ctx context.Context, c client.Client, namespace string, selector map[string]string) ([]corev1.Pod, error) { 14 | list := &corev1.PodList{} 15 | opts := []client.ListOption{ 16 | client.InNamespace(namespace), 17 | client.MatchingLabels(selector), 18 | } 19 | if err := c.List(ctx, list, opts...); err != nil { 20 | return nil, kverrors.Wrap(err, "failed to list pods", 21 | "namespace", namespace, 22 | ) 23 | } 24 | 25 | return list.Items, nil 26 | } 27 | -------------------------------------------------------------------------------- /internal/manifests/prometheusrule/build.go: -------------------------------------------------------------------------------- 1 | package prometheusrule 2 | 3 | import ( 4 | monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" 5 | 6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 7 | ) 8 | 9 | // New returns a prometheus-operator prometheusrule. 10 | func New(ruleName, namespace string, labels map[string]string, groups []monitoringv1.RuleGroup) *monitoringv1.PrometheusRule { 11 | return &monitoringv1.PrometheusRule{ 12 | TypeMeta: metav1.TypeMeta{ 13 | Kind: monitoringv1.PrometheusRuleKind, 14 | APIVersion: monitoringv1.SchemeGroupVersion.String(), 15 | }, 16 | ObjectMeta: metav1.ObjectMeta{ 17 | Name: ruleName, 18 | Namespace: namespace, 19 | Labels: labels, 20 | }, 21 | Spec: monitoringv1.PrometheusRuleSpec{ 22 | Groups: groups, 23 | }, 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /internal/manifests/prometheusrule/prometheusrule.go: -------------------------------------------------------------------------------- 1 | package prometheusrule 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/ViaQ/logerr/v2/kverrors" 7 | monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" 8 | "k8s.io/apimachinery/pkg/api/equality" 9 | apierrors "k8s.io/apimachinery/pkg/api/errors" 10 | "k8s.io/client-go/util/retry" 11 | "sigs.k8s.io/controller-runtime/pkg/client" 12 | ) 13 | 14 | // CreateOrUpdate attempts first to get the given prometheusrule. If the 15 | // prometheusrule does not exist, the prometheusrule will be created. Otherwise, 16 | // if the prometheusrule exists and the provided comparison func detects any changes 17 | // an update is attempted. Updates are retried with backoff (See retry.DefaultRetry). 18 | // Returns on failure an non-nil error. 
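// Note: this variant takes no comparison or mutation funcs (unlike e.g.
// secret.CreateOrUpdate above); changes are detected with equality.Semantic.DeepEqual
// and only the Spec is copied on update. A minimal call-site sketch (editor's illustration):
//
//	rule := New(ruleName, namespace, labels, groups)
//	if err := CreateOrUpdate(context.TODO(), k8sClient, rule); err != nil {
//		return kverrors.Wrap(err, "failed to reconcile prometheusrule")
//	}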
19 | func CreateOrUpdate(ctx context.Context, c client.Client, pr *monitoringv1.PrometheusRule) error { 20 | current := &monitoringv1.PrometheusRule{} 21 | key := client.ObjectKey{Name: pr.Name, Namespace: pr.Namespace} 22 | err := c.Get(ctx, key, current) 23 | if err != nil { 24 | if apierrors.IsNotFound(err) { 25 | err = c.Create(ctx, pr) 26 | 27 | if err == nil { 28 | return nil 29 | } 30 | 31 | return kverrors.Wrap(err, "failed to create prometheusrule", 32 | "name", pr.Name, 33 | "namespace", pr.Namespace, 34 | ) 35 | } 36 | 37 | return kverrors.Wrap(err, "failed to get prometheusrule", 38 | "name", pr.Name, 39 | "namespace", pr.Namespace, 40 | ) 41 | } 42 | 43 | if !equality.Semantic.DeepEqual(current, pr) { 44 | err := retry.RetryOnConflict(retry.DefaultRetry, func() error { 45 | if err := c.Get(ctx, key, current); err != nil { 46 | return kverrors.Wrap(err, "failed to get prometheusrule", 47 | "name", pr.Name, 48 | "namespace", pr.Namespace, 49 | ) 50 | } 51 | 52 | current.Spec = pr.Spec 53 | if err := c.Update(ctx, current); err != nil { 54 | return err 55 | } 56 | return nil 57 | }) 58 | if err != nil { 59 | return kverrors.Wrap(err, "failed to update prometheusrule", 60 | "name", pr.Name, 61 | "namespace", pr.Namespace, 62 | ) 63 | } 64 | return nil 65 | } 66 | 67 | return nil 68 | } 69 | -------------------------------------------------------------------------------- /internal/manifests/rbac/clusterrole.go: -------------------------------------------------------------------------------- 1 | package rbac 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/ViaQ/logerr/v2/kverrors" 7 | rbacv1 "k8s.io/api/rbac/v1" 8 | "k8s.io/apimachinery/pkg/api/equality" 9 | apierrors "k8s.io/apimachinery/pkg/api/errors" 10 | "k8s.io/client-go/util/retry" 11 | "sigs.k8s.io/controller-runtime/pkg/client" 12 | ) 13 | 14 | // CreateOrUpdateClusterRole attempts first to get the given clusterrole. If the 15 | // clusterrole does not exist, the clusterrole will be created. Otherwise, 16 | // if the clusterrole exists and the provided comparison func detects any changes 17 | // an update is attempted. Updates are retried with backoff (See retry.DefaultRetry). 18 | // Returns on failure an non-nil error. 19 | func CreateOrUpdateClusterRole(ctx context.Context, c client.Client, cr *rbacv1.ClusterRole) error { 20 | current := &rbacv1.ClusterRole{} 21 | key := client.ObjectKey{Name: cr.Name} 22 | err := c.Get(ctx, key, current) 23 | if err != nil { 24 | if apierrors.IsNotFound(err) { 25 | err = c.Create(ctx, cr) 26 | 27 | if err == nil { 28 | return nil 29 | } 30 | 31 | return kverrors.Wrap(err, "failed to create clusterrole", 32 | "name", cr.Name, 33 | ) 34 | } 35 | 36 | return kverrors.Wrap(err, "failed to get clusterrole", 37 | "name", cr.Name, 38 | ) 39 | } 40 | 41 | if !equality.Semantic.DeepEqual(current, cr) { 42 | err := retry.RetryOnConflict(retry.DefaultRetry, func() error { 43 | if err := c.Get(ctx, key, current); err != nil { 44 | return kverrors.Wrap(err, "failed to get clusterrole", 45 | "name", cr.Name, 46 | ) 47 | } 48 | 49 | current.Rules = cr.Rules 50 | if err := c.Update(ctx, current); err != nil { 51 | return err 52 | } 53 | return nil 54 | }) 55 | if err != nil { 56 | return kverrors.Wrap(err, "failed to update clusterrole", 57 | "name", cr.Name, 58 | ) 59 | } 60 | return nil 61 | } 62 | return nil 63 | } 64 | 65 | // DeleteClusterRole attempts to delete a k8s cluster role if existing or returns an error. 
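// Illustrative sketch, not part of the original source: reconciling a cluster
// role that grants read access to pod metrics. The role name and policy rule
// are assumptions for this example; NewClusterRole is the same builder this
// file uses in DeleteClusterRole below.
func exampleReconcileClusterRole(ctx context.Context, c client.Client) error {
	rules := []rbacv1.PolicyRule{
		{
			APIGroups: []string{""},
			Resources: []string{"pods", "pods/metrics"},
			Verbs:     []string{"get", "list", "watch"},
		},
	}

	cr := NewClusterRole("example-metrics-reader", rules) // assumed name

	// Create if missing; otherwise the desired rules are copied over and the
	// update is retried on conflict, as implemented above.
	return CreateOrUpdateClusterRole(ctx, c, cr)
}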
66 | func DeleteClusterRole(ctx context.Context, c client.Client, key client.ObjectKey) error { 67 | cr := NewClusterRole(key.Name, []rbacv1.PolicyRule{}) 68 | 69 | if err := c.Delete(ctx, cr, &client.DeleteOptions{}); err != nil { 70 | if !apierrors.IsNotFound(kverrors.Root(err)) { 71 | return kverrors.Wrap(err, "failed to delete clusterrole", 72 | "name", cr.Name, 73 | ) 74 | } 75 | } 76 | 77 | return nil 78 | } 79 | -------------------------------------------------------------------------------- /internal/manifests/rbac/clusterrolebinding.go: -------------------------------------------------------------------------------- 1 | package rbac 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/ViaQ/logerr/v2/kverrors" 7 | rbacv1 "k8s.io/api/rbac/v1" 8 | "k8s.io/apimachinery/pkg/api/equality" 9 | apierrors "k8s.io/apimachinery/pkg/api/errors" 10 | "k8s.io/client-go/util/retry" 11 | "sigs.k8s.io/controller-runtime/pkg/client" 12 | ) 13 | 14 | // CreateOrUpdateClusterRoleBinding attempts first to get the given clusterrolebinding. If the 15 | // clusterrolebinding does not exist, the clusterrolebinding will be created. Otherwise, 16 | // if the clusterrolebinding exists and the provided comparison func detects any changes 17 | // an update is attempted. Updates are retried with backoff (See retry.DefaultRetry). 18 | // Returns on failure an non-nil error. 19 | func CreateOrUpdateClusterRoleBinding(ctx context.Context, c client.Client, crb *rbacv1.ClusterRoleBinding) error { 20 | current := &rbacv1.ClusterRoleBinding{} 21 | key := client.ObjectKey{Name: crb.Name} 22 | err := c.Get(ctx, key, current) 23 | if err != nil { 24 | if apierrors.IsNotFound(err) { 25 | err = c.Create(ctx, crb) 26 | 27 | if err == nil { 28 | return nil 29 | } 30 | 31 | return kverrors.Wrap(err, "failed to create clusterrolebinding", 32 | "name", crb.Name, 33 | ) 34 | } 35 | 36 | return kverrors.Wrap(err, "failed to get clusterrolebinding", 37 | "name", crb.Name, 38 | ) 39 | } 40 | 41 | if !equality.Semantic.DeepEqual(current, crb) { 42 | err := retry.RetryOnConflict(retry.DefaultRetry, func() error { 43 | if err := c.Get(ctx, key, current); err != nil { 44 | return kverrors.Wrap(err, "failed to get clusterrolebinding", 45 | "name", crb.Name, 46 | ) 47 | } 48 | 49 | current.Subjects = crb.Subjects 50 | if err := c.Update(ctx, current); err != nil { 51 | return err 52 | } 53 | return nil 54 | }) 55 | if err != nil { 56 | return kverrors.Wrap(err, "failed to update clusterrolebinding", 57 | "name", crb.Name, 58 | ) 59 | } 60 | return nil 61 | } 62 | return nil 63 | } 64 | 65 | // DeleteClusterRoleBinding attempts to delete a k8s cluster role binding if existing or returns an error. 
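// Illustrative sketch, not part of the original source: binding a service
// account to a cluster role and reconciling the result. The names and the
// namespace are assumptions; NewClusterRoleBinding is the builder used by
// DeleteClusterRoleBinding below, assumed to take the binding name, the
// referenced cluster role name and the subjects.
func exampleReconcileClusterRoleBinding(ctx context.Context, c client.Client) error {
	subjects := []rbacv1.Subject{
		{
			Kind:      rbacv1.ServiceAccountKind,
			Name:      "elasticsearch-operator",     // assumed service account
			Namespace: "openshift-operators-redhat", // assumed namespace
		},
	}

	crb := NewClusterRoleBinding("example-metrics-reader", "example-metrics-reader", subjects)

	// Create if missing; otherwise only the subjects are copied before the
	// conflict-retried update, mirroring CreateOrUpdateClusterRoleBinding above.
	return CreateOrUpdateClusterRoleBinding(ctx, c, crb)
}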
66 | func DeleteClusterRoleBinding(ctx context.Context, c client.Client, key client.ObjectKey) error { 67 | crb := NewClusterRoleBinding(key.Name, "", []rbacv1.Subject{}) 68 | 69 | if err := c.Delete(ctx, crb, &client.DeleteOptions{}); err != nil { 70 | if !apierrors.IsNotFound(kverrors.Root(err)) { 71 | return kverrors.Wrap(err, "failed to delete clusterrolebinding", 72 | "name", crb.Name, 73 | ) 74 | } 75 | } 76 | 77 | return nil 78 | } 79 | -------------------------------------------------------------------------------- /internal/manifests/rbac/role.go: -------------------------------------------------------------------------------- 1 | package rbac 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/ViaQ/logerr/v2/kverrors" 7 | rbacv1 "k8s.io/api/rbac/v1" 8 | "k8s.io/apimachinery/pkg/api/equality" 9 | apierrors "k8s.io/apimachinery/pkg/api/errors" 10 | "k8s.io/client-go/util/retry" 11 | "sigs.k8s.io/controller-runtime/pkg/client" 12 | ) 13 | 14 | // CreateOrUpdateRole attempts first to get the given role. If the 15 | // role does not exist, the role will be created. Otherwise, 16 | // if the role exists and the provided comparison func detects any changes 17 | // an update is attempted. Updates are retried with backoff (See retry.DefaultRetry). 18 | // Returns on failure an non-nil error. 19 | func CreateOrUpdateRole(ctx context.Context, c client.Client, r *rbacv1.Role) error { 20 | current := &rbacv1.Role{} 21 | key := client.ObjectKey{Name: r.Name, Namespace: r.Namespace} 22 | err := c.Get(ctx, key, current) 23 | if err != nil { 24 | if apierrors.IsNotFound(err) { 25 | err = c.Create(ctx, r) 26 | 27 | if err == nil { 28 | return nil 29 | } 30 | 31 | return kverrors.Wrap(err, "failed to create role", 32 | "name", r.Name, 33 | "namespace", r.Namespace, 34 | ) 35 | } 36 | 37 | return kverrors.Wrap(err, "failed to get role", 38 | "name", r.Name, 39 | "namespace", r.Namespace, 40 | ) 41 | } 42 | 43 | if !equality.Semantic.DeepEqual(current, r) { 44 | err := retry.RetryOnConflict(retry.DefaultRetry, func() error { 45 | if err := c.Get(ctx, key, current); err != nil { 46 | return kverrors.Wrap(err, "failed to get role", 47 | "name", r.Name, 48 | "namespace", r.Namespace, 49 | ) 50 | } 51 | 52 | current.Rules = r.Rules 53 | if err := c.Update(ctx, current); err != nil { 54 | return err 55 | } 56 | return nil 57 | }) 58 | if err != nil { 59 | return kverrors.Wrap(err, "failed to update role", 60 | "name", r.Name, 61 | "namespace", r.Namespace, 62 | ) 63 | } 64 | return nil 65 | } 66 | return nil 67 | } 68 | -------------------------------------------------------------------------------- /internal/manifests/rbac/rolebinding.go: -------------------------------------------------------------------------------- 1 | package rbac 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/ViaQ/logerr/v2/kverrors" 7 | rbacv1 "k8s.io/api/rbac/v1" 8 | "k8s.io/apimachinery/pkg/api/equality" 9 | apierrors "k8s.io/apimachinery/pkg/api/errors" 10 | "k8s.io/client-go/util/retry" 11 | "sigs.k8s.io/controller-runtime/pkg/client" 12 | ) 13 | 14 | // CreateOrUpdateRoleBinding attempts first to get the given rolebinding. If the 15 | // rolebinding does not exist, the rolebinding will be created. Otherwise, 16 | // if the rolebinding exists and the provided comparison func detects any changes 17 | // an update is attempted. Updates are retried with backoff (See retry.DefaultRetry). 18 | // Returns on failure an non-nil error. 
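// Illustrative sketch, not part of the original source: granting the
// prometheus-k8s service account a namespaced role via the helper below.
// The names, namespaces and the metav1 import are assumptions made only
// for this example.
func exampleReconcileRoleBinding(ctx context.Context, c client.Client) error {
	rb := &rbacv1.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{ // assumes metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" is imported
			Name:      "prometheus-k8s",
			Namespace: "openshift-operators-redhat",
		},
		RoleRef: rbacv1.RoleRef{
			APIGroup: rbacv1.GroupName,
			Kind:     "Role",
			Name:     "prometheus-k8s",
		},
		Subjects: []rbacv1.Subject{{
			Kind:      rbacv1.ServiceAccountKind,
			Name:      "prometheus-k8s",
			Namespace: "openshift-monitoring",
		}},
	}

	// Create if missing; on changes only the subjects are copied before the
	// conflict-retried update.
	return CreateOrUpdateRoleBinding(ctx, c, rb)
}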
19 | func CreateOrUpdateRoleBinding(ctx context.Context, c client.Client, rb *rbacv1.RoleBinding) error { 20 | current := &rbacv1.RoleBinding{} 21 | key := client.ObjectKey{Name: rb.Name, Namespace: rb.Namespace} 22 | err := c.Get(ctx, key, current) 23 | if err != nil { 24 | if apierrors.IsNotFound(err) { 25 | err = c.Create(ctx, rb) 26 | 27 | if err == nil { 28 | return nil 29 | } 30 | 31 | return kverrors.Wrap(err, "failed to create rolebinding", 32 | "name", rb.Name, 33 | "namespace", rb.Namespace, 34 | ) 35 | } 36 | 37 | return kverrors.Wrap(err, "failed to get rolebinding", 38 | "name", rb.Name, 39 | "namespace", rb.Namespace, 40 | ) 41 | } 42 | 43 | if !equality.Semantic.DeepEqual(current, rb) { 44 | err := retry.RetryOnConflict(retry.DefaultRetry, func() error { 45 | if err := c.Get(ctx, key, current); err != nil { 46 | return kverrors.Wrap(err, "failed to get rolebinding", 47 | "name", rb.Name, 48 | "namespace", rb.Namespace, 49 | ) 50 | } 51 | 52 | current.Subjects = rb.Subjects 53 | if err := c.Update(ctx, current); err != nil { 54 | return err 55 | } 56 | return nil 57 | }) 58 | if err != nil { 59 | return kverrors.Wrap(err, "failed to update rolebinding", 60 | "name", rb.Name, 61 | "namespace", rb.Namespace, 62 | ) 63 | } 64 | return nil 65 | } 66 | return nil 67 | } 68 | -------------------------------------------------------------------------------- /internal/manifests/route/build.go: -------------------------------------------------------------------------------- 1 | package route 2 | 3 | import ( 4 | routev1 "github.com/openshift/api/route/v1" 5 | 6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 7 | ) 8 | 9 | // Builder represents the struct to build openshift api route objects 10 | type Builder struct { 11 | r *routev1.Route 12 | } 13 | 14 | // New returns a new Builder for openshift api route objects. 
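// Illustrative sketch, not part of the original source: composing a re-encrypt
// route in front of an Elasticsearch service with the route Builder in this
// file. The route and service names, namespace, labels and the CA bytes passed
// in are assumptions for this example.
func exampleBuildRoute(caCert []byte) *routev1.Route {
	return New("elasticsearch", "openshift-logging", "elasticsearch",
		map[string]string{"component": "support"}).
		WithTLSConfig(&routev1.TLSConfig{
			Termination:                   routev1.TLSTerminationReencrypt,
			InsecureEdgeTerminationPolicy: routev1.InsecureEdgeTerminationPolicyRedirect,
		}).
		// WithCA only takes effect because a TLS config was set first.
		WithCA(caCert).
		Build()
}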
15 | func New(routeName, namespace, serviceName string, labels map[string]string) *Builder { 16 | return &Builder{r: newRoute(routeName, namespace, serviceName, labels)} 17 | } 18 | 19 | func newRoute(routeName, namespace, serviceName string, labels map[string]string) *routev1.Route { 20 | return &routev1.Route{ 21 | TypeMeta: metav1.TypeMeta{ 22 | Kind: "Route", 23 | APIVersion: routev1.SchemeGroupVersion.String(), 24 | }, 25 | ObjectMeta: metav1.ObjectMeta{ 26 | Name: routeName, 27 | Namespace: namespace, 28 | Labels: labels, 29 | }, 30 | Spec: routev1.RouteSpec{ 31 | To: routev1.RouteTargetReference{ 32 | Name: serviceName, 33 | Kind: "Service", 34 | }, 35 | }, 36 | } 37 | } 38 | 39 | // Build returns the final route object 40 | func (b *Builder) Build() *routev1.Route { return b.r } 41 | 42 | // WithTLSConfig sets the route TLS configuration 43 | func (b *Builder) WithTLSConfig(tc *routev1.TLSConfig) *Builder { 44 | b.r.Spec.TLS = tc 45 | return b 46 | } 47 | 48 | // WithCA sets the certificate authority to the TLS config if present 49 | func (b *Builder) WithCA(caCert []byte) *Builder { 50 | if b.r.Spec.TLS != nil { 51 | b.r.Spec.TLS.CACertificate = string(caCert) 52 | b.r.Spec.TLS.DestinationCACertificate = string(caCert) 53 | } 54 | return b 55 | } 56 | -------------------------------------------------------------------------------- /internal/manifests/secret/build.go: -------------------------------------------------------------------------------- 1 | package secret 2 | 3 | import ( 4 | corev1 "k8s.io/api/core/v1" 5 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 6 | ) 7 | 8 | // New provides a k8s secret 9 | func New(name, namespace string, data map[string][]byte) *corev1.Secret { 10 | return &corev1.Secret{ 11 | ObjectMeta: metav1.ObjectMeta{ 12 | Name: name, 13 | Namespace: namespace, 14 | }, 15 | Data: data, 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /internal/manifests/securitycontextconstraints/build.go: -------------------------------------------------------------------------------- 1 | package securitycontextconstraints 2 | 3 | import ( 4 | corev1 "k8s.io/api/core/v1" 5 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 6 | 7 | securityv1 "github.com/openshift/api/security/v1" 8 | ) 9 | 10 | // Builder represents the struct to build security context constraints 11 | type Builder struct { 12 | scc *securityv1.SecurityContextConstraints 13 | } 14 | 15 | // New returns a new Builder for security context constraints 16 | func New(name string, allowPrivelegeContainer, allowHostDirVolumePlugin, readOnlyRootFilesystem bool) *Builder { 17 | return &Builder{scc: newConstraints(name, allowPrivelegeContainer, allowHostDirVolumePlugin, readOnlyRootFilesystem)} 18 | } 19 | 20 | func newConstraints(name string, allowPrivelegeContainer, allowHostDirVolumePlugin, readOnlyRootFilesystem bool) *securityv1.SecurityContextConstraints { 21 | return &securityv1.SecurityContextConstraints{ 22 | TypeMeta: metav1.TypeMeta{ 23 | Kind: "SecurityContextConstraints", 24 | APIVersion: securityv1.SchemeGroupVersion.String(), 25 | }, 26 | ObjectMeta: metav1.ObjectMeta{ 27 | Name: name, 28 | }, 29 | AllowPrivilegedContainer: allowPrivelegeContainer, 30 | AllowHostDirVolumePlugin: allowHostDirVolumePlugin, 31 | ReadOnlyRootFilesystem: readOnlyRootFilesystem, 32 | } 33 | } 34 | 35 | // Build returns the final security context constraints 36 | func (b *Builder) Build() *securityv1.SecurityContextConstraints { return b.scc } 37 | 38 | // Sets the 
constraints volumes 39 | func (b *Builder) WithVolumes(volumes []securityv1.FSType) *Builder { 40 | b.scc.Volumes = volumes 41 | return b 42 | } 43 | 44 | // Sets the constraints forbidden sysctls 45 | func (b *Builder) WithForbiddenSysctls(forbiddenSysctls []string) *Builder { 46 | b.scc.ForbiddenSysctls = forbiddenSysctls 47 | return b 48 | } 49 | 50 | // Sets the constraints drop capabilities 51 | func (b *Builder) WithRequiredDropCapabilities(capabilities []corev1.Capability) *Builder { 52 | b.scc.RequiredDropCapabilities = capabilities 53 | return b 54 | } 55 | 56 | // Sets the constraints user options 57 | func (b *Builder) WithRunAsUserOptions(options securityv1.RunAsUserStrategyOptions) *Builder { 58 | b.scc.RunAsUser = options 59 | return b 60 | } 61 | 62 | // Sets the constraints selinuxcontext options 63 | func (b *Builder) WithSELinuxContextOptions(options securityv1.SELinuxContextStrategyOptions) *Builder { 64 | b.scc.SELinuxContext = options 65 | return b 66 | } 67 | 68 | // Sets the constraints privelege escalation 69 | func (b *Builder) WithAllowPrivilegeEscalation(value bool) *Builder { 70 | b.scc.AllowPrivilegeEscalation = &value 71 | return b 72 | } 73 | 74 | // Sets the constraints default privelege escalation 75 | func (b *Builder) WithDefaultAllowPrivilegeEscalation(value bool) *Builder { 76 | b.scc.DefaultAllowPrivilegeEscalation = &value 77 | return b 78 | } 79 | -------------------------------------------------------------------------------- /internal/manifests/service/build.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | corev1 "k8s.io/api/core/v1" 5 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 6 | ) 7 | 8 | // Builder represents the struct to build k8s services 9 | type Builder struct { 10 | svc *corev1.Service 11 | } 12 | 13 | // New returns a new Builder instance with a default initialized service. 14 | func New(serviceName, namespace string, labels map[string]string) *Builder { 15 | return &Builder{svc: newService(serviceName, namespace, labels)} 16 | } 17 | 18 | func newService(serviceName, namespace string, labels map[string]string) *corev1.Service { 19 | return &corev1.Service{ 20 | TypeMeta: metav1.TypeMeta{ 21 | Kind: "Service", 22 | APIVersion: corev1.SchemeGroupVersion.String(), 23 | }, 24 | ObjectMeta: metav1.ObjectMeta{ 25 | Name: serviceName, 26 | Namespace: namespace, 27 | Labels: labels, 28 | }, 29 | Spec: corev1.ServiceSpec{}, 30 | } 31 | } 32 | 33 | // Build returns the final service. 34 | func (b *Builder) Build() *corev1.Service { return b.svc } 35 | 36 | // WithAnnotations set the object meta annotations. 37 | func (b *Builder) WithAnnotations(a map[string]string) *Builder { 38 | b.svc.Annotations = a 39 | return b 40 | } 41 | 42 | // WithSelector sets the service selector. 43 | func (b *Builder) WithSelector(s map[string]string) *Builder { 44 | b.svc.Spec.Selector = s 45 | return b 46 | } 47 | 48 | // WithServicePorts appends service ports to the service spec. 49 | func (b *Builder) WithServicePorts(sp ...corev1.ServicePort) *Builder { 50 | b.svc.Spec.Ports = append(b.svc.Spec.Ports, sp...) 51 | return b 52 | } 53 | 54 | // WithPublishNotReady sets the spec PublishNotReadyAddresses flag. 
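// Illustrative sketch, not part of the original source: composing a
// cluster-internal transport service for Elasticsearch nodes with the
// Builder in this file. The names, namespace, labels and port are
// assumptions for this example.
func exampleBuildService() *corev1.Service {
	return New("elasticsearch-cluster", "openshift-logging",
		map[string]string{"cluster-name": "elasticsearch"}).
		WithSelector(map[string]string{"cluster-name": "elasticsearch"}).
		WithServicePorts(corev1.ServicePort{
			Name:     "cluster",
			Port:     9300,
			Protocol: corev1.ProtocolTCP,
		}).
		// Publish addresses of pods that are not yet ready so cluster nodes
		// can discover each other while the cluster is still forming.
		WithPublishNotReady(true).
		Build()
}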
55 | func (b *Builder) WithPublishNotReady(val bool) *Builder { 56 | b.svc.Spec.PublishNotReadyAddresses = val 57 | return b 58 | } 59 | -------------------------------------------------------------------------------- /internal/manifests/serviceaccount/build.go: -------------------------------------------------------------------------------- 1 | package serviceaccount 2 | 3 | import ( 4 | corev1 "k8s.io/api/core/v1" 5 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 6 | ) 7 | 8 | // New returns a new k8s serviceaccount 9 | func New(saName, namespace string, annotations map[string]string) *corev1.ServiceAccount { 10 | return &corev1.ServiceAccount{ 11 | TypeMeta: metav1.TypeMeta{ 12 | Kind: "ServiceAccount", 13 | APIVersion: corev1.SchemeGroupVersion.String(), 14 | }, 15 | ObjectMeta: metav1.ObjectMeta{ 16 | Name: saName, 17 | Namespace: namespace, 18 | Annotations: annotations, 19 | }, 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /internal/manifests/servicemonitor/build.go: -------------------------------------------------------------------------------- 1 | package servicemonitor 2 | 3 | import ( 4 | monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" 5 | 6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 7 | ) 8 | 9 | // Builder represents the struct to build servicemonitors 10 | type Builder struct { 11 | sm *monitoringv1.ServiceMonitor 12 | } 13 | 14 | // New returns a Builder for servicemonitors. 15 | func New(smName, namespace string, labels map[string]string) *Builder { 16 | return &Builder{sm: newServiceMonitor(smName, namespace, labels)} 17 | } 18 | 19 | func newServiceMonitor(serviceMonitorName, namespace string, labels map[string]string) *monitoringv1.ServiceMonitor { 20 | return &monitoringv1.ServiceMonitor{ 21 | TypeMeta: metav1.TypeMeta{ 22 | Kind: monitoringv1.ServiceMonitorsKind, 23 | APIVersion: monitoringv1.SchemeGroupVersion.String(), 24 | }, 25 | ObjectMeta: metav1.ObjectMeta{ 26 | Name: serviceMonitorName, 27 | Namespace: namespace, 28 | Labels: labels, 29 | }, 30 | Spec: monitoringv1.ServiceMonitorSpec{}, 31 | } 32 | } 33 | 34 | // Build returns the final servicemonitor 35 | func (b *Builder) Build() *monitoringv1.ServiceMonitor { return b.sm } 36 | 37 | // WithJobLabel sets the servicemonitor job label 38 | func (b *Builder) WithJobLabel(l string) *Builder { 39 | b.sm.Spec.JobLabel = l 40 | return b 41 | } 42 | 43 | // WithSelector sets the servicemonitor selector 44 | func (b *Builder) WithSelector(s metav1.LabelSelector) *Builder { 45 | b.sm.Spec.Selector = s 46 | return b 47 | } 48 | 49 | // WithNamespaceSelector sets ths servicemonitor namespace selector 50 | func (b *Builder) WithNamespaceSelector(nss monitoringv1.NamespaceSelector) *Builder { 51 | b.sm.Spec.NamespaceSelector = nss 52 | return b 53 | } 54 | 55 | // WithEndpoints appends endpoints to the servicemonitor 56 | func (b *Builder) WithEndpoints(ep ...monitoringv1.Endpoint) *Builder { 57 | b.sm.Spec.Endpoints = append(b.sm.Spec.Endpoints, ep...) 
58 | return b 59 | } 60 | -------------------------------------------------------------------------------- /internal/manifests/statefulset/build.go: -------------------------------------------------------------------------------- 1 | package statefulset 2 | 3 | import ( 4 | appsv1 "k8s.io/api/apps/v1" 5 | corev1 "k8s.io/api/core/v1" 6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 7 | ) 8 | 9 | // Builder represents the struct to build k8s statefulsets 10 | type Builder struct { 11 | sts *appsv1.StatefulSet 12 | } 13 | 14 | // New returns a new Builder instance with a default initialized statefulset. 15 | func New(statefulSetName, namespace string, labels map[string]string, replicas int32) *Builder { 16 | return &Builder{sts: newStatefulSet(statefulSetName, namespace, labels, replicas)} 17 | } 18 | 19 | func newStatefulSet(statefulSetName, namespace string, labels map[string]string, replicas int32) *appsv1.StatefulSet { 20 | return &appsv1.StatefulSet{ 21 | TypeMeta: metav1.TypeMeta{ 22 | Kind: "StatefulSet", 23 | APIVersion: appsv1.SchemeGroupVersion.String(), 24 | }, 25 | ObjectMeta: metav1.ObjectMeta{ 26 | Name: statefulSetName, 27 | Namespace: namespace, 28 | Labels: labels, 29 | }, 30 | Spec: appsv1.StatefulSetSpec{ 31 | Replicas: &replicas, 32 | }, 33 | } 34 | } 35 | 36 | // Build returns the final statefulset. 37 | func (b *Builder) Build() *appsv1.StatefulSet { return b.sts } 38 | 39 | // WithSelector sets the statefulset pod selector. 40 | func (b *Builder) WithSelector(s metav1.LabelSelector) *Builder { 41 | b.sts.Spec.Selector = &s 42 | return b 43 | } 44 | 45 | // WithStrategy sets the statefulset spec update strategy 46 | func (b *Builder) WithUpdateStrategy(s appsv1.StatefulSetUpdateStrategy) *Builder { 47 | b.sts.Spec.UpdateStrategy = s 48 | return b 49 | } 50 | 51 | // WithTemplate sets the statefulset spec pod template spec 52 | func (b *Builder) WithTemplate(t corev1.PodTemplateSpec) *Builder { 53 | b.sts.Spec.Template = t 54 | return b 55 | } 56 | -------------------------------------------------------------------------------- /internal/utils/comparators/envvars.go: -------------------------------------------------------------------------------- 1 | package comparators 2 | 3 | import ( 4 | "reflect" 5 | 6 | v1 "k8s.io/api/core/v1" 7 | ) 8 | 9 | /* 10 | * 11 | EnvValueEqual - check if 2 EnvValues are equal or not 12 | Notes: 13 | - reflect.DeepEqual does not return expected results if the to-be-compared value is a pointer. 14 | - needs to adjust with k8s.io/api/core/v#/types.go when the types are updated. 
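 - as an illustration (not part of the original source), the API server defaults
   ObjectFieldSelector.APIVersion to "v1", so a desired EnvVar written without it
   differs from the live object under reflect.DeepEqual even though the two are
   semantically the same; the field-by-field comparison below treats them as equal:

       desired := v1.EnvVar{Name: "POD_IP", ValueFrom: &v1.EnvVarSource{
           FieldRef: &v1.ObjectFieldSelector{FieldPath: "status.podIP"}}}
       current := v1.EnvVar{Name: "POD_IP", ValueFrom: &v1.EnvVarSource{
           FieldRef: &v1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "status.podIP"}}}
       // EnvVarEqual(desired, current) == true, reflect.DeepEqual(desired, current) == false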
15 | * 16 | */ 17 | func EnvValueEqual(lhs, rhs []v1.EnvVar) bool { 18 | if len(lhs) != len(rhs) { 19 | return false 20 | } 21 | 22 | for _, l := range lhs { 23 | found := false 24 | 25 | for _, r := range rhs { 26 | 27 | if l.Name != r.Name { 28 | continue 29 | } 30 | 31 | found = true 32 | if !EnvVarEqual(l, r) { 33 | return false 34 | } 35 | } 36 | 37 | if !found { 38 | return false 39 | } 40 | } 41 | 42 | return true 43 | } 44 | 45 | func EnvVarEqual(lhs, rhs v1.EnvVar) bool { 46 | if lhs.ValueFrom != nil { 47 | if rhs.ValueFrom == nil { 48 | return false 49 | } 50 | 51 | // compare ValueFrom here 52 | return EnvVarSourceEqual(*lhs.ValueFrom, *rhs.ValueFrom) 53 | 54 | } else { 55 | if rhs.ValueFrom != nil { 56 | return false 57 | } 58 | 59 | // compare Value here 60 | return lhs.Value == rhs.Value 61 | } 62 | } 63 | 64 | func EnvVarSourceEqual(lhs, rhs v1.EnvVarSource) bool { 65 | if lhs.FieldRef != nil && rhs.FieldRef != nil { 66 | return EnvFieldRefEqual(*lhs.FieldRef, *rhs.FieldRef) 67 | } 68 | 69 | if lhs.ResourceFieldRef != nil && rhs.ResourceFieldRef != nil { 70 | return EnvResourceFieldRefEqual(*lhs.ResourceFieldRef, *rhs.ResourceFieldRef) 71 | } 72 | 73 | if lhs.ConfigMapKeyRef != nil && rhs.ConfigMapKeyRef != nil { 74 | return reflect.DeepEqual(*lhs.ConfigMapKeyRef, *rhs.ConfigMapKeyRef) 75 | } 76 | 77 | if lhs.SecretKeyRef != nil && rhs.SecretKeyRef != nil { 78 | return reflect.DeepEqual(*lhs.SecretKeyRef, *rhs.SecretKeyRef) 79 | } 80 | 81 | return false 82 | } 83 | 84 | func EnvFieldRefEqual(lhs, rhs v1.ObjectFieldSelector) bool { 85 | // taken from https://godoc.org/k8s.io/api/core/v1#ObjectFieldSelector 86 | // this is the default value, so if omitted by us k8s will add this value in 87 | defaultAPIVersion := "v1" 88 | 89 | if lhs.APIVersion == "" { 90 | lhs.APIVersion = defaultAPIVersion 91 | } 92 | 93 | if rhs.APIVersion == "" { 94 | rhs.APIVersion = defaultAPIVersion 95 | } 96 | 97 | if lhs.APIVersion != rhs.APIVersion { 98 | return false 99 | } 100 | 101 | return lhs.FieldPath == rhs.FieldPath 102 | } 103 | 104 | func EnvResourceFieldRefEqual(lhs, rhs v1.ResourceFieldSelector) bool { 105 | // taken from https://godoc.org/k8s.io/api/core/v1#ResourceFieldSelector 106 | // divisor's default value is "1" 107 | if lhs.Divisor.Cmp(rhs.Divisor) != 0 { 108 | return false 109 | } 110 | 111 | if lhs.ContainerName != rhs.ContainerName { 112 | return false 113 | } 114 | 115 | return lhs.Resource == rhs.Resource 116 | } 117 | -------------------------------------------------------------------------------- /internal/utils/comparators/maps.go: -------------------------------------------------------------------------------- 1 | package comparators 2 | 3 | import ( 4 | "reflect" 5 | ) 6 | 7 | // AreStringMapsSame compares two maps which are string key/value 8 | func AreStringMapsSame(lhs, rhs map[string]string) bool { 9 | return reflect.DeepEqual(lhs, rhs) 10 | } 11 | -------------------------------------------------------------------------------- /internal/utils/comparators/resources.go: -------------------------------------------------------------------------------- 1 | package comparators 2 | 3 | import ( 4 | v1 "k8s.io/api/core/v1" 5 | ) 6 | 7 | func AreResourceRequementsSame(lhs, rhs v1.ResourceRequirements) bool { 8 | if rhs.Limits.Cpu().Cmp(*lhs.Limits.Cpu()) != 0 { 9 | return false 10 | } 11 | // Check memory limits 12 | if rhs.Limits.Memory().Cmp(*lhs.Limits.Memory()) != 0 { 13 | return false 14 | } 15 | // Check CPU requests 16 | if rhs.Requests.Cpu().Cmp(*lhs.Requests.Cpu()) 
!= 0 { 17 | return false 18 | } 19 | // Check memory requests 20 | if rhs.Requests.Memory().Cmp(*lhs.Requests.Memory()) != 0 { 21 | return false 22 | } 23 | 24 | return true 25 | } 26 | -------------------------------------------------------------------------------- /internal/utils/comparators/selectors.go: -------------------------------------------------------------------------------- 1 | package comparators 2 | 3 | func AreSelectorsSame(lhs, rhs map[string]string) bool { 4 | if len(lhs) != len(rhs) { 5 | return false 6 | } 7 | 8 | for lhsKey, lhsVal := range lhs { 9 | rhsVal, ok := rhs[lhsKey] 10 | if !ok || lhsVal != rhsVal { 11 | return false 12 | } 13 | } 14 | 15 | return true 16 | } 17 | -------------------------------------------------------------------------------- /internal/utils/comparators/tolerations.go: -------------------------------------------------------------------------------- 1 | package comparators 2 | 3 | import ( 4 | v1 "k8s.io/api/core/v1" 5 | ) 6 | 7 | // AreTolerationsSame compares two lists of tolerations for equality 8 | func AreTolerationsSame(lhs, rhs []v1.Toleration) bool { 9 | if len(lhs) != len(rhs) { 10 | return false 11 | } 12 | 13 | for _, lhsToleration := range lhs { 14 | if !containsToleration(lhsToleration, rhs) { 15 | return false 16 | } 17 | } 18 | 19 | return true 20 | } 21 | 22 | // containsSameTolerations checks that the tolerations in rhs are all contained within lhs 23 | // this follows our other patterns of "current, desired" 24 | func ContainsSameTolerations(lhs, rhs []v1.Toleration) bool { 25 | for _, rhsToleration := range rhs { 26 | if !containsToleration(rhsToleration, lhs) { 27 | return false 28 | } 29 | } 30 | 31 | return true 32 | } 33 | 34 | func containsToleration(toleration v1.Toleration, tolerations []v1.Toleration) bool { 35 | for _, t := range tolerations { 36 | if isTolerationSame(t, toleration) { 37 | return true 38 | } 39 | } 40 | 41 | return false 42 | } 43 | 44 | func isTolerationSame(lhs, rhs v1.Toleration) bool { 45 | tolerationSecondsBool := false 46 | // check that both are either null or not null 47 | if (lhs.TolerationSeconds == nil) == (rhs.TolerationSeconds == nil) { 48 | if lhs.TolerationSeconds != nil { 49 | // only compare values (attempt to dereference) if pointers aren't nil 50 | tolerationSecondsBool = *lhs.TolerationSeconds == *rhs.TolerationSeconds 51 | } else { 52 | tolerationSecondsBool = true 53 | } 54 | } 55 | 56 | tolerationEffectBool := lhs.Effect == rhs.Effect 57 | if lhs.Effect == "" || rhs.Effect == "" { 58 | tolerationEffectBool = true 59 | } 60 | 61 | // A toleration with the exists operator can leave the key empty to tolerate everything 62 | if (lhs.Operator == rhs.Operator) && (lhs.Operator == v1.TolerationOpExists) { 63 | if lhs.Key == "" || rhs.Key == "" { 64 | return true 65 | } 66 | } 67 | 68 | return (lhs.Key == rhs.Key) && 69 | (lhs.Operator == rhs.Operator) && 70 | (lhs.Value == rhs.Value) && 71 | tolerationEffectBool && 72 | tolerationSecondsBool 73 | } 74 | -------------------------------------------------------------------------------- /internal/utils/comparators/versions.go: -------------------------------------------------------------------------------- 1 | package comparators 2 | 3 | import ( 4 | "strconv" 5 | "strings" 6 | ) 7 | 8 | type Version string 9 | 10 | func (v Version) ToArray() ([]int, error) { 11 | sections := strings.Split(string(v), ".") 12 | versionNumbers := make([]int, len(sections)) 13 | 14 | for i, s := range sections { 15 | vn, err := strconv.Atoi(s) 16 | if err 
!= nil { 17 | return nil, err 18 | } 19 | versionNumbers[i] = vn 20 | } 21 | 22 | return versionNumbers, nil 23 | } 24 | 25 | // CompareVersionArrays will return one of: 26 | // -1 : if lhs > rhs 27 | // 0 : if lhs == rhs 28 | // 1 : if rhs > lhs 29 | func CompareVersionArrays(lhs, rhs []int) int { 30 | lLen := len(lhs) 31 | rLen := len(rhs) 32 | 33 | for i := 0; i < lLen && i < rLen; i++ { 34 | if lhs[i] > rhs[i] { 35 | return -1 36 | } 37 | 38 | if lhs[i] < rhs[i] { 39 | return 1 40 | } 41 | } 42 | 43 | // check if lhs is a more specific version number (aka newer) 44 | if lLen > rLen { 45 | return -1 46 | } 47 | 48 | // check if rhs is a more specific version number 49 | if lLen < rLen { 50 | return 1 51 | } 52 | 53 | // versions are exactly the same 54 | return 0 55 | } 56 | -------------------------------------------------------------------------------- /internal/utils/comparators/volume_mounts.go: -------------------------------------------------------------------------------- 1 | package comparators 2 | 3 | import ( 4 | "reflect" 5 | 6 | corev1 "k8s.io/api/core/v1" 7 | ) 8 | 9 | // check that all of rhs (desired) are contained within lhs (current) 10 | func ContainsSameVolumeMounts(lhs, rhs []corev1.VolumeMount) bool { 11 | for _, rVolumeMount := range rhs { 12 | found := false 13 | 14 | for _, lVolumeMount := range lhs { 15 | if lVolumeMount.Name == rVolumeMount.Name { 16 | found = true 17 | 18 | if !reflect.DeepEqual(lVolumeMount, rVolumeMount) { 19 | return false 20 | } 21 | } 22 | } 23 | 24 | if !found { 25 | return false 26 | } 27 | } 28 | 29 | return true 30 | } 31 | -------------------------------------------------------------------------------- /internal/utils/resources.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | v1 "k8s.io/api/core/v1" 5 | "k8s.io/apimachinery/pkg/api/resource" 6 | ) 7 | 8 | func CompareResources(current, desired v1.ResourceRequirements) (bool, v1.ResourceRequirements) { 9 | changed := false 10 | desiredResources := *current.DeepCopy() 11 | if desiredResources.Limits == nil { 12 | desiredResources.Limits = map[v1.ResourceName]resource.Quantity{} 13 | } 14 | if desiredResources.Requests == nil { 15 | desiredResources.Requests = map[v1.ResourceName]resource.Quantity{} 16 | } 17 | 18 | if desired.Limits.Cpu().Cmp(*current.Limits.Cpu()) != 0 { 19 | desiredResources.Limits[v1.ResourceCPU] = *desired.Limits.Cpu() 20 | changed = true 21 | } 22 | // Check memory limits 23 | if desired.Limits.Memory().Cmp(*current.Limits.Memory()) != 0 { 24 | desiredResources.Limits[v1.ResourceMemory] = *desired.Limits.Memory() 25 | changed = true 26 | } 27 | // Check CPU requests 28 | if desired.Requests.Cpu().Cmp(*current.Requests.Cpu()) != 0 { 29 | desiredResources.Requests[v1.ResourceCPU] = *desired.Requests.Cpu() 30 | changed = true 31 | } 32 | // Check memory requests 33 | if desired.Requests.Memory().Cmp(*current.Requests.Memory()) != 0 { 34 | desiredResources.Requests[v1.ResourceMemory] = *desired.Requests.Memory() 35 | changed = true 36 | } 37 | 38 | return changed, desiredResources 39 | } 40 | -------------------------------------------------------------------------------- /internal/utils/utils_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | 7 | v1 "k8s.io/api/core/v1" 8 | "k8s.io/apimachinery/pkg/api/resource" 9 | ) 10 | 11 | const ( 12 | envKey = "TEST" 13 | envValue = "value" 14 | ) 15 | 16 
| func TestLookupEnvWithDefaultDefined(t *testing.T) { 17 | os.Setenv(envKey, envValue) 18 | res := LookupEnvWithDefault(envKey, "should be ignored") 19 | if res != envValue { 20 | t.Errorf("Expected %s=%s but got %s=%s", envKey, envValue, envKey, res) 21 | } 22 | } 23 | 24 | func TestLookupEnvWithDefaultUndefined(t *testing.T) { 25 | expected := "defaulted" 26 | os.Unsetenv(envKey) 27 | res := LookupEnvWithDefault(envKey, expected) 28 | if res != expected { 29 | t.Errorf("Expected %s=%s but got %s=%s", envKey, expected, envKey, res) 30 | } 31 | } 32 | 33 | func TestCompareResources(t *testing.T) { 34 | cases := []struct { 35 | current *v1.ResourceRequirements 36 | desired *v1.ResourceRequirements 37 | expected bool 38 | }{ 39 | { 40 | &v1.ResourceRequirements{}, 41 | &v1.ResourceRequirements{}, 42 | false, 43 | }, 44 | { 45 | &v1.ResourceRequirements{ 46 | Limits: map[v1.ResourceName]resource.Quantity{"cpu": *resource.NewMilliQuantity(1000, resource.DecimalSI)}, 47 | }, 48 | &v1.ResourceRequirements{ 49 | Limits: map[v1.ResourceName]resource.Quantity{"cpu": *resource.NewMilliQuantity(1000, resource.DecimalSI)}, 50 | }, 51 | false, 52 | }, 53 | { 54 | &v1.ResourceRequirements{}, 55 | &v1.ResourceRequirements{ 56 | Limits: map[v1.ResourceName]resource.Quantity{"cpu": *resource.NewMilliQuantity(1000, resource.DecimalSI)}, 57 | }, 58 | true, 59 | }, 60 | { 61 | &v1.ResourceRequirements{ 62 | Limits: map[v1.ResourceName]resource.Quantity{"cpu": *resource.NewMilliQuantity(1000, resource.DecimalSI)}, 63 | }, 64 | &v1.ResourceRequirements{}, 65 | true, 66 | }, 67 | { 68 | &v1.ResourceRequirements{}, 69 | &v1.ResourceRequirements{ 70 | Requests: map[v1.ResourceName]resource.Quantity{"cpu": *resource.NewMilliQuantity(1000, resource.DecimalSI)}, 71 | }, 72 | true, 73 | }, 74 | { 75 | &v1.ResourceRequirements{ 76 | Requests: map[v1.ResourceName]resource.Quantity{"cpu": *resource.NewMilliQuantity(1000, resource.DecimalSI)}, 77 | }, 78 | &v1.ResourceRequirements{}, 79 | true, 80 | }, 81 | } 82 | 83 | for i, c := range cases { 84 | changed, _ := CompareResources(*c.current, *c.desired) 85 | if changed != c.expected { 86 | t.Errorf("Case %d: Expected %v but got %v", i, c.expected, changed) 87 | } 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /olm_deploy/operatorregistry/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM quay.io/operator-framework/upstream-registry-builder AS registry-builder 2 | 3 | FROM registry.ci.openshift.org/ocp/4.7:base 4 | 5 | WORKDIR / 6 | 7 | COPY bundle/manifests /manifests/ 8 | COPY bundle/metadata /metadata/ 9 | 10 | # 11 | # TODO: Remove this once we migrate olm-deploy to use `opm index add` 12 | # to deploy using the operator bundle image instead. This is 13 | # currently a temporary solution as per /bin/initializer 14 | # does not support loading package and channel information from 15 | # the metadata/annotations.yaml 16 | # 17 | COPY olm_deploy/operatorregistry/elasticsearch-operator.package.yaml /manifests/ 18 | 19 | RUN chmod -R g+w /manifests /metadata 20 | 21 | COPY olm_deploy/scripts/registry-init.sh olm_deploy/scripts/env.sh /scripts/ 22 | 23 | COPY --from=registry-builder /bin/initializer /usr/bin/initializer 24 | COPY --from=registry-builder /bin/opm /usr/bin/opm 25 | # 26 | # TODO: Remove this after merging switch to opm to get registry builds into CI first. 
27 | # 28 | COPY --from=registry-builder /bin/registry-server /usr/bin/registry-server 29 | COPY --from=registry-builder /bin/grpc_health_probe /usr/bin/grpc_health_probe 30 | 31 | # Change working directory to enable registry migrations 32 | # See https://bugzilla.redhat.com/show_bug.cgi?id=1843702 33 | # See https://bugzilla.redhat.com/show_bug.cgi?id=1827612 34 | WORKDIR /bundle 35 | -------------------------------------------------------------------------------- /olm_deploy/operatorregistry/catalog-source.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: operators.coreos.com/v1alpha1 2 | kind: CatalogSource 3 | metadata: 4 | name: elasticsearch-catalog 5 | spec: 6 | sourceType: grpc 7 | address: ${CLUSTER_IP}:50051 8 | -------------------------------------------------------------------------------- /olm_deploy/operatorregistry/elasticsearch-operator.package.yaml: -------------------------------------------------------------------------------- 1 | packageName: elasticsearch-operator 2 | defaultChannel: "stable" 3 | channels: 4 | - name: "5.8" 5 | currentCSV: elasticsearch-operator.v5.8.0 6 | - name: "stable" 7 | currentCSV: elasticsearch-operator.v5.8.0 8 | - name: "stable-5.8" 9 | currentCSV: elasticsearch-operator.v5.8.0 10 | -------------------------------------------------------------------------------- /olm_deploy/operatorregistry/registry-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: elasticsearch-operator-registry 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | registry.operator.elasticsearch: "true" 10 | template: 11 | metadata: 12 | labels: 13 | registry.operator.elasticsearch: "true" 14 | name: elasticsearch-operator-registry 15 | spec: 16 | initContainers: 17 | - name: mutate-csv-and-generate-sqlite-db 18 | image: ${IMAGE_ELASTICSEARCH_OPERATOR_REGISTRY} 19 | imagePullPolicy: Always 20 | command: 21 | - sh 22 | args: 23 | - /scripts/registry-init.sh 24 | securityContext: 25 | allowPrivilegeEscalation: false 26 | capabilities: 27 | drop: 28 | - ALL 29 | seccompProfile: 30 | type: "RuntimeDefault" 31 | volumeMounts: 32 | - name: workdir 33 | mountPath: /bundle 34 | env: 35 | - name: IMAGE_ELASTICSEARCH_OPERATOR 36 | value: ${IMAGE_ELASTICSEARCH_OPERATOR} 37 | - name: IMAGE_ELASTICSEARCH6 38 | value: ${IMAGE_ELASTICSEARCH6} 39 | - name: IMAGE_ELASTICSEARCH_PROXY 40 | value: ${IMAGE_ELASTICSEARCH_PROXY} 41 | - name: IMAGE_LOGGING_KIBANA6 42 | value: ${IMAGE_LOGGING_KIBANA6} 43 | - name: IMAGE_LOGGING_CURATOR5 44 | value: ${IMAGE_LOGGING_CURATOR5} 45 | 46 | containers: 47 | - name: elasticsearch-operator-registry 48 | image: ${IMAGE_ELASTICSEARCH_OPERATOR_REGISTRY} 49 | imagePullPolicy: Always 50 | command: 51 | - /usr/bin/registry-server 52 | - --database=/bundle/bundles.db 53 | volumeMounts: 54 | - name: workdir 55 | mountPath: /bundle 56 | ports: 57 | - containerPort: 50051 58 | name: grpc 59 | protocol: TCP 60 | livenessProbe: 61 | exec: 62 | command: 63 | - grpc_health_probe 64 | - -addr=localhost:50051 65 | readinessProbe: 66 | exec: 67 | command: 68 | - grpc_health_probe 69 | - -addr=localhost:50051 70 | resources: 71 | requests: 72 | cpu: 10m 73 | memory: 100Mi 74 | securityContext: 75 | allowPrivilegeEscalation: false 76 | capabilities: 77 | drop: 78 | - ALL 79 | seccompProfile: 80 | type: "RuntimeDefault" 81 | terminationMessagePath: /dev/termination-log 82 | terminationMessagePolicy: 
File 83 | securityContext: 84 | runAsNonRoot: true 85 | volumes: 86 | - name: workdir 87 | emptyDir: {} 88 | -------------------------------------------------------------------------------- /olm_deploy/operatorregistry/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: elasticsearch-operator-registry 5 | spec: 6 | type: ClusterIP 7 | selector: 8 | registry.operator.elasticsearch: "true" 9 | ports: 10 | - name: grpc 11 | port: 50051 12 | protocol: TCP 13 | targetPort: 50051 14 | sessionAffinity: None 15 | 16 | -------------------------------------------------------------------------------- /olm_deploy/scripts/catalog-build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eou pipefail 3 | source $(dirname "${BASH_SOURCE[0]}")/env.sh 4 | 5 | echo "Building operator registry image ${IMAGE_ELASTICSEARCH_OPERATOR_REGISTRY}" 6 | podman build -f olm_deploy/operatorregistry/Dockerfile -t ${IMAGE_ELASTICSEARCH_OPERATOR_REGISTRY} . 7 | 8 | if [ -n ${LOCAL_IMAGE_ELASTICSEARCH_OPERATOR_REGISTRY} ] ; then 9 | coproc oc -n openshift-image-registry port-forward service/image-registry 5000:5000 10 | trap "kill -15 $COPROC_PID" EXIT 11 | read PORT_FORWARD_STDOUT <&"${COPROC[0]}" 12 | if [[ "$PORT_FORWARD_STDOUT" =~ ^Forwarding.*5000$ ]] ; then 13 | user=$(oc whoami | sed s/://) 14 | podman login --tls-verify=false -u ${user} -p $(oc whoami -t) 127.0.0.1:5000 15 | else 16 | echo "Unexpected message from oc port-forward: $PORT_FORWARD_STDOUT" 17 | fi 18 | fi 19 | echo "Pushing image ${IMAGE_ELASTICSEARCH_OPERATOR_REGISTRY}" 20 | podman push --tls-verify=false ${IMAGE_ELASTICSEARCH_OPERATOR_REGISTRY} 21 | -------------------------------------------------------------------------------- /olm_deploy/scripts/catalog-deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eou pipefail 3 | 4 | source $(dirname "${BASH_SOURCE[0]}")/env.sh 5 | 6 | echo "Using images: " 7 | echo "elastic operator registry: ${IMAGE_ELASTICSEARCH_OPERATOR_REGISTRY}" 8 | echo "elastic operator: ${IMAGE_ELASTICSEARCH_OPERATOR}" 9 | echo "kube rbac proxy: ${IMAGE_KUBE_RBAC_PROXY}" 10 | echo "elastic6: ${IMAGE_ELASTICSEARCH6}" 11 | echo "elasticsearch proxy: ${IMAGE_ELASTICSEARCH_PROXY}" 12 | echo "kibana: ${IMAGE_LOGGING_KIBANA6}" 13 | echo "curator5: ${IMAGE_LOGGING_CURATOR5}" 14 | 15 | echo "In namespace: ${ELASTICSEARCH_OPERATOR_NAMESPACE}" 16 | 17 | if oc get project ${ELASTICSEARCH_OPERATOR_NAMESPACE} > /dev/null 2>&1 ; then 18 | echo using existing project ${ELASTICSEARCH_OPERATOR_NAMESPACE} for operator catalog deployment 19 | else 20 | oc create namespace ${ELASTICSEARCH_OPERATOR_NAMESPACE} 21 | fi 22 | 23 | # substitute image names into the catalog deployment yaml and deploy it 24 | envsubst < olm_deploy/operatorregistry/registry-deployment.yaml | oc create -n ${ELASTICSEARCH_OPERATOR_NAMESPACE} -f - 25 | olm_deploy/scripts/wait_for_deployment.sh ${ELASTICSEARCH_OPERATOR_NAMESPACE} elasticsearch-operator-registry 26 | oc wait -n ${ELASTICSEARCH_OPERATOR_NAMESPACE} --timeout=120s --for=condition=available deployment/elasticsearch-operator-registry 27 | 28 | # create the catalog service 29 | oc create -n ${ELASTICSEARCH_OPERATOR_NAMESPACE} -f olm_deploy/operatorregistry/service.yaml 30 | 31 | # find the catalog service ip, substitute it into the catalogsource and create the catalog source 32 | export 
CLUSTER_IP=$(oc get -n ${ELASTICSEARCH_OPERATOR_NAMESPACE} service elasticsearch-operator-registry -o jsonpath='{.spec.clusterIP}') 33 | envsubst < olm_deploy/operatorregistry/catalog-source.yaml | oc create -n ${ELASTICSEARCH_OPERATOR_NAMESPACE} -f - 34 | -------------------------------------------------------------------------------- /olm_deploy/scripts/catalog-uninstall.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eou pipefail 3 | 4 | ELASTICSEARCH_OPERATOR_NAMESPACE=${ELASTICSEARCH_OPERATOR_NAMESPACE:-openshift-operators-redhat} 5 | 6 | oc delete --wait --ignore-not-found project ${ELASTICSEARCH_OPERATOR_NAMESPACE} 7 | -------------------------------------------------------------------------------- /olm_deploy/scripts/env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -eou pipefail 3 | 4 | export OCP_VERSION=${OCP_VERSION:-4.11} 5 | export LOGGING_VERSION=${LOGGING_VERSION:-5.8} 6 | export LOGGING_ES_VERSION=${LOGGING_ES_VERSION:-6.8.1} 7 | export LOGGING_KIBANA_VERSION=${LOGGING_KIBANA_VERSION:-6.8.1} 8 | export LOGGING_ES_PROXY_VERSION=${LOGGING_ES_PROXY_VERSION:-1.0} 9 | export LOGGING_CURATOR_VERSION=${LOGGING_CURATOR_VERSION:-5.8.1} 10 | export LOGGING_IS=${LOGGING_IS:-openshift-logging} 11 | 12 | #openshift images 13 | export IMAGE_KUBE_RBAC_PROXY=${IMAGE_KUBE_RBAC_PROXY:-quay.io/openshift/origin-kube-rbac-proxy:${OCP_VERSION}} 14 | 15 | #logging images 16 | IMAGE_ELASTICSEARCH_OPERATOR_REGISTRY=${IMAGE_ELASTICSEARCH_OPERATOR_REGISTRY:-quay.io/${LOGGING_IS}/elasticsearch-operator-registry:${LOGGING_VERSION}} 17 | export IMAGE_ELASTICSEARCH_OPERATOR_REGISTRY=${IMAGE_ELASTICSEARCH_OPERATOR_REGISTRY:-$LOCAL_IMAGE_ELASTICSEARCH_OPERATOR_REGISTRY} 18 | 19 | export IMAGE_ELASTICSEARCH_OPERATOR=${IMAGE_ELASTICSEARCH_OPERATOR:-quay.io/${LOGGING_IS}/elasticsearch-operator:${LOGGING_VERSION}} 20 | export IMAGE_ELASTICSEARCH6=${IMAGE_ELASTICSEARCH6:-quay.io/${LOGGING_IS}/elasticsearch6:${LOGGING_ES_VERSION}} 21 | export IMAGE_ELASTICSEARCH_PROXY=${IMAGE_ELASTICSEARCH_PROXY:-quay.io/${LOGGING_IS}/elasticsearch-proxy:${LOGGING_ES_PROXY_VERSION}} 22 | export IMAGE_LOGGING_KIBANA6=${IMAGE_LOGGING_KIBANA6:-quay.io/${LOGGING_IS}/kibana6:${LOGGING_KIBANA_VERSION}} 23 | export IMAGE_LOGGING_CURATOR5=${IMAGE_LOGGING_CURATOR5:-quay.io/${LOGGING_IS}/curator5:${LOGGING_CURATOR_VERSION}} 24 | 25 | export ELASTICSEARCH_OPERATOR_NAMESPACE=${ELASTICSEARCH_OPERATOR_NAMESPACE:-openshift-operators-redhat} 26 | -------------------------------------------------------------------------------- /olm_deploy/scripts/operator-install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eou pipefail 3 | 4 | source $(dirname "${BASH_SOURCE[0]}")/env.sh 5 | 6 | if oc get project ${ELASTICSEARCH_OPERATOR_NAMESPACE} > /dev/null 2>&1 ; then 7 | echo using existing project ${ELASTICSEARCH_OPERATOR_NAMESPACE} for operator installation 8 | else 9 | oc create namespace ${ELASTICSEARCH_OPERATOR_NAMESPACE} 10 | fi 11 | 12 | set +e 13 | oc label ns/${ELASTICSEARCH_OPERATOR_NAMESPACE} openshift.io/cluster-monitoring=true --overwrite 14 | set -e 15 | 16 | echo "##################" 17 | echo "oc version" 18 | oc version 19 | echo "##################" 20 | 21 | # create the operatorgroup 22 | oc create -n ${ELASTICSEARCH_OPERATOR_NAMESPACE} -f olm_deploy/subscription/operator-group.yaml 23 | 24 | # create the subscription 25 | export 
OPERATOR_PACKAGE_CHANNEL="\"${LOGGING_VERSION}\"" 26 | subscription=$(envsubst < olm_deploy/subscription/subscription.yaml) 27 | echo "Creating:" 28 | echo "$subscription" 29 | echo "$subscription" | oc create -n ${ELASTICSEARCH_OPERATOR_NAMESPACE} -f - 30 | 31 | olm_deploy/scripts/wait_for_deployment.sh ${ELASTICSEARCH_OPERATOR_NAMESPACE} elasticsearch-operator 32 | oc wait -n ${ELASTICSEARCH_OPERATOR_NAMESPACE} --timeout=180s --for=condition=available deployment/elasticsearch-operator 33 | -------------------------------------------------------------------------------- /olm_deploy/scripts/operator-uninstall.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eou pipefail 3 | 4 | source $(dirname "${BASH_SOURCE[0]}")/env.sh 5 | 6 | oc delete --wait --ignore-not-found ns ${ELASTICSEARCH_OPERATOR_NAMESPACE} 7 | 8 | oc delete --wait --ignore-not-found crd kibanas.logging.openshift.io 9 | oc delete --wait --ignore-not-found crd elasticsearches.logging.openshift.io 10 | -------------------------------------------------------------------------------- /olm_deploy/scripts/registry-init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eou pipefail 4 | source $(dirname "${BASH_SOURCE[0]}")/env.sh 5 | 6 | echo -e "Dumping IMAGE env vars\n" 7 | env | grep IMAGE 8 | echo -e "\n\n" 9 | 10 | # update the manifest with the image built by ci 11 | sed -i "s,quay.io/openshift-logging/elasticsearch-operator:latest,${IMAGE_ELASTICSEARCH_OPERATOR}," /manifests/*clusterserviceversion.yaml 12 | sed -i "s,quay.io/openshift/origin-kube-rbac-proxy:latest,${IMAGE_KUBE_RBAC_PROXY}," /manifests/*clusterserviceversion.yaml 13 | sed -i "s,quay.io/openshift-logging/elasticsearch6:6.8.1,${IMAGE_ELASTICSEARCH6}," /manifests/*clusterserviceversion.yaml 14 | sed -i "s,quay.io/openshift-logging/elasticsearch-proxy:1.0,${IMAGE_ELASTICSEARCH_PROXY}," /manifests/*clusterserviceversion.yaml 15 | sed -i "s,quay.io/openshift-logging/kibana6:6.8.1,${IMAGE_LOGGING_KIBANA6}," /manifests/*clusterserviceversion.yaml 16 | sed -i "s,quay.io/openshift-logging/curator5:5.8.1,${IMAGE_LOGGING_CURATOR5}," /manifests/*clusterserviceversion.yaml 17 | 18 | # update the manifest to pull always the operator image for non-CI environments 19 | if [ "${OPENSHIFT_CI:-false}" == "false" ] ; then 20 | echo -e "Set operator deployment's imagePullPolicy to 'Always'\n\n" 21 | sed -i 's,imagePullPolicy:\ IfNotPresent,imagePullPolicy:\ Always,' /manifests/*clusterserviceversion.yaml 22 | fi 23 | 24 | echo -e "substitution complete, dumping new csv\n\n" 25 | cat /manifests/*clusterserviceversion.yaml 26 | 27 | echo "generating sqlite database" 28 | 29 | /usr/bin/initializer --manifests=/manifests --output=/bundle/bundles.db --permissive=true 30 | -------------------------------------------------------------------------------- /olm_deploy/scripts/wait_for_deployment.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # $1 - namespace 3 | # $2 - deployment name 4 | set -eou pipefail 5 | 6 | retries=20 7 | until [[ "$retries" -le "0" ]]; do 8 | output=$(oc get deployment -n ${1} ${2} -o jsonpath='{.metadata.name}' 2>/dev/null || echo "waiting for deployment ${1}/${2}") 9 | 10 | if [ "${output}" = "${2}" ] ; then 11 | echo "deployment ${1}/${2} has been created" >&2 12 | exit 0 13 | fi 14 | 15 | retries=$((retries - 1)) 16 | echo "${output} - remaining attempts: ${retries}" >&2 17 | 18 | 
sleep 3 19 | done 20 | 21 | exit 1 -------------------------------------------------------------------------------- /olm_deploy/subscription/operator-group.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: operators.coreos.com/v1 2 | kind: OperatorGroup 3 | metadata: 4 | name: elasticsearch-operator 5 | 6 | -------------------------------------------------------------------------------- /olm_deploy/subscription/subscription.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: operators.coreos.com/v1alpha1 2 | kind: Subscription 3 | metadata: 4 | name: elasticsearch-operator 5 | spec: 6 | channel: ${OPERATOR_PACKAGE_CHANNEL} 7 | name: elasticsearch-operator 8 | source: elasticsearch-catalog 9 | sourceNamespace: ${ELASTICSEARCH_OPERATOR_NAMESPACE} 10 | -------------------------------------------------------------------------------- /origin-meta.yaml: -------------------------------------------------------------------------------- 1 | from: 2 | - source: registry-proxy.engineering.redhat.com/rh-osbs/openshift-golang-builder\:v(?:[\.0-9\-]*).* 3 | target: registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.20-openshift-4.14 AS builder 4 | - source: registry.redhat.io/ubi9:9.(\d)-([\.0-9])* 5 | target: registry.ci.openshift.org/ocp/4.14:base 6 | env: 7 | - source: RUNBOOK_BASE_URL=.* 8 | target: RUNBOOK_BASE_URL="https://github.com/openshift/elasticsearch-operator/blob/master/docs/alerts.md" 9 | -------------------------------------------------------------------------------- /test/e2e/elasticsearch_write_test.go: -------------------------------------------------------------------------------- 1 | package e2e 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | "testing" 7 | 8 | "github.com/openshift/elasticsearch-operator/test/utils" 9 | ) 10 | 11 | func TestElasticsearchWrite(t *testing.T) { 12 | setupK8sClient(t) 13 | t.Run("elasticsearch write", esWriteTest) 14 | } 15 | 16 | func esWriteTest(t *testing.T) { 17 | namespace := operatorNamespace 18 | 19 | // Deploy a single node cluster, wait for success 20 | esUUID := utils.GenerateUUID() 21 | t.Logf("Using UUID for elasticsearch CR: %v", esUUID) 22 | 23 | dataUUID := utils.GenerateUUID() 24 | t.Logf("Using GenUUID for data nodes: %v", dataUUID) 25 | 26 | cr, err := createElasticsearchCR(t, k8sClient, esUUID, dataUUID, 1) 27 | if err != nil { 28 | t.Fatalf("could not create exampleElasticsearch: %v", err) 29 | } 30 | 31 | dplName := fmt.Sprintf("elasticsearch-%v-cdm-%v-1", esUUID, dataUUID) 32 | err = utils.WaitForDeployment(t, k8sClient, operatorNamespace, dplName, 1, retryInterval, timeout) 33 | if err != nil { 34 | t.Fatalf("timed out waiting for first node deployment %v: %v", dplName, err) 35 | } 36 | matchingLabels := map[string]string{ 37 | "cluster-name": cr.GetName(), 38 | "component": "elasticsearch", 39 | } 40 | pods, err := utils.WaitForPods(t, k8sClient, namespace, matchingLabels, retryInterval, timeout) 41 | if err != nil { 42 | t.Fatalf("failed to wait for pods: %v", err) 43 | } 44 | podName := pods.Items[0].GetName() 45 | 46 | var cmd string 47 | var execExpect func(text string) 48 | execExpect = func(text string) { 49 | code, _, _ := ExecInPod(k8sConfig, namespace, podName, cmd, "elasticsearch") 50 | if strings.Index(code, text) < 0 { 51 | t.Errorf("cmd [%s] output does not contain expected text %s", cmd, text) 52 | } 53 | } 54 | 55 | cmd = "es_util --query=foo/_doc/7 -d '{\"key\":\"value\"}' -XPUT -w %{http_code}" 56 | 
--------------------------------------------------------------------------------
/test/e2e/elasticsearch_write_test.go:
--------------------------------------------------------------------------------
package e2e

import (
    "fmt"
    "strings"
    "testing"

    "github.com/openshift/elasticsearch-operator/test/utils"
)

func TestElasticsearchWrite(t *testing.T) {
    setupK8sClient(t)
    t.Run("elasticsearch write", esWriteTest)
}

func esWriteTest(t *testing.T) {
    namespace := operatorNamespace

    // Deploy a single node cluster, wait for success
    esUUID := utils.GenerateUUID()
    t.Logf("Using UUID for elasticsearch CR: %v", esUUID)

    dataUUID := utils.GenerateUUID()
    t.Logf("Using UUID for data nodes: %v", dataUUID)

    cr, err := createElasticsearchCR(t, k8sClient, esUUID, dataUUID, 1)
    if err != nil {
        t.Fatalf("could not create elasticsearch CR: %v", err)
    }

    dplName := fmt.Sprintf("elasticsearch-%v-cdm-%v-1", esUUID, dataUUID)
    err = utils.WaitForDeployment(t, k8sClient, operatorNamespace, dplName, 1, retryInterval, timeout)
    if err != nil {
        t.Fatalf("timed out waiting for first node deployment %v: %v", dplName, err)
    }
    matchingLabels := map[string]string{
        "cluster-name": cr.GetName(),
        "component":    "elasticsearch",
    }
    pods, err := utils.WaitForPods(t, k8sClient, namespace, matchingLabels, retryInterval, timeout)
    if err != nil {
        t.Fatalf("failed to wait for pods: %v", err)
    }
    podName := pods.Items[0].GetName()

    // execExpect runs the current cmd inside the elasticsearch container and
    // fails the test if the output does not contain the expected text (here,
    // the HTTP status code appended by -w %{http_code}).
    var cmd string
    execExpect := func(text string) {
        out, _, _ := ExecInPod(k8sConfig, namespace, podName, cmd, "elasticsearch")
        if !strings.Contains(out, text) {
            t.Errorf("cmd [%s] output does not contain expected text %q", cmd, text)
        }
    }

    // Indexing into a plain index succeeds (the index is auto-created).
    cmd = "es_util --query=foo/_doc/7 -d '{\"key\":\"value\"}' -XPUT -w %{http_code}"
    execExpect("201")

    // Indexing into a missing *-write index fails: it is not auto-created.
    cmd = "es_util --query=foo-write/_doc/8 -d '{\"key\":\"value\"}' -XPUT -w %{http_code}"
    execExpect("404")

    // After the *-write index is created explicitly, writes succeed.
    cmd = "es_util --query=foo-write -XPUT -w %{http_code}"
    execExpect("200")

    cmd = "es_util --query=foo-write/_doc/1 -d '{\"key\":\"value\"}' -XPUT -w %{http_code}"
    execExpect("201")

    cleanupEsTest(t, k8sClient, operatorNamespace, esUUID)
    t.Log("Finished successfully")
}

--------------------------------------------------------------------------------
/test/e2e/main_test.go:
--------------------------------------------------------------------------------
package e2e

import (
    "k8s.io/client-go/rest"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

const (
    // TestOperatorNamespaceEnv names the environment variable that selects the
    // namespace the e2e tests run against.
    TestOperatorNamespaceEnv = "TEST_OPERATOR_NAMESPACE"
)

// Shared fixtures used across the e2e tests, populated by the suite's setup
// helpers before the individual tests run.
var (
    operatorNamespace string
    k8sClient         client.Client
    k8sConfig         *rest.Config
    projectRootDir    string
)

--------------------------------------------------------------------------------
/test/files/dummycrd.yaml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openshift/elasticsearch-operator/81cd6e70c15eea7c947e5f2aee3a9a2ba3b0806e/test/files/dummycrd.yaml

--------------------------------------------------------------------------------
/test/files/emptyToken:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openshift/elasticsearch-operator/81cd6e70c15eea7c947e5f2aee3a9a2ba3b0806e/test/files/emptyToken

--------------------------------------------------------------------------------
/test/files/testToken:
--------------------------------------------------------------------------------
test

--------------------------------------------------------------------------------
/test/helpers/envvars.go:
--------------------------------------------------------------------------------
package helpers

import (
    "fmt"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"

    v1 "k8s.io/api/core/v1"
)

type EnvVarsExpectation struct {
    envVars []v1.EnvVar
}

type EnvVarExpectation struct {
    envVar v1.EnvVar
}

func ExpectEnvVars(envVars []v1.EnvVar) *EnvVarsExpectation {
    return &EnvVarsExpectation{envVars: envVars}
}

func (exp *EnvVarsExpectation) ToIncludeName(name string) *EnvVarExpectation {
    for _, env := range exp.envVars {
        if env.Name == name {
            return &EnvVarExpectation{env}
        }
    }
    Fail(fmt.Sprintf("Expected to find an environment variable %q in the list of env vars: %v", name, exp.envVars))
    return nil
}

func (exp *EnvVarExpectation) WithFieldRefPath(path string) *EnvVarExpectation {
    Expect(exp.envVar.ValueFrom).ToNot(BeNil(), "The valueFrom field for %v is nil", exp.envVar)
    Expect(exp.envVar.ValueFrom.FieldRef).ToNot(BeNil(), "The valueFrom.fieldRef for %v is nil", exp.envVar)
    Expect(exp.envVar.ValueFrom.FieldRef.FieldPath).To(Equal(path))
    return exp
}

func (exp *EnvVarExpectation) WithValue(value string) *EnvVarExpectation {
    Expect(exp.envVar.Value).To(Equal(value))
    return exp
}

--------------------------------------------------------------------------------
/test/helpers/json.go:
--------------------------------------------------------------------------------
package helpers

import (
    "encoding/json"
    "fmt"
    "strings"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
)

// NormalizeJSON re-marshals a JSON document with sorted keys and consistent
// indentation so that two documents can be compared structurally.
func NormalizeJSON(doc string) string {
    doc = strings.TrimSpace(doc)
    data := &map[string]interface{}{}
    if err := json.Unmarshal([]byte(doc), data); err != nil {
        Fail(fmt.Sprintf("Unable to normalize document '%s': %v", doc, err))
    }
    response, err := json.MarshalIndent(data, "", "\t")
    if err != nil {
        Fail(fmt.Sprintf("Unable to normalize document '%s': %v", doc, err))
    }
    return string(response)
}

type JSONExpectation struct {
    actual string
}

func ExpectJSON(doc string) *JSONExpectation {
    return &JSONExpectation{actual: doc}
}

func (exp *JSONExpectation) ToEqual(doc string) {
    actual := NormalizeJSON(exp.actual)
    expected := NormalizeJSON(doc)
    if actual != expected {
        fmt.Printf("Actual:\n>%s<\n", actual)
        fmt.Printf("Expected:\n>%s<\n", expected)
        Expect(actual).To(Equal(expected))
    }
}
--------------------------------------------------------------------------------
/test/helpers/runtime/client.go:
--------------------------------------------------------------------------------
package runtime

import (
    "context"

    "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

// FakeClient wraps another controller-runtime client. It can be forced to
// return Error from most operations and records the objects passed to Update
// so tests can assert on them.
type FakeClient struct {
    Error   error
    Client  client.Client
    updated []client.Object
}

func NewAlreadyExistsException() *errors.StatusError {
    return errors.NewAlreadyExists(schema.GroupResource{}, "existingname")
}

func NewFakeClient(client client.Client, err error) *FakeClient {
    return &FakeClient{
        Error:  err,
        Client: client,
    }
}

// WasUpdated reports whether an object with the given name was passed to Update.
func (fw *FakeClient) WasUpdated(name string) bool {
    for _, o := range fw.updated {
        listkey := client.ObjectKeyFromObject(o)
        if listkey.Name == name {
            return true
        }
    }
    return false
}

func (fw *FakeClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error {
    if fw.Error != nil {
        return fw.Error
    }
    return fw.Client.Create(ctx, obj, opts...)
}

func (fw *FakeClient) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error {
    return fw.Error
}

func (fw *FakeClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error {
    return fw.Error
}

func (fw *FakeClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error {
    fw.updated = append(fw.updated, obj)
    return fw.Client.Update(ctx, obj, opts...)
}

func (fw *FakeClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object) error {
    return fw.Client.Get(ctx, key, obj)
}

func (fw *FakeClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
    return fw.Error
}

func (fw *FakeClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error {
    return fw.Error
}

// Status delegates to the wrapped client's status writer.
func (fw *FakeClient) Status() client.StatusWriter {
    return fw.Client.Status()
}

--------------------------------------------------------------------------------
/test/helpers/yaml.go:
--------------------------------------------------------------------------------
package helpers

import (
    "fmt"
    "strings"

    "gopkg.in/yaml.v2"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
)

// NormalizeYaml re-marshals a YAML document so that two documents can be
// compared independently of key order and formatting.
func NormalizeYaml(doc string) string {
    doc = strings.TrimSpace(doc)
    data := &map[string]interface{}{}
    if err := yaml.Unmarshal([]byte(doc), data); err != nil {
        Fail(fmt.Sprintf("Unable to normalize document '%s': %v", doc, err))
    }
    response, err := yaml.Marshal(data)
    if err != nil {
        Fail(fmt.Sprintf("Unable to normalize document '%s': %v", doc, err))
    }
    return string(response)
}

type YamlExpectation struct {
    actual string
}

func ExpectYaml(doc string) *YamlExpectation {
    return &YamlExpectation{actual: doc}
}

func (exp *YamlExpectation) ToEqual(doc string) {
    actual := NormalizeYaml(exp.actual)
    expected := NormalizeYaml(doc)
    if actual != expected {
        fmt.Printf("Actual:\n>%s<\n", actual)
        fmt.Printf("Expected:\n>%s<\n", expected)
        Expect(actual).To(Equal(expected))
    }
}

--------------------------------------------------------------------------------
/version/version.go:
--------------------------------------------------------------------------------
package version

// Version is the operator version string reported for this build.
var Version = "4.7.0"

--------------------------------------------------------------------------------
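Illustrative usage sketch (not part of the repository): exercising the FakeClient helper from test/helpers/runtime/client.go in a plain Go test, assuming controller-runtime's in-memory fake client as the wrapped client. The import path is inferred from the module path, and the object names, test name, and assertions are invented.

package runtime_test

import (
    "context"
    "testing"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/client/fake"

    helpers "github.com/openshift/elasticsearch-operator/test/helpers/runtime"
)

func TestFakeClientRecordsUpdates(t *testing.T) {
    secret := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "elasticsearch", Namespace: "openshift-logging"}}

    // Wrap controller-runtime's in-memory fake client; no forced error.
    fc := helpers.NewFakeClient(fake.NewClientBuilder().WithObjects(secret).Build(), nil)

    // Fetch the stored copy, change it, and write it back through the wrapper.
    current := &corev1.Secret{}
    if err := fc.Get(context.TODO(), client.ObjectKey{Namespace: "openshift-logging", Name: "elasticsearch"}, current); err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    current.StringData = map[string]string{"key": "value"}
    if err := fc.Update(context.TODO(), current); err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    // The wrapper recorded the update by object name.
    if !fc.WasUpdated("elasticsearch") {
        t.Error("expected the secret update to be recorded")
    }
}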