├── .bingo ├── .gitignore ├── README.md ├── Variables.mk ├── bingo.mod ├── controller-gen.mod ├── go.mod ├── gofumpt.mod ├── golangci-lint.mod ├── kustomize.mod ├── operator-sdk.mod └── variables.env ├── .dockerignore ├── .github ├── dependabot.yml └── workflows │ ├── bundle.yaml │ ├── codeql-analysis.yml │ ├── go.yaml │ ├── quay.yaml │ └── scorecard.yaml ├── .gitignore ├── .golangci.yaml ├── CONTRIBUTING.md ├── Dockerfile ├── LICENSE.txt ├── Makefile ├── PROJECT ├── README.md ├── _config.yml ├── api └── v1beta1 │ ├── groupversion_info.go │ ├── lokistack_types.go │ └── zz_generated.deepcopy.go ├── bundle.Dockerfile ├── bundle ├── manifests │ ├── loki-operator-controller-manager-metrics-service_v1_service.yaml │ ├── loki-operator-manager-config_v1_configmap.yaml │ ├── loki-operator-metrics-monitor_monitoring.coreos.com_v1_servicemonitor.yaml │ ├── loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml │ ├── loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml │ ├── loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml │ ├── loki-operator.clusterserviceversion.yaml │ └── loki.openshift.io_lokistacks.yaml ├── metadata │ └── annotations.yaml └── tests │ └── scorecard │ └── config.yaml ├── calculator.Dockerfile ├── cmd ├── loki-broker │ └── main.go └── size-calculator │ └── main.go ├── config ├── certmanager │ ├── certificate.yaml │ ├── kustomization.yaml │ └── kustomizeconfig.yaml ├── crd │ ├── bases │ │ └── loki.openshift.io_lokistacks.yaml │ ├── kustomization.yaml │ ├── kustomizeconfig.yaml │ └── patches │ │ ├── cainjection_in_lokistacks.yaml │ │ └── webhook_in_lokistacks.yaml ├── manager │ ├── controller_manager_config.yaml │ ├── kustomization.yaml │ └── manager.yaml ├── manifests │ ├── bases │ │ └── loki-operator.clusterserviceversion.yaml │ └── kustomization.yaml ├── overlays │ ├── development │ │ ├── kustomization.yaml │ │ ├── manager_image_pull_policy_patch.yaml │ │ ├── manager_related_image_patch.yaml │ │ └── 
minio │ │ │ ├── deployment.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── pvc.yaml │ │ │ ├── secret.yaml │ │ │ └── service.yaml │ ├── openshift │ │ ├── kustomization.yaml │ │ ├── manager_auth_proxy_patch.yaml │ │ ├── manager_related_image_patch.yaml │ │ ├── manager_run_flags_patch.yaml │ │ ├── prometheus_service_monitor_patch.yaml │ │ └── size-calculator │ │ │ ├── cluster_monitoring_config.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── logfile_metric_daemonset.yaml │ │ │ ├── logfile_metric_role.yaml │ │ │ ├── logfile_metric_role_binding.yaml │ │ │ ├── logfile_metric_scc.yaml │ │ │ ├── logfile_metric_service.yaml │ │ │ ├── logfile_metric_service_account.yaml │ │ │ ├── logfile_metric_service_monitor.yaml │ │ │ ├── storage_size_calculator.yaml │ │ │ ├── storage_size_calculator_config.yaml │ │ │ └── user_workload_monitoring_config.yaml │ └── production │ │ ├── kustomization.yaml │ │ ├── manager_auth_proxy_patch.yaml │ │ ├── manager_related_image_patch.yaml │ │ ├── manager_run_flags_patch.yaml │ │ └── prometheus_service_monitor_patch.yaml ├── prometheus │ ├── kustomization.yaml │ └── monitor.yaml ├── rbac │ ├── auth_proxy_client_clusterrole.yaml │ ├── auth_proxy_role.yaml │ ├── auth_proxy_role_binding.yaml │ ├── auth_proxy_service.yaml │ ├── kustomization.yaml │ ├── leader_election_role.yaml │ ├── leader_election_role_binding.yaml │ ├── lokistack_editor_role.yaml │ ├── lokistack_viewer_role.yaml │ ├── prometheus_role.yaml │ ├── prometheus_role_binding.yaml │ ├── role.yaml │ └── role_binding.yaml ├── samples │ ├── kustomization.yaml │ └── loki_v1beta1_lokistack.yaml └── scorecard │ ├── bases │ └── config.yaml │ ├── kustomization.yaml │ └── patches │ ├── basic.config.yaml │ └── olm.config.yaml ├── controllers ├── internal │ └── management │ │ └── state │ │ ├── state.go │ │ └── state_test.go ├── lokistack_controller.go └── lokistack_controller_test.go ├── docs ├── forwarding_logs_to_gateway.md ├── hack_loki_operator.md ├── hack_operator_make_run.md └── storage_size_calculator.md 
├── go.mod ├── go.sum ├── hack ├── addons.yaml ├── boilerplate.go.txt ├── deploy-example-secret.sh ├── deploy-prometheus-secret.sh ├── lokistack_dev.yaml ├── lokistack_gateway_dev.yaml └── lokistack_gateway_ocp.yaml ├── img └── loki-operator.png ├── index.md ├── internal ├── external │ └── k8s │ │ ├── builder.go │ │ ├── client.go │ │ └── k8sfakes │ │ ├── fake_builder.go │ │ ├── fake_client.go │ │ ├── fake_client_extensions.go │ │ └── fake_status_writer.go ├── handlers │ ├── internal │ │ ├── gateway │ │ │ ├── base_domain.go │ │ │ ├── modes.go │ │ │ ├── modes_test.go │ │ │ ├── tenant_configmap.go │ │ │ ├── tenant_configmap_test.go │ │ │ ├── tenant_secrets.go │ │ │ └── tenant_secrets_test.go │ │ └── secrets │ │ │ ├── secrets.go │ │ │ └── secrets_test.go │ ├── lokistack_create_or_update.go │ └── lokistack_create_or_update_test.go ├── manifests │ ├── build.go │ ├── build_test.go │ ├── compactor.go │ ├── compactor_test.go │ ├── config.go │ ├── config_test.go │ ├── distributor.go │ ├── distributor_test.go │ ├── gateway.go │ ├── gateway_tenants.go │ ├── gateway_tenants_test.go │ ├── gateway_test.go │ ├── indexgateway.go │ ├── indexgateway_test.go │ ├── ingester.go │ ├── ingester_test.go │ ├── internal │ │ ├── config │ │ │ ├── build.go │ │ │ ├── build_test.go │ │ │ ├── loki-config.yaml │ │ │ ├── loki-runtime-config.yaml │ │ │ └── options.go │ │ ├── gateway │ │ │ ├── build.go │ │ │ ├── build_test.go │ │ │ ├── gateway-rbac.yaml │ │ │ ├── gateway-tenants.yaml │ │ │ ├── lokistack-gateway.rego │ │ │ └── options.go │ │ └── sizes.go │ ├── memberlist.go │ ├── mutate.go │ ├── mutate_test.go │ ├── node_placement_test.go │ ├── openshift │ │ ├── build.go │ │ ├── build_test.go │ │ ├── configure.go │ │ ├── opa_openshift.go │ │ ├── options.go │ │ ├── rbac.go │ │ ├── route.go │ │ ├── service_ca.go │ │ ├── serviceaccount.go │ │ ├── serviceaccount_test.go │ │ └── var.go │ ├── options.go │ ├── querier.go │ ├── querier_test.go │ ├── query-frontend.go │ ├── query-frontend_test.go │ ├── 
service_monitor.go │ ├── service_monitor_test.go │ ├── service_test.go │ └── var.go ├── metrics │ └── metrics.go ├── sizes │ └── predict.go └── status │ ├── components.go │ ├── components_test.go │ ├── lokistack.go │ ├── lokistack_test.go │ └── status.go ├── main.go └── tools └── tools.go /.bingo/.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Ignore everything 3 | * 4 | 5 | # But not these files: 6 | !.gitignore 7 | !*.mod 8 | !README.md 9 | !Variables.mk 10 | !variables.env 11 | 12 | *tmp.mod 13 | -------------------------------------------------------------------------------- /.bingo/README.md: -------------------------------------------------------------------------------- 1 | # Project Development Dependencies. 2 | 3 | This is directory which stores Go modules with pinned buildable package that is used within this repository, managed by https://github.com/bwplotka/bingo. 4 | 5 | * Run `bingo get` to install all tools having each own module file in this directory. 6 | * Run `bingo get ` to install that have own module file in this directory. 7 | * For Makefile: Make sure to put `include .bingo/Variables.mk` in your Makefile, then use $() variable where is the .bingo/.mod. 8 | * For shell: Run `source .bingo/variables.env` to source all environment variable for each tool. 9 | * For go: Import `.bingo/variables.go` to for variable names. 10 | * See https://github.com/bwplotka/bingo or -h on how to add, remove or change binaries dependencies. 11 | 12 | ## Requirements 13 | 14 | * Go 1.14+ 15 | -------------------------------------------------------------------------------- /.bingo/Variables.mk: -------------------------------------------------------------------------------- 1 | # Auto generated binary variables helper managed by https://github.com/bwplotka/bingo v0.5.1. DO NOT EDIT. 2 | # All tools are designed to be build inside $GOBIN. 
3 | BINGO_DIR := $(dir $(lastword $(MAKEFILE_LIST))) 4 | GOPATH ?= $(shell go env GOPATH) 5 | GOBIN ?= $(firstword $(subst :, ,${GOPATH}))/bin 6 | GO ?= $(shell which go) 7 | 8 | # Below generated variables ensure that every time a tool under each variable is invoked, the correct version 9 | # will be used; reinstalling only if needed. 10 | # For example for bingo variable: 11 | # 12 | # In your main Makefile (for non array binaries): 13 | # 14 | #include .bingo/Variables.mk # Assuming -dir was set to .bingo . 15 | # 16 | #command: $(BINGO) 17 | # @echo "Running bingo" 18 | # @$(BINGO) 19 | # 20 | BINGO := $(GOBIN)/bingo-v0.4.0 21 | $(BINGO): $(BINGO_DIR)/bingo.mod 22 | @# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies. 23 | @echo "(re)installing $(GOBIN)/bingo-v0.4.0" 24 | @cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=bingo.mod -o=$(GOBIN)/bingo-v0.4.0 "github.com/bwplotka/bingo" 25 | 26 | CONTROLLER_GEN := $(GOBIN)/controller-gen-v0.5.0 27 | $(CONTROLLER_GEN): $(BINGO_DIR)/controller-gen.mod 28 | @# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies. 29 | @echo "(re)installing $(GOBIN)/controller-gen-v0.5.0" 30 | @cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=controller-gen.mod -o=$(GOBIN)/controller-gen-v0.5.0 "sigs.k8s.io/controller-tools/cmd/controller-gen" 31 | 32 | GOFUMPT := $(GOBIN)/gofumpt-v0.1.1 33 | $(GOFUMPT): $(BINGO_DIR)/gofumpt.mod 34 | @# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies. 
35 | @echo "(re)installing $(GOBIN)/gofumpt-v0.1.1" 36 | @cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=gofumpt.mod -o=$(GOBIN)/gofumpt-v0.1.1 "mvdan.cc/gofumpt" 37 | 38 | GOLANGCI_LINT := $(GOBIN)/golangci-lint-v1.38.0 39 | $(GOLANGCI_LINT): $(BINGO_DIR)/golangci-lint.mod 40 | @# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies. 41 | @echo "(re)installing $(GOBIN)/golangci-lint-v1.38.0" 42 | @cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=golangci-lint.mod -o=$(GOBIN)/golangci-lint-v1.38.0 "github.com/golangci/golangci-lint/cmd/golangci-lint" 43 | 44 | KUSTOMIZE := $(GOBIN)/kustomize-v3.8.7 45 | $(KUSTOMIZE): $(BINGO_DIR)/kustomize.mod 46 | @# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies. 47 | @echo "(re)installing $(GOBIN)/kustomize-v3.8.7" 48 | @cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=kustomize.mod -o=$(GOBIN)/kustomize-v3.8.7 "sigs.k8s.io/kustomize/kustomize/v3" 49 | 50 | OPERATOR_SDK := $(GOBIN)/operator-sdk-v1.11.0 51 | $(OPERATOR_SDK): $(BINGO_DIR)/operator-sdk.mod 52 | @# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies. 53 | @echo "(re)installing $(GOBIN)/operator-sdk-v1.11.0" 54 | @cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=operator-sdk.mod -o=$(GOBIN)/operator-sdk-v1.11.0 "github.com/operator-framework/operator-sdk/cmd/operator-sdk" 55 | 56 | -------------------------------------------------------------------------------- /.bingo/bingo.mod: -------------------------------------------------------------------------------- 1 | module _ // Auto generated by https://github.com/bwplotka/bingo. 
DO NOT EDIT 2 | 3 | go 1.16 4 | 5 | require github.com/bwplotka/bingo v0.4.0 6 | -------------------------------------------------------------------------------- /.bingo/controller-gen.mod: -------------------------------------------------------------------------------- 1 | module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT 2 | 3 | go 1.16 4 | 5 | require sigs.k8s.io/controller-tools v0.5.0 // cmd/controller-gen 6 | -------------------------------------------------------------------------------- /.bingo/go.mod: -------------------------------------------------------------------------------- 1 | module _ // Fake go.mod auto-created by 'bingo' for go -moddir compatibility with non-Go projects. Commit this file, together with other .mod files. -------------------------------------------------------------------------------- /.bingo/gofumpt.mod: -------------------------------------------------------------------------------- 1 | module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT 2 | 3 | go 1.16 4 | 5 | require mvdan.cc/gofumpt v0.1.1 6 | -------------------------------------------------------------------------------- /.bingo/golangci-lint.mod: -------------------------------------------------------------------------------- 1 | module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT 2 | 3 | go 1.16 4 | 5 | require github.com/golangci/golangci-lint v1.38.0 // cmd/golangci-lint 6 | -------------------------------------------------------------------------------- /.bingo/kustomize.mod: -------------------------------------------------------------------------------- 1 | module _ // Auto generated by https://github.com/bwplotka/bingo. 
DO NOT EDIT 2 | 3 | go 1.16 4 | 5 | require sigs.k8s.io/kustomize/kustomize/v3 v3.8.7 6 | -------------------------------------------------------------------------------- /.bingo/operator-sdk.mod: -------------------------------------------------------------------------------- 1 | module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT 2 | 3 | go 1.16 4 | 5 | replace github.com/Azure/go-autorest => github.com/Azure/go-autorest v14.2.0+incompatible 6 | 7 | replace github.com/containerd/containerd => github.com/containerd/containerd v1.4.3 8 | 9 | replace github.com/mattn/go-sqlite3 => github.com/mattn/go-sqlite3 v1.10.0 10 | 11 | replace golang.org/x/text => golang.org/x/text v0.3.3 12 | 13 | require github.com/operator-framework/operator-sdk v1.11.0 // cmd/operator-sdk 14 | -------------------------------------------------------------------------------- /.bingo/variables.env: -------------------------------------------------------------------------------- 1 | # Auto generated binary variables helper managed by https://github.com/bwplotka/bingo v0.5.1. DO NOT EDIT. 2 | # All tools are designed to be build inside $GOBIN. 3 | # Those variables will work only until 'bingo get' was invoked, or if tools were installed via Makefile's Variables.mk. 
4 | GOBIN=${GOBIN:=$(go env GOBIN)} 5 | 6 | if [ -z "$GOBIN" ]; then 7 | GOBIN="$(go env GOPATH)/bin" 8 | fi 9 | 10 | 11 | BINGO="${GOBIN}/bingo-v0.4.0" 12 | 13 | CONTROLLER_GEN="${GOBIN}/controller-gen-v0.5.0" 14 | 15 | GOFUMPT="${GOBIN}/gofumpt-v0.1.1" 16 | 17 | GOLANGCI_LINT="${GOBIN}/golangci-lint-v1.38.0" 18 | 19 | KUSTOMIZE="${GOBIN}/kustomize-v3.8.7" 20 | 21 | OPERATOR_SDK="${GOBIN}/operator-sdk-v1.11.0" 22 | 23 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file 2 | # Ignore all files which are not go type 3 | !**/*.go 4 | !**/*.mod 5 | !**/*.sum 6 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | # Maintain dependencies for GitHub Actions 4 | - package-ecosystem: "github-actions" 5 | directory: "/" 6 | schedule: 7 | interval: "daily" 8 | -------------------------------------------------------------------------------- /.github/workflows/bundle.yaml: -------------------------------------------------------------------------------- 1 | name: olm bundle 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | 9 | jobs: 10 | build: 11 | name: build 12 | runs-on: ubuntu-latest 13 | strategy: 14 | fail-fast: false 15 | matrix: 16 | go: ['1.16'] 17 | steps: 18 | - name: Set up Go 1.x 19 | uses: actions/setup-go@v2 20 | with: 21 | go-version: ${{ matrix.go }} 22 | id: go 23 | - uses: actions/checkout@v2 24 | - name: Install make 25 | run: sudo apt-get install make 26 | - name: make bundle 27 | run: make bundle 28 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: 
-------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: [ master ] 17 | pull_request: 18 | # The branches below must be a subset of the branches above 19 | branches: [ master ] 20 | schedule: 21 | - cron: '21 18 * * 6' 22 | 23 | jobs: 24 | analyze: 25 | name: Analyze 26 | runs-on: ubuntu-latest 27 | permissions: 28 | actions: read 29 | contents: read 30 | security-events: write 31 | 32 | strategy: 33 | fail-fast: false 34 | matrix: 35 | language: [ 'go' ] 36 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] 37 | # Learn more: 38 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed 39 | 40 | steps: 41 | - name: Checkout repository 42 | uses: actions/checkout@v2 43 | 44 | # Initializes the CodeQL tools for scanning. 45 | - name: Initialize CodeQL 46 | uses: github/codeql-action/init@v1 47 | with: 48 | languages: ${{ matrix.language }} 49 | # If you wish to specify custom queries, you can do so here or in a config file. 50 | # By default, queries listed here will override any specified in a config file. 51 | # Prefix the list here with "+" to use these queries and those in the config file. 
52 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 53 | 54 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 55 | # If this step fails, then you should remove it and run the build manually (see below) 56 | - name: Autobuild 57 | uses: github/codeql-action/autobuild@v1 58 | 59 | # ℹ️ Command-line programs to run using the OS shell. 60 | # 📚 https://git.io/JvXDl 61 | 62 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 63 | # and modify them (or add more) to build your code if your project 64 | # uses a compiled language 65 | 66 | #- run: | 67 | # make bootstrap 68 | # make release 69 | 70 | - name: Perform CodeQL Analysis 71 | uses: github/codeql-action/analyze@v1 72 | -------------------------------------------------------------------------------- /.github/workflows/go.yaml: -------------------------------------------------------------------------------- 1 | name: go 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | 9 | jobs: 10 | lint: 11 | name: lint 12 | runs-on: ubuntu-latest 13 | strategy: 14 | fail-fast: false 15 | matrix: 16 | go: ['1.16'] 17 | steps: 18 | - name: Set up Go 1.x 19 | uses: actions/setup-go@v2 20 | with: 21 | go-version: ${{ matrix.go }} 22 | id: go 23 | - uses: actions/checkout@v2 24 | - name: Lint 25 | uses: golangci/golangci-lint-action@v2 26 | with: 27 | version: v1.38 28 | skip-go-installation: true 29 | only-new-issues: true 30 | args: --timeout=2m 31 | 32 | build-manager: 33 | name: Build Manager 34 | runs-on: ubuntu-latest 35 | strategy: 36 | fail-fast: false 37 | matrix: 38 | go: ['1.16'] 39 | steps: 40 | - name: Install make 41 | run: sudo apt-get install make 42 | - name: Set up Go 1.x 43 | uses: actions/setup-go@v2 44 | with: 45 | go-version: ${{ matrix.go }} 46 | id: go 47 | - uses: actions/checkout@v2 48 | - name: Build Manager 49 | run: |- 50 | make manager && git diff --exit-code 51 | 52 | 
build-broker: 53 | name: Build Broker 54 | runs-on: ubuntu-latest 55 | strategy: 56 | fail-fast: false 57 | matrix: 58 | go: ['1.16'] 59 | steps: 60 | - name: Install make 61 | run: sudo apt-get install make 62 | - name: Set up Go 1.x 63 | uses: actions/setup-go@v2 64 | with: 65 | go-version: ${{ matrix.go }} 66 | id: go 67 | - uses: actions/checkout@v2 68 | - name: Build Broker 69 | run: |- 70 | make bin/loki-broker && git diff --exit-code 71 | 72 | test: 73 | name: test 74 | runs-on: ubuntu-latest 75 | strategy: 76 | fail-fast: false 77 | matrix: 78 | go: ['1.16'] 79 | steps: 80 | - name: Set up Go 1.x 81 | uses: actions/setup-go@v2 82 | with: 83 | go-version: ${{ matrix.go }} 84 | id: go 85 | - uses: actions/checkout@v2 86 | - name: Run tests 87 | run: go test -coverprofile=profile.cov ./... 88 | - name: Send coverage 89 | uses: shogo82148/actions-goveralls@v1 90 | with: 91 | path-to-profile: profile.cov 92 | flag-name: Go-${{ matrix.go }} 93 | shallow: true 94 | -------------------------------------------------------------------------------- /.github/workflows/quay.yaml: -------------------------------------------------------------------------------- 1 | name: quay 2 | on: 3 | push: 4 | branches: 5 | - master 6 | 7 | env: 8 | IMAGE_REGISTRY: quay.io 9 | IMAGE_ORGANIZATION: openshift-logging 10 | IMAGE_OPERATOR_NAME: loki-operator 11 | IMAGE_BUNDLE_NAME: loki-operator-bundle 12 | IMAGE_CALCULATOR_NAME: storage-size-calculator 13 | 14 | jobs: 15 | publish-manager: 16 | runs-on: ubuntu-latest 17 | steps: 18 | - uses: actions/checkout@master 19 | 20 | - name: Set up QEMU 21 | uses: docker/setup-qemu-action@v1 22 | 23 | - name: Set up Docker Buildx 24 | uses: docker/setup-buildx-action@v1 25 | 26 | - name: Login to Quay.io 27 | uses: docker/login-action@v1 28 | with: 29 | registry: quay.io 30 | logout: true 31 | username: ${{ secrets.OPENSHIFT_LOGGING_USER }} 32 | password: ${{ secrets.OPENSHIFT_LOGGING_PASS }} 33 | 34 | - name: Get image tags 35 | id: image_tags 36 
| run: | 37 | echo -n ::set-output name=IMAGE_TAGS:: 38 | PULLSPEC="$IMAGE_REGISTRY/$IMAGE_ORGANIZATION/$IMAGE_OPERATOR_NAME" 39 | TAGS=("$PULLSPEC:latest", "$PULLSPEC:v0.0.1") 40 | BUILD_DATE="$(date -u +'%Y-%m-%d')" 41 | VCS_BRANCH="$(git rev-parse --abbrev-ref HEAD)" 42 | VCS_REF="$(git rev-parse --short HEAD)" 43 | TAGS+=("$PULLSPEC:$VCS_BRANCH-$BUILD_DATE-$VCS_REF") 44 | ( IFS=$','; echo "${TAGS[*]}" ) 45 | 46 | - name: Build and publish image on quay.io 47 | uses: docker/build-push-action@v2 48 | with: 49 | context: . 50 | push: true 51 | tags: "${{ steps.image_tags.outputs.IMAGE_TAGS }}" 52 | 53 | publish-bundle: 54 | runs-on: ubuntu-latest 55 | steps: 56 | - uses: actions/checkout@master 57 | 58 | - name: Set up QEMU 59 | uses: docker/setup-qemu-action@v1 60 | 61 | - name: Set up Docker Buildx 62 | uses: docker/setup-buildx-action@v1 63 | 64 | - name: Login to Quay.io 65 | uses: docker/login-action@v1 66 | with: 67 | registry: quay.io 68 | logout: true 69 | username: ${{ secrets.OPENSHIFT_LOGGING_USER }} 70 | password: ${{ secrets.OPENSHIFT_LOGGING_PASS }} 71 | 72 | - name: Get image tags 73 | id: image_tags 74 | run: | 75 | echo -n ::set-output name=IMAGE_TAGS:: 76 | PULLSPEC="$IMAGE_REGISTRY/$IMAGE_ORGANIZATION/$IMAGE_BUNDLE_NAME" 77 | TAGS=("$PULLSPEC:latest", "$PULLSPEC:v0.0.1") 78 | BUILD_DATE="$(date -u +'%Y-%m-%d')" 79 | VCS_BRANCH="$(git rev-parse --abbrev-ref HEAD)" 80 | VCS_REF="$(git rev-parse --short HEAD)" 81 | TAGS+=("$PULLSPEC:$VCS_BRANCH-$BUILD_DATE-$VCS_REF") 82 | ( IFS=$','; echo "${TAGS[*]}" ) 83 | 84 | - name: Build and publish image on quay.io 85 | uses: docker/build-push-action@v2 86 | with: 87 | context: . 
88 | file: bundle.Dockerfile 89 | push: true 90 | tags: "${{ steps.image_tags.outputs.IMAGE_TAGS }}" 91 | 92 | publish-size-calculator: 93 | runs-on: ubuntu-latest 94 | steps: 95 | - uses: actions/checkout@master 96 | 97 | - name: Set up QEMU 98 | uses: docker/setup-qemu-action@v1 99 | 100 | - name: Set up Docker Buildx 101 | uses: docker/setup-buildx-action@v1 102 | 103 | - name: Login to Quay.io 104 | uses: docker/login-action@v1 105 | with: 106 | registry: quay.io 107 | logout: true 108 | username: ${{ secrets.OPENSHIFT_LOGGING_USER }} 109 | password: ${{ secrets.OPENSHIFT_LOGGING_PASS }} 110 | 111 | - name: Get image tags 112 | id: image_tags 113 | run: | 114 | echo -n ::set-output name=IMAGE_TAGS:: 115 | PULLSPEC="$IMAGE_REGISTRY/$IMAGE_ORGANIZATION/$IMAGE_CALCULATOR_NAME" 116 | TAGS=("$PULLSPEC:latest", "$PULLSPEC:v0.0.1") 117 | BUILD_DATE="$(date -u +'%Y-%m-%d')" 118 | VCS_BRANCH="$(git rev-parse --abbrev-ref HEAD)" 119 | VCS_REF="$(git rev-parse --short HEAD)" 120 | TAGS+=("$PULLSPEC:$VCS_BRANCH-$BUILD_DATE-$VCS_REF") 121 | ( IFS=$','; echo "${TAGS[*]}" ) 122 | 123 | - name: Build and publish image on quay.io 124 | uses: docker/build-push-action@v2 125 | with: 126 | context: . 
127 | file: calculator.Dockerfile 128 | push: true 129 | tags: "${{ steps.image_tags.outputs.IMAGE_TAGS }}" 130 | -------------------------------------------------------------------------------- /.github/workflows/scorecard.yaml: -------------------------------------------------------------------------------- 1 | name: scorecard 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | 9 | jobs: 10 | build: 11 | name: scorecard 12 | runs-on: ubuntu-latest 13 | strategy: 14 | fail-fast: false 15 | matrix: 16 | go: ['1.16'] 17 | steps: 18 | - name: Set up Go 1.x 19 | uses: actions/setup-go@v2 20 | with: 21 | go-version: ${{ matrix.go }} 22 | id: go 23 | - uses: engineerd/setup-kind@v0.5.0 24 | with: 25 | version: "v0.11.1" 26 | - uses: actions/checkout@v2 27 | - name: Install make 28 | run: sudo apt-get install make 29 | - name: Run scorecard 30 | run: make scorecard 31 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | bin 9 | testbin/* 10 | 11 | # Test binary, build with `go test -c` 12 | *.test 13 | 14 | # Output of the go coverage tool, specifically when used with LiteIDE 15 | *.out 16 | 17 | # Kubernetes Generated files - skip generated files, except for vendored files 18 | 19 | !vendor/**/zz_generated.* 20 | 21 | # editor and IDE paraphernalia 22 | .idea 23 | *.swp 24 | *.swo 25 | *~ 26 | -------------------------------------------------------------------------------- /.golangci.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | run: 3 | tests: false 4 | skip-files: 5 | - "example_.+_test.go$" 6 | 7 | # golangci.com configuration 8 | # https://github.com/golangci/golangci/wiki/Configuration 9 | linters-settings: 10 | govet: 11 | check-shadowing: true 12 | 
maligned: 13 | suggest-new: true 14 | misspell: 15 | locale: US 16 | 17 | linters: 18 | enable-all: false 19 | enable: 20 | - deadcode # Finds unused code 21 | - errcheck # Errcheck is a program for checking for unchecked errors in go programs. These unchecked errors can be critical bugs in some cases 22 | - goerr113 # checks that errors are wrapped according to go 1.13 error wrapping tools 23 | - gofumpt # checks that gofumpt was run on all source code 24 | - goimports # checks that goimports was run on all source code 25 | - golint 26 | - gosimple # Linter for Go source code that specializes in simplifying a code 27 | - govet # Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string 28 | - ineffassign # Detects when assignments to existing variables are not used 29 | - misspell # spell checker 30 | - rowserrcheck # checks whether Err of rows is checked successfully 31 | - staticcheck # Staticcheck is a go vet on steroids, applying a ton of static analysis checks 32 | - structcheck # Finds unused struct fields 33 | - typecheck # Like the front-end of a Go compiler, parses and type-checks Go code 34 | - unused # Checks Go code for unused constants, variables, functions and types 35 | - varcheck # Finds unused global variables and constants 36 | 37 | issues: 38 | exclude-use-default: false 39 | exclude-rules: 40 | # - text: "could be of size" 41 | # path: api/v1beta1/lokistack_types.go 42 | # linters: 43 | # - maligned 44 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | Contributing to Loki Operator 2 | 3 | ## Ideology 4 | 5 | OpenShift has proven to be a powerful and successful platform for running containers in production. Our primary goal is to bring Loki Operator to our customers. 
That being said, it is a very large platform intended for large-scale production use. It is not intended to be ephemeral. 6 | 7 | The tools required to run and test an OCP cluster are complex and cumbersome. The current processes to build an OCP cluster include the slack cluster-bot, openshift-install script, and CRC. The fastest route to create a working OCP cluster is 45 minutes. CRC *may* be faster, but it requires over [half of your local machine's resources](https://coreos.slack.com/archives/GGUR75P60/p1591803889037800) and doesn’t handle sleeping/suspending very well. Using openshift-install comes with its own [headaches](https://coreos.slack.com/archives/GGUR75P60/p1615458361119300). These blockers cause a significant amount of [wasted time](https://coreos.slack.com/archives/GGUR75P60/p1599242159479000?thread_ts=1599241354.478700&cid=GGUR75P60) that could be spent on more valuable things. 8 | 9 | Nevertheless, I argue that none of this is necessary. The problems are caused when we bastardize a large, complex, production platform for testing and tooling. OpenShift is a superset of Kubernetes. Operators are now Kubernetes native. Given this reality, we have called the Loki Operator a Kubernetes operator rather than an OpenShift operator. This may seem like a trivial delineation, but it isn’t. The operator has been designed from the beginning using Kubernetes tools and APIs. This has allowed us to build, test, and deploy in very little time with very little effort. It is not uncommon to create a pull request and have it [reviewed and merged](https://github.com/ViaQ/loki-operator/pulls?q=is%3Apr+is%3Aclosed) within 15 minutes. 10 | 11 | There are certainly OCP exclusives that we want to program into the Loki Operator, but this shouldn’t block or break the primary objectives. In other words, the Loki Operator should be Kubernetes first and OpenShift second. The Loki Operator should be open to using the OpenShift APIs without requiring them. 
All tools, automation, scripts, make targets, etc, should work naturally with Kubernetes and Kubernetes compatible APIs. OCP exclusives should be opt-in. It might be natural for you to think this causes obstruction for deploying to OCP, but that is far from true. Packaging for OCP should be a scripted process that, once opted in, should build all of the necessary components. So far, it has proven to be successful. 12 | 13 | ## Tooling 14 | 15 | We use [KinD](https://github.com/kubernetes-sigs/kind) to deploy and test the Loki Operator. We have had no compatibility issues, no wasted time on a learning curve, no failed clusters, no token expirations, no cluster expirations, no spinning laptop fans from gluttonous virtual machines, etc. It takes approximately 20 seconds to create a local KinD cluster and your machine won’t even notice it’s running. The cluster is fully compatible with all Kubernetes APIs and the operator runs on KinD perfectly. After your KinD cluster is created your kubeconfig is updated and the Makefile will work. The Makefiles and scripts are written to work with kubectl. This abstraction prevents any unnecessary complications caused by magic processes like deploying images to internal clusters, etc. 16 | 17 | 18 | ## Testing 19 | 20 | Tests should be succinct and without dependencies. This means that unit tests are the de-facto form of testing the Loki Operator. Unit tests are written with the standard Go library using [testify](https://github.com/stretchr/testify) for assertions. [Counterfeiter](https://github.com/maxbrunsfeld/counterfeiter) is included for generating test fakes and stubs for all dependencies. This library provides an API for generating fake implementations of interfaces for injecting them into testable units of code. Unit tests should implement or stub *only the parts required to test*. Large, all-inclusive structs should be avoided in favor of concise, single responsibility functions. 
This encourages small tests with minimal assertions to keep them hyper-focused, making tests easy to create *and* maintain. -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Build the manager binary 2 | FROM golang:1.16 as builder 3 | 4 | WORKDIR /workspace 5 | # Copy the Go Modules manifests 6 | COPY go.mod go.mod 7 | COPY go.sum go.sum 8 | # cache deps before building and copying source so that we don't need to re-download as much 9 | # and so that source changes don't invalidate our downloaded layer 10 | RUN go mod download 11 | 12 | # Copy the go source 13 | COPY main.go main.go 14 | COPY api/ api/ 15 | COPY controllers/ controllers/ 16 | COPY internal/ internal/ 17 | 18 | # Build 19 | RUN CGO_ENABLED=0 GOOS=linux GO111MODULE=on go build -a -o manager main.go 20 | 21 | # Use distroless as minimal base image to package the manager binary 22 | # Refer to https://github.com/GoogleContainerTools/distroless for more details 23 | FROM gcr.io/distroless/static:nonroot 24 | WORKDIR / 25 | COPY --from=builder /workspace/manager . 
26 | USER 65532:65532 27 | 28 | ENTRYPOINT ["/manager"] 29 | -------------------------------------------------------------------------------- /PROJECT: -------------------------------------------------------------------------------- 1 | domain: openshift.io 2 | layout: 3 | - go.kubebuilder.io/v3 4 | plugins: 5 | manifests.sdk.operatorframework.io/v2: {} 6 | scorecard.sdk.operatorframework.io/v2: {} 7 | projectName: loki-operator 8 | repo: github.com/ViaQ/loki-operator 9 | resources: 10 | - api: 11 | crdVersion: v1beta1 12 | namespaced: true 13 | controller: true 14 | domain: openshift.io 15 | group: loki 16 | kind: LokiStack 17 | path: github.com/ViaQ/loki-operator/api/v1beta1 18 | version: v1beta1 19 | version: "3" 20 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # DEPRECATION NOTICE 2 | 3 | This repository is deprecated and transferred to upstream github.com/grafana/loki. Please re-open any issues and pull requests there. 4 | 5 | [![go test](https://github.com/ViaQ/loki-operator/workflows/go%20test/badge.svg)](https://github.com/ViaQ/loki-operator/actions) 6 | [![Coveralls github](https://img.shields.io/coveralls/github/ViaQ/loki-operator.svg)](https://coveralls.io/github/ViaQ/loki-operator) 7 | [![Report Card](https://goreportcard.com/badge/github.com/ViaQ/loki-operator)](https://goreportcard.com/report/github.com/ViaQ/loki-operator) 8 | 9 | ![](img/loki-operator.png) 10 | 11 | # Loki Operator by Red Hat 12 | 13 | This is the Kubernetes Operator for [Loki](https://grafana.com/docs/loki/latest/) 14 | provided by the Red Hat OpenShift engineering team. **This is currently a work in 15 | progress and is subject to large scale changes that will break any dependencies. 16 | Do not use this in any production environment.** 17 | 18 | ## Development 19 | 20 | Requirements: 21 | 22 | 1. Running Kubernetes cluster.
Our team uses 23 | [KinD](https://kind.sigs.k8s.io/docs/user/quick-start/) or 24 | [K3s](https://k3s.io/) for simplicity. 25 | 1. A container registry that you and your Kubernetes cluster can reach. We 26 | recommend [quay.io](https://quay.io/signin/). 27 | 28 | Build and push the container image and then deploy the operator with `make 29 | oci-build oci-push deploy IMG=quay.io/my-team/loki-operator:latest`. This will 30 | deploy to your active Kubernetes/OpenShift cluster defined by your local 31 | [kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/). 32 | 33 | For detailed step-by-step guide on how to start development and testing on Kind and OpenShift, 34 | check our [documentation](https://github.com/ViaQ/loki-operator/blob/master/docs/hack_loki_operator.md) 35 | 36 | Also, there is a [document](https://github.com/ViaQ/loki-operator/blob/master/docs/hack_operator_make_run.md) which 37 | demonstrates how to use Loki Operator for development and testing locally without deploying the operator each time on Kind and OpenShift. 38 | -------------------------------------------------------------------------------- /_config.yml: -------------------------------------------------------------------------------- 1 | theme: jekyll-theme-slate -------------------------------------------------------------------------------- /api/v1beta1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Package v1beta1 contains API Schema definitions for the loki v1beta1 API group 18 | // +kubebuilder:object:generate=true 19 | // +groupName=loki.openshift.io 20 | package v1beta1 21 | 22 | import ( 23 | "k8s.io/apimachinery/pkg/runtime/schema" 24 | "sigs.k8s.io/controller-runtime/pkg/scheme" 25 | ) 26 | 27 | var ( 28 | // GroupVersion is group version used to register these objects 29 | GroupVersion = schema.GroupVersion{Group: "loki.openshift.io", Version: "v1beta1"} 30 | 31 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 32 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 33 | 34 | // AddToScheme adds the types in this group-version to the given scheme. 35 | AddToScheme = SchemeBuilder.AddToScheme 36 | ) 37 | -------------------------------------------------------------------------------- /bundle.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM scratch 2 | 3 | # Core bundle labels. 4 | LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1 5 | LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/ 6 | LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/ 7 | LABEL operators.operatorframework.io.bundle.package.v1=loki-operator 8 | LABEL operators.operatorframework.io.bundle.channels.v1=tech-preview 9 | LABEL operators.operatorframework.io.metrics.builder=operator-sdk-unknown 10 | LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1 11 | LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v3 12 | 13 | # Labels for testing. 14 | LABEL operators.operatorframework.io.test.mediatype.v1=scorecard+v1 15 | LABEL operators.operatorframework.io.test.config.v1=tests/scorecard/ 16 | 17 | # Copy files to locations specified by labels. 
18 | COPY bundle/manifests /manifests/ 19 | COPY bundle/metadata /metadata/ 20 | COPY bundle/tests/scorecard /tests/scorecard/ 21 | -------------------------------------------------------------------------------- /bundle/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | service.beta.openshift.io/serving-cert-secret-name: loki-operator-metrics 6 | creationTimestamp: null 7 | labels: 8 | app.kubernetes.io/instance: loki-operator-v0.0.1 9 | app.kubernetes.io/managed-by: operator-lifecycle-manager 10 | app.kubernetes.io/name: loki-operator 11 | app.kubernetes.io/part-of: cluster-logging 12 | app.kubernetes.io/version: 0.0.1 13 | name: loki-operator-controller-manager-metrics-service 14 | spec: 15 | ports: 16 | - name: https 17 | port: 8443 18 | protocol: TCP 19 | targetPort: https 20 | selector: 21 | app.kubernetes.io/instance: loki-operator-v0.0.1 22 | app.kubernetes.io/managed-by: operator-lifecycle-manager 23 | app.kubernetes.io/name: loki-operator 24 | app.kubernetes.io/part-of: cluster-logging 25 | app.kubernetes.io/version: 0.0.1 26 | name: loki-operator-controller-manager 27 | status: 28 | loadBalancer: {} 29 | -------------------------------------------------------------------------------- /bundle/manifests/loki-operator-manager-config_v1_configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | controller_manager_config.yaml: | 4 | apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 5 | kind: ControllerManagerConfig 6 | health: 7 | healthProbeBindAddress: :8081 8 | metrics: 9 | bindAddress: 127.0.0.1:8080 10 | webhook: 11 | port: 9443 12 | leaderElection: 13 | leaderElect: true 14 | resourceName: e3716011.openshift.io 15 | kind: ConfigMap 16 | metadata: 17 | labels: 18 | app.kubernetes.io/instance: 
loki-operator-v0.0.1 19 | app.kubernetes.io/managed-by: operator-lifecycle-manager 20 | app.kubernetes.io/name: loki-operator 21 | app.kubernetes.io/part-of: cluster-logging 22 | app.kubernetes.io/version: 0.0.1 23 | name: loki-operator-manager-config 24 | -------------------------------------------------------------------------------- /bundle/manifests/loki-operator-metrics-monitor_monitoring.coreos.com_v1_servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: loki-operator-v0.0.1 6 | app.kubernetes.io/managed-by: operator-lifecycle-manager 7 | app.kubernetes.io/name: loki-operator 8 | app.kubernetes.io/part-of: cluster-logging 9 | app.kubernetes.io/version: 0.0.1 10 | name: loki-operator 11 | name: loki-operator-metrics-monitor 12 | spec: 13 | endpoints: 14 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 15 | interval: 30s 16 | path: /metrics 17 | scheme: https 18 | scrapeTimeout: 10s 19 | targetPort: 8443 20 | tlsConfig: 21 | caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt 22 | serverName: loki-operator-controller-manager-metrics-service.openshift-logging.svc 23 | selector: 24 | matchLabels: 25 | app.kubernetes.io/name: loki-operator 26 | -------------------------------------------------------------------------------- /bundle/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | app.kubernetes.io/instance: loki-operator-v0.0.1 7 | app.kubernetes.io/managed-by: operator-lifecycle-manager 8 | app.kubernetes.io/name: loki-operator 9 | app.kubernetes.io/part-of: cluster-logging 10 | app.kubernetes.io/version: 0.0.1 
11 | name: loki-operator-metrics-reader 12 | rules: 13 | - nonResourceURLs: 14 | - /metrics 15 | verbs: 16 | - get 17 | -------------------------------------------------------------------------------- /bundle/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | annotations: 5 | include.release.openshift.io/self-managed-high-availability: "true" 6 | include.release.openshift.io/single-node-developer: "true" 7 | creationTimestamp: null 8 | labels: 9 | app.kubernetes.io/instance: loki-operator-v0.0.1 10 | app.kubernetes.io/managed-by: operator-lifecycle-manager 11 | app.kubernetes.io/name: loki-operator 12 | app.kubernetes.io/part-of: cluster-logging 13 | app.kubernetes.io/version: 0.0.1 14 | name: loki-operator-prometheus 15 | rules: 16 | - apiGroups: 17 | - "" 18 | resources: 19 | - services 20 | - endpoints 21 | - pods 22 | verbs: 23 | - get 24 | - list 25 | - watch 26 | -------------------------------------------------------------------------------- /bundle/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | annotations: 5 | include.release.openshift.io/self-managed-high-availability: "true" 6 | include.release.openshift.io/single-node-developer: "true" 7 | creationTimestamp: null 8 | labels: 9 | app.kubernetes.io/instance: loki-operator-v0.0.1 10 | app.kubernetes.io/managed-by: operator-lifecycle-manager 11 | app.kubernetes.io/name: loki-operator 12 | app.kubernetes.io/part-of: cluster-logging 13 | app.kubernetes.io/version: 0.0.1 14 | name: loki-operator-prometheus 15 | roleRef: 16 | apiGroup: rbac.authorization.k8s.io 17 | kind: Role 18 | name: loki-operator-prometheus 19 | subjects: 20 | - 
kind: ServiceAccount 21 | name: prometheus-k8s 22 | namespace: openshift-monitoring 23 | -------------------------------------------------------------------------------- /bundle/metadata/annotations.yaml: -------------------------------------------------------------------------------- 1 | annotations: 2 | # Core bundle annotations. 3 | operators.operatorframework.io.bundle.mediatype.v1: registry+v1 4 | operators.operatorframework.io.bundle.manifests.v1: manifests/ 5 | operators.operatorframework.io.bundle.metadata.v1: metadata/ 6 | operators.operatorframework.io.bundle.package.v1: loki-operator 7 | operators.operatorframework.io.bundle.channels.v1: tech-preview 8 | operators.operatorframework.io.metrics.builder: operator-sdk-unknown 9 | operators.operatorframework.io.metrics.mediatype.v1: metrics+v1 10 | operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v3 11 | 12 | # Annotations for testing. 13 | operators.operatorframework.io.test.mediatype.v1: scorecard+v1 14 | operators.operatorframework.io.test.config.v1: tests/scorecard/ 15 | -------------------------------------------------------------------------------- /bundle/tests/scorecard/config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: scorecard.operatorframework.io/v1alpha3 2 | kind: Configuration 3 | metadata: 4 | name: config 5 | stages: 6 | - parallel: true 7 | tests: 8 | - entrypoint: 9 | - scorecard-test 10 | - basic-check-spec 11 | image: quay.io/operator-framework/scorecard-test:v1.4.0 12 | labels: 13 | suite: basic 14 | test: basic-check-spec-test 15 | storage: 16 | spec: 17 | mountPath: {} 18 | - entrypoint: 19 | - scorecard-test 20 | - olm-bundle-validation 21 | image: quay.io/operator-framework/scorecard-test:v1.4.0 22 | labels: 23 | suite: olm 24 | test: olm-bundle-validation-test 25 | storage: 26 | spec: 27 | mountPath: {} 28 | - entrypoint: 29 | - scorecard-test 30 | - olm-crds-have-validation 31 | image: 
quay.io/operator-framework/scorecard-test:v1.4.0 32 | labels: 33 | suite: olm 34 | test: olm-crds-have-validation-test 35 | storage: 36 | spec: 37 | mountPath: {} 38 | - entrypoint: 39 | - scorecard-test 40 | - olm-crds-have-resources 41 | image: quay.io/operator-framework/scorecard-test:v1.4.0 42 | labels: 43 | suite: olm 44 | test: olm-crds-have-resources-test 45 | storage: 46 | spec: 47 | mountPath: {} 48 | - entrypoint: 49 | - scorecard-test 50 | - olm-spec-descriptors 51 | image: quay.io/operator-framework/scorecard-test:v1.4.0 52 | labels: 53 | suite: olm 54 | test: olm-spec-descriptors-test 55 | storage: 56 | spec: 57 | mountPath: {} 58 | - entrypoint: 59 | - scorecard-test 60 | - olm-status-descriptors 61 | image: quay.io/operator-framework/scorecard-test:v1.4.0 62 | labels: 63 | suite: olm 64 | test: olm-status-descriptors-test 65 | storage: 66 | spec: 67 | mountPath: {} 68 | storage: 69 | spec: 70 | mountPath: {} 71 | -------------------------------------------------------------------------------- /calculator.Dockerfile: -------------------------------------------------------------------------------- 1 | # Build the calculator binary 2 | FROM golang:1.16 as builder 3 | 4 | WORKDIR /workspace 5 | # Copy the Go Modules manifests 6 | COPY go.mod go.mod 7 | COPY go.sum go.sum 8 | # cache deps before building and copying source so that we don't need to re-download as much 9 | # and so that source changes don't invalidate our downloaded layer 10 | RUN go mod download 11 | 12 | # Copy the go source 13 | COPY cmd/size-calculator/main.go main.go 14 | COPY internal/ internal/ 15 | 16 | # Build 17 | RUN CGO_ENABLED=0 GOOS=linux GO111MODULE=on go build -a -o size-calculator main.go 18 | 19 | # Use distroless as minimal base image to package the size-calculator binary 20 | # Refer to https://github.com/GoogleContainerTools/distroless for more details 21 | FROM gcr.io/distroless/static:nonroot 22 | WORKDIR / 23 | COPY --from=builder /workspace/size-calculator . 
24 | USER 65532:65532 25 | 26 | ENTRYPOINT ["/size-calculator"] 27 | -------------------------------------------------------------------------------- /cmd/size-calculator/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "math" 6 | "os" 7 | "time" 8 | 9 | "github.com/ViaQ/loki-operator/internal/sizes" 10 | "github.com/prometheus/common/model" 11 | 12 | "github.com/ViaQ/logerr/log" 13 | ) 14 | 15 | const ( 16 | // defaultDuration is the time for which the metric needs to be predicted for. 17 | // It is passed as second parameter to predict_linear. 18 | defaultDuration string = "24h" 19 | // range1xSmall defines the range (in GB) 20 | // of t-shirt size 1x.small i.e., 0 <= 1x.small <= 500 21 | range1xSmall int = 500 22 | // sizeOneXSmall defines the size of a single Loki deployment 23 | // with small resources/limits requirements. This size is dedicated for setup **without** the 24 | // requirement for single replication factor and auto-compaction. 25 | sizeOneXSmall string = "1x.small" 26 | // sizeOneXMedium defines the size of a single Loki deployment 27 | // with medium resources/limits requirements. This size is dedicated for setup **with** the 28 | // requirement for single replication factor and auto-compaction. 
29 | sizeOneXMedium string = "1x.medium" 30 | ) 31 | 32 | func init() { 33 | log.Init("size-calculator") 34 | } 35 | 36 | func main() { 37 | log.Info("starting storage size calculator...") 38 | 39 | for { 40 | duration, parseErr := model.ParseDuration(defaultDuration) 41 | if parseErr != nil { 42 | log.Error(parseErr, "failed to parse duration") 43 | os.Exit(1) 44 | } 45 | 46 | logsCollected, err := sizes.PredictFor(duration) 47 | if err != nil { 48 | log.Error(err, "Failed to collect metrics data") 49 | os.Exit(1) 50 | } 51 | 52 | logsCollectedInGB := int(math.Ceil(logsCollected / math.Pow(1024, 3))) 53 | log.Info(fmt.Sprintf("Amount of logs expected in 24 hours is %f Bytes or %dGB", logsCollected, logsCollectedInGB)) 54 | 55 | if logsCollectedInGB <= range1xSmall { 56 | log.Info(fmt.Sprintf("Recommended t-shirt size for %dGB is %s", logsCollectedInGB, sizeOneXSmall)) 57 | } else { 58 | log.Info(fmt.Sprintf("Recommended t-shirt size for %dGB is %s", logsCollectedInGB, sizeOneXMedium)) 59 | } 60 | 61 | time.Sleep(1 * time.Minute) 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /config/certmanager/certificate.yaml: -------------------------------------------------------------------------------- 1 | # The following manifests contain a self-signed issuer CR and a certificate CR. 2 | # More document can be found at https://docs.cert-manager.io 3 | # WARNING: Targets CertManager v1.0. Check https://cert-manager.io/docs/installation/upgrading/ for breaking changes. 
4 | apiVersion: cert-manager.io/v1 5 | kind: Issuer 6 | metadata: 7 | name: selfsigned-issuer 8 | namespace: system 9 | spec: 10 | selfSigned: {} 11 | --- 12 | apiVersion: cert-manager.io/v1 13 | kind: Certificate 14 | metadata: 15 | name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml 16 | namespace: system 17 | spec: 18 | # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize 19 | dnsNames: 20 | - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc 21 | - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local 22 | issuerRef: 23 | kind: Issuer 24 | name: selfsigned-issuer 25 | secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize 26 | -------------------------------------------------------------------------------- /config/certmanager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - certificate.yaml 3 | 4 | configurations: 5 | - kustomizeconfig.yaml 6 | -------------------------------------------------------------------------------- /config/certmanager/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This configuration is for teaching kustomize how to update name ref and var substitution 2 | nameReference: 3 | - kind: Issuer 4 | group: cert-manager.io 5 | fieldSpecs: 6 | - kind: Certificate 7 | group: cert-manager.io 8 | path: spec/issuerRef/name 9 | 10 | varReference: 11 | - kind: Certificate 12 | group: cert-manager.io 13 | path: spec/commonName 14 | - kind: Certificate 15 | group: cert-manager.io 16 | path: spec/dnsNames 17 | -------------------------------------------------------------------------------- /config/crd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # This kustomization.yaml is not intended to be run by itself, 2 | # since it depends on service name and namespace that 
are out of this kustomize package. 3 | # It should be run by config/default 4 | resources: 5 | - bases/loki.openshift.io_lokistacks.yaml 6 | # +kubebuilder:scaffold:crdkustomizeresource 7 | 8 | patchesStrategicMerge: 9 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 10 | # patches here are for enabling the conversion webhook for each CRD 11 | #- patches/webhook_in_lokistacks.yaml 12 | # +kubebuilder:scaffold:crdkustomizewebhookpatch 13 | 14 | # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. 15 | # patches here are for enabling the CA injection for each CRD 16 | #- patches/cainjection_in_lokistacks.yaml 17 | # +kubebuilder:scaffold:crdkustomizecainjectionpatch 18 | 19 | # the following config is for teaching kustomize how to do kustomization for CRDs. 20 | configurations: 21 | - kustomizeconfig.yaml 22 | -------------------------------------------------------------------------------- /config/crd/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This file is for teaching kustomize how to substitute name and namespace reference in CRD 2 | nameReference: 3 | - kind: Service 4 | version: v1 5 | fieldSpecs: 6 | - kind: CustomResourceDefinition 7 | version: v1beta1 8 | group: apiextensions.k8s.io 9 | path: spec/conversion/webhookClientConfig/service/name 10 | 11 | namespace: 12 | - kind: CustomResourceDefinition 13 | version: v1beta1 14 | group: apiextensions.k8s.io 15 | path: spec/conversion/webhookClientConfig/service/namespace 16 | create: false 17 | 18 | varReference: 19 | - path: metadata/annotations 20 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_lokistacks.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | # CRD conversion requires k8s 
1.13 or later. 3 | apiVersion: apiextensions.k8s.io/v1beta1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | annotations: 7 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 8 | name: lokistacks.loki.openshift.io 9 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_lokistacks.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | # CRD conversion requires k8s 1.13 or later. 3 | apiVersion: apiextensions.k8s.io/v1beta1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | name: lokistacks.loki.openshift.io 7 | spec: 8 | conversion: 9 | strategy: Webhook 10 | webhookClientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | -------------------------------------------------------------------------------- /config/manager/controller_manager_config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 2 | kind: ControllerManagerConfig 3 | health: 4 | healthProbeBindAddress: :8081 5 | metrics: 6 | bindAddress: 127.0.0.1:8080 7 | webhook: 8 | port: 9443 9 | leaderElection: 10 | leaderElect: true 11 | resourceName: e3716011.openshift.io 12 | -------------------------------------------------------------------------------- /config/manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manager.yaml 3 | 4 | generatorOptions: 5 | disableNameSuffixHash: true 6 | 7 | configMapGenerator: 8 | - files: 9 | - controller_manager_config.yaml 10 | name: manager-config 11 | apiVersion: kustomize.config.k8s.io/v1beta1 12 | kind: Kustomization 13 | images: 14 | - name: controller 15 | newName: quay.io/openshift-logging/loki-operator 16 | newTag: v0.0.1 17 | 
-------------------------------------------------------------------------------- /config/manager/manager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | spec: 6 | selector: 7 | matchLabels: 8 | name: loki-operator-controller-manager 9 | replicas: 1 10 | template: 11 | metadata: 12 | labels: 13 | name: loki-operator-controller-manager 14 | spec: 15 | containers: 16 | - command: 17 | - /manager 18 | image: controller:latest 19 | imagePullPolicy: IfNotPresent 20 | name: manager 21 | ports: 22 | - containerPort: 8080 23 | name: metrics 24 | securityContext: 25 | allowPrivilegeEscalation: false 26 | livenessProbe: 27 | httpGet: 28 | path: /healthz 29 | port: 8081 30 | initialDelaySeconds: 15 31 | periodSeconds: 20 32 | readinessProbe: 33 | httpGet: 34 | path: /readyz 35 | port: 8081 36 | initialDelaySeconds: 5 37 | periodSeconds: 10 38 | terminationGracePeriodSeconds: 10 39 | -------------------------------------------------------------------------------- /config/manifests/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - ../overlays/openshift 3 | - ../samples 4 | - ../scorecard 5 | -------------------------------------------------------------------------------- /config/overlays/development/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - ../../crd 3 | - ../../rbac 4 | - ../../manager 5 | - ./minio 6 | 7 | # Adds namespace to all resources. 8 | namespace: default 9 | 10 | # Labels to add to all resources and selectors. 
11 | #commonLabels: 12 | # someName: someValue 13 | commonLabels: 14 | app.kubernetes.io/name: loki-operator 15 | app.kubernetes.io/instance: loki-operator-v0.0.1 16 | app.kubernetes.io/version: "0.0.1" 17 | app.kubernetes.io/part-of: loki-operator 18 | app.kubernetes.io/managed-by: operator-lifecycle-manager 19 | 20 | patchesStrategicMerge: 21 | - manager_related_image_patch.yaml 22 | - manager_image_pull_policy_patch.yaml 23 | -------------------------------------------------------------------------------- /config/overlays/development/manager_image_pull_policy_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | spec: 6 | template: 7 | spec: 8 | containers: 9 | - name: manager 10 | imagePullPolicy: Always 11 | -------------------------------------------------------------------------------- /config/overlays/development/manager_related_image_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | spec: 6 | template: 7 | spec: 8 | containers: 9 | - name: manager 10 | env: 11 | - name: RELATED_IMAGE_LOKI 12 | value: docker.io/grafana/loki:2.4.1 13 | - name: RELATED_IMAGE_GATEWAY 14 | value: quay.io/observatorium/api:latest 15 | -------------------------------------------------------------------------------- /config/overlays/development/minio/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: minio 5 | spec: 6 | selector: 7 | matchLabels: 8 | app.kubernetes.io/name: minio 9 | strategy: 10 | type: Recreate 11 | template: 12 | metadata: 13 | labels: 14 | app.kubernetes.io/name: minio 15 | spec: 16 | containers: 17 | - command: 18 | - /bin/sh 19 | - -c 20 | - | 21 | mkdir -p /storage/loki && \ 22 | 
minio server /storage 23 | env: 24 | - name: MINIO_ACCESS_KEY 25 | value: minio 26 | - name: MINIO_SECRET_KEY 27 | value: minio123 28 | image: minio/minio 29 | name: minio 30 | ports: 31 | - containerPort: 9000 32 | volumeMounts: 33 | - mountPath: /storage 34 | name: storage 35 | volumes: 36 | - name: storage 37 | persistentVolumeClaim: 38 | claimName: minio 39 | -------------------------------------------------------------------------------- /config/overlays/development/minio/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - pvc.yaml 3 | - service.yaml 4 | - secret.yaml 5 | - deployment.yaml 6 | -------------------------------------------------------------------------------- /config/overlays/development/minio/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: minio 6 | name: minio 7 | spec: 8 | accessModes: 9 | - ReadWriteOnce 10 | resources: 11 | requests: 12 | storage: 10Gi 13 | -------------------------------------------------------------------------------- /config/overlays/development/minio/secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: test 5 | stringData: 6 | endpoint: http://minio.default.svc.cluster.local.:9000 7 | bucketnames: loki 8 | access_key_id: minio 9 | access_key_secret: minio123 10 | type: Opaque 11 | -------------------------------------------------------------------------------- /config/overlays/development/minio/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: minio 5 | spec: 6 | ports: 7 | - port: 9000 8 | protocol: TCP 9 | targetPort: 9000 10 | selector: 11 | app.kubernetes.io/name: minio 12 | type: ClusterIP 13 | 
-------------------------------------------------------------------------------- /config/overlays/openshift/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - ../../crd 3 | - ../../rbac 4 | - ../../manager 5 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 6 | # crd/kustomization.yaml 7 | #- ../webhook 8 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. 9 | #- ../certmanager 10 | # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. 11 | - ../../prometheus 12 | 13 | # Adds namespace to all resources. 14 | namespace: openshift-logging 15 | 16 | # Value of this field is prepended to the 17 | # names of all resources, e.g. a deployment named 18 | # "wordpress" becomes "alices-wordpress". 19 | # Note that it should also match with the prefix (text before '-') of the namespace 20 | # field above. 21 | namePrefix: loki-operator- 22 | 23 | # Labels to add to all resources and selectors. 24 | #commonLabels: 25 | # someName: someValue 26 | commonLabels: 27 | app.kubernetes.io/name: loki-operator 28 | app.kubernetes.io/instance: loki-operator-v0.0.1 29 | app.kubernetes.io/version: "0.0.1" 30 | app.kubernetes.io/part-of: cluster-logging 31 | app.kubernetes.io/managed-by: operator-lifecycle-manager 32 | 33 | patchesStrategicMerge: 34 | # Protect the /metrics endpoint by putting it behind auth. 35 | # If you want your controller-manager to expose the /metrics 36 | # endpoint w/o any authn/z, please comment the following line. 
37 | - manager_auth_proxy_patch.yaml 38 | - manager_related_image_patch.yaml 39 | - manager_run_flags_patch.yaml 40 | - prometheus_service_monitor_patch.yaml 41 | 42 | # apiVersion: kustomize.config.k8s.io/v1beta1 43 | # kind: Kustomization 44 | images: 45 | - name: controller 46 | newName: quay.io/openshift-logging/loki-operator 47 | newTag: v0.0.1 48 | 49 | # Mount the controller config file for loading manager configurations 50 | # through a ComponentConfig type 51 | #- manager_config_patch.yaml 52 | 53 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 54 | # crd/kustomization.yaml 55 | #- manager_webhook_patch.yaml 56 | 57 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 58 | # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. 59 | # 'CERTMANAGER' needs to be enabled to use ca injection 60 | #- webhookcainjection_patch.yaml 61 | 62 | # the following config is for teaching kustomize how to do var substitution 63 | vars: 64 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 
65 | #- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR 66 | # objref: 67 | # kind: Certificate 68 | # group: cert-manager.io 69 | # version: v1 70 | # name: serving-cert # this name should match the one in certificate.yaml 71 | # fieldref: 72 | # fieldpath: metadata.namespace 73 | #- name: CERTIFICATE_NAME 74 | # objref: 75 | # kind: Certificate 76 | # group: cert-manager.io 77 | # version: v1 78 | # name: serving-cert # this name should match the one in certificate.yaml 79 | #- name: SERVICE_NAMESPACE # namespace of the service 80 | # objref: 81 | # kind: Service 82 | # version: v1 83 | # name: webhook-service 84 | # fieldref: 85 | # fieldpath: metadata.namespace 86 | #- name: SERVICE_NAME 87 | # objref: 88 | # kind: Service 89 | # version: v1 90 | # name: webhook-service 91 | -------------------------------------------------------------------------------- /config/overlays/openshift/manager_auth_proxy_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | spec: 6 | template: 7 | spec: 8 | containers: 9 | - name: kube-rbac-proxy 10 | image: quay.io/openshift/origin-kube-rbac-proxy:latest 11 | args: 12 | - "--secure-listen-address=0.0.0.0:8443" 13 | - "--upstream=http://127.0.0.1:8080/" 14 | - "--logtostderr=true" 15 | - "--tls-cert-file=/var/run/secrets/serving-cert/tls.crt" 16 | - "--tls-private-key-file=/var/run/secrets/serving-cert/tls.key" 17 | - "--v=2" 18 | ports: 19 | - containerPort: 8443 20 | name: https 21 | volumeMounts: 22 | - mountPath: /var/run/secrets/serving-cert 23 | name: loki-operator-metrics-cert 24 | volumes: 25 | - name: loki-operator-metrics-cert 26 | secret: 27 | defaultMode: 420 28 | optional: true 29 | secretName: loki-operator-metrics 30 | -------------------------------------------------------------------------------- /config/overlays/openshift/manager_related_image_patch.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | spec: 6 | template: 7 | spec: 8 | containers: 9 | - name: manager 10 | env: 11 | - name: RELATED_IMAGE_LOKI 12 | value: quay.io/openshift-logging/loki:v2.4.1 13 | - name: RELATED_IMAGE_GATEWAY 14 | value: quay.io/observatorium/api:latest 15 | - name: RELATED_IMAGE_OPA 16 | value: quay.io/observatorium/opa-openshift:latest 17 | -------------------------------------------------------------------------------- /config/overlays/openshift/manager_run_flags_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | spec: 6 | template: 7 | spec: 8 | containers: 9 | - name: manager 10 | args: 11 | - "--with-lokistack-gateway" 12 | - "--with-lokistack-gateway-route" 13 | - "--with-cert-signing-service" 14 | - "--with-service-monitors" 15 | - "--with-tls-service-monitors" 16 | -------------------------------------------------------------------------------- /config/overlays/openshift/prometheus_service_monitor_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | name: loki-operator 6 | name: metrics-monitor 7 | spec: 8 | endpoints: 9 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 10 | path: /metrics 11 | targetPort: 8443 12 | scheme: https 13 | interval: 30s 14 | scrapeTimeout: 10s 15 | tlsConfig: 16 | caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt 17 | serverName: loki-operator-controller-manager-metrics-service.openshift-logging.svc 18 | -------------------------------------------------------------------------------- /config/overlays/openshift/size-calculator/cluster_monitoring_config.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: cluster-monitoring-config 5 | namespace: openshift-monitoring 6 | data: 7 | config.yaml: | 8 | enableUserWorkload: true 9 | -------------------------------------------------------------------------------- /config/overlays/openshift/size-calculator/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - logfile_metric_daemonset.yaml 3 | - logfile_metric_role.yaml 4 | - logfile_metric_role_binding.yaml 5 | - logfile_metric_scc.yaml 6 | - logfile_metric_service.yaml 7 | - logfile_metric_service_account.yaml 8 | - logfile_metric_service_monitor.yaml 9 | - storage_size_calculator_config.yaml 10 | - storage_size_calculator.yaml 11 | 12 | # Adds namespace to all resources. 13 | namespace: openshift-logging 14 | 15 | # Labels to add to all resources and selectors. 16 | # commonLabels: 17 | # someName: someValue 18 | commonLabels: 19 | app.kubernetes.io/name: storage-size-calculator 20 | app.kubernetes.io/instance: storage-size-calculator-v0.0.1 21 | app.kubernetes.io/version: "0.0.1" 22 | app.kubernetes.io/part-of: loki-operator 23 | app.kubernetes.io/managed-by: kubectl-apply 24 | -------------------------------------------------------------------------------- /config/overlays/openshift/size-calculator/logfile_metric_daemonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: log-file-metric-exporter 5 | labels: 6 | name: log-file-metric-exporter 7 | spec: 8 | selector: 9 | matchLabels: 10 | name: log-file-metric-exporter 11 | template: 12 | metadata: 13 | labels: 14 | name: log-file-metric-exporter 15 | spec: 16 | nodeSelector: 17 | kubernetes.io/os: linux 18 | containers: 19 | - name: log-file-metric-exporter 20 | image: 
quay.io/openshift-logging/log-file-metric-exporter:latest 21 | imagePullPolicy: IfNotPresent 22 | command: 23 | - /usr/local/bin/log-file-metric-exporter 24 | - -verbosity=2 25 | - -dir=/var/log/containers 26 | - -http=:2112 27 | - -keyFile=/var/run/secrets/serving-cert/tls.key 28 | - -crtFile=/var/run/secrets/serving-cert/tls.crt 29 | ports: 30 | - containerPort: 2112 31 | name: logfile-metrics 32 | protocol: TCP 33 | volumeMounts: 34 | - mountPath: /var/run/secrets/serving-cert 35 | name: log-file-metric-exporter-metrics 36 | - mountPath: /var/log 37 | name: logfile-varlog 38 | securityContext: 39 | seLinuxOptions: 40 | type: spc_t 41 | readOnlyRootFilesystem: true 42 | allowPrivilegeEscalation: false 43 | serviceAccount: log-file-metric-exporter 44 | volumes: 45 | - name: log-file-metric-exporter-metrics 46 | secret: 47 | defaultMode: 420 48 | optional: true 49 | secretName: log-file-metric-exporter-metrics 50 | - name: logfile-varlog 51 | hostPath: 52 | path: /var/log 53 | - name: storage-size-calculator-ca-bundle 54 | configMap: 55 | name: storage-size-calculator-ca-bundle 56 | -------------------------------------------------------------------------------- /config/overlays/openshift/size-calculator/logfile_metric_role.yaml: -------------------------------------------------------------------------------- 1 | kind: Role 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: log-file-metric-exporter-privileged 5 | rules: 6 | - verbs: 7 | - use 8 | apiGroups: 9 | - security.openshift.io 10 | resources: 11 | - securitycontextconstraints 12 | resourceNames: 13 | - log-file-metric-exporter-scc 14 | -------------------------------------------------------------------------------- /config/overlays/openshift/size-calculator/logfile_metric_role_binding.yaml: -------------------------------------------------------------------------------- 1 | kind: RoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: 
log-file-metric-exporter-privileged-binding 5 | subjects: 6 | - kind: ServiceAccount 7 | name: log-file-metric-exporter 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: Role 11 | name: log-file-metric-exporter-privileged 12 | -------------------------------------------------------------------------------- /config/overlays/openshift/size-calculator/logfile_metric_scc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: security.openshift.io/v1 2 | kind: SecurityContextConstraints 3 | metadata: 4 | name: log-file-metric-exporter-scc 5 | allowPrivilegedContainer: true 6 | requiredDropCapabilities: 7 | - MKNOD 8 | - CHOWN 9 | - DAC_OVERRIDE 10 | - FSETID 11 | - FOWNER 12 | - SETGID 13 | - SETUID 14 | - SETPCAP 15 | - NET_BIND_SERVICE 16 | - KILL 17 | allowHostDirVolumePlugin: true 18 | allowHostPorts: false 19 | runAsUser: 20 | type: RunAsAny 21 | users: [] 22 | allowHostIPC: false 23 | seLinuxContext: 24 | type: RunAsAny 25 | readOnlyRootFilesystem: false 26 | fsGroup: 27 | type: RunAsAny 28 | groups: 29 | - 'system:cluster-admins' 30 | defaultAddCapabilities: null 31 | supplementalGroups: 32 | type: RunAsAny 33 | volumes: 34 | - configMap 35 | - downwardAPI 36 | - emptyDir 37 | - persistentVolumeClaim 38 | - projected 39 | - secret 40 | allowHostPID: false 41 | allowHostNetwork: false 42 | allowPrivilegeEscalation: true 43 | allowedCapabilities: null 44 | -------------------------------------------------------------------------------- /config/overlays/openshift/size-calculator/logfile_metric_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: log-file-metric-exporter-metrics 5 | labels: 6 | name: log-file-metric-exporter 7 | annotations: 8 | service.beta.openshift.io/serving-cert-secret-name: log-file-metric-exporter-metrics 9 | spec: 10 | ports: 11 | - name: logfile-metrics 12 | port: 2112 13 | 
protocol: TCP 14 | targetPort: logfile-metrics 15 | selector: 16 | name: log-file-metric-exporter 17 | -------------------------------------------------------------------------------- /config/overlays/openshift/size-calculator/logfile_metric_service_account.yaml: -------------------------------------------------------------------------------- 1 | kind: ServiceAccount 2 | apiVersion: v1 3 | metadata: 4 | name: log-file-metric-exporter 5 | secrets: 6 | - name: logfile-metric-dockercfg 7 | - name: logfile-metric-token 8 | imagePullSecrets: 9 | - name: logfile-metric-dockercfg 10 | -------------------------------------------------------------------------------- /config/overlays/openshift/size-calculator/logfile_metric_service_monitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | name: monitor-log-file-metric-exporter 5 | labels: 6 | name: log-file-metric-exporter 7 | spec: 8 | selector: 9 | matchLabels: 10 | name: log-file-metric-exporter 11 | endpoints: 12 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 13 | path: /metrics 14 | port: logfile-metrics 15 | scheme: https 16 | interval: 30s 17 | scrapeTimeout: 10s 18 | tlsConfig: 19 | caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt 20 | serverName: log-file-metric-exporter-metrics.openshift-logging.svc 21 | -------------------------------------------------------------------------------- /config/overlays/openshift/size-calculator/storage_size_calculator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: storage-size-calculator 5 | spec: 6 | selector: 7 | matchLabels: 8 | name: log-file-metric-exporter 9 | replicas: 1 10 | template: 11 | metadata: 12 | labels: 13 | name: log-file-metric-exporter 14 | spec: 15 | containers: 16 | - command: 17 | - 
/size-calculator 18 | image: quay.io/openshift-logging/storage-size-calculator:latest 19 | imagePullPolicy: Always 20 | name: size-calculator 21 | ports: 22 | - containerPort: 2112 23 | name: logfile-metrics 24 | securityContext: 25 | allowPrivilegeEscalation: false 26 | env: 27 | - name: PROMETHEUS_URL 28 | valueFrom: 29 | secretKeyRef: 30 | name: promsecret 31 | key: prometheus_url 32 | - name: PROMETHEUS_TOKEN 33 | valueFrom: 34 | secretKeyRef: 35 | name: promsecret 36 | key: prometheus_token 37 | terminationGracePeriodSeconds: 10 38 | serviceAccount: log-file-metric-exporter 39 | -------------------------------------------------------------------------------- /config/overlays/openshift/size-calculator/storage_size_calculator_config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: storage-size-calculator-ca-bundle 5 | annotations: 6 | "service.beta.openshift.io/inject-cabundle": "true" 7 | -------------------------------------------------------------------------------- /config/overlays/openshift/size-calculator/user_workload_monitoring_config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: user-workload-monitoring-config 5 | namespace: openshift-user-workload-monitoring 6 | data: 7 | config.yaml: | 8 | prometheus: 9 | retention: 1h 10 | -------------------------------------------------------------------------------- /config/overlays/production/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - ../../crd 3 | - ../../rbac 4 | - ../../manager 5 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 6 | # crd/kustomization.yaml 7 | #- ../webhook 8 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 
'WEBHOOK' components are required. 9 | #- ../certmanager 10 | # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. 11 | - ../../prometheus 12 | 13 | # Adds namespace to all resources. 14 | namespace: loki-operator 15 | 16 | # Value of this field is prepended to the 17 | # names of all resources, e.g. a deployment named 18 | # "wordpress" becomes "alices-wordpress". 19 | # Note that it should also match with the prefix (text before '-') of the namespace 20 | # field above. 21 | namePrefix: loki-operator- 22 | 23 | # Labels to add to all resources and selectors. 24 | #commonLabels: 25 | # someName: someValue 26 | commonLabels: 27 | app.kubernetes.io/name: loki-operator 28 | app.kubernetes.io/instance: loki-operator-v0.0.1 29 | app.kubernetes.io/version: "0.0.1" 30 | app.kubernetes.io/part-of: loki-operator 31 | app.kubernetes.io/managed-by: operator-lifecycle-manager 32 | 33 | patchesStrategicMerge: 34 | # Protect the /metrics endpoint by putting it behind auth. 35 | # If you want your controller-manager to expose the /metrics 36 | # endpoint w/o any authn/z, please comment the following line. 37 | - manager_auth_proxy_patch.yaml 38 | - manager_related_image_patch.yaml 39 | - manager_run_flags_patch.yaml 40 | - prometheus_service_monitor_patch.yaml 41 | 42 | images: 43 | - name: controller 44 | newName: quay.io/viaq/loki-operator 45 | newTag: v0.0.1 46 | 47 | # Mount the controller config file for loading manager configurations 48 | # through a ComponentConfig type 49 | #- manager_config_patch.yaml 50 | 51 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 52 | # crd/kustomization.yaml 53 | #- manager_webhook_patch.yaml 54 | 55 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 56 | # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. 
57 | # 'CERTMANAGER' needs to be enabled to use ca injection 58 | #- webhookcainjection_patch.yaml 59 | 60 | # the following config is for teaching kustomize how to do var substitution 61 | vars: 62 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 63 | #- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR 64 | # objref: 65 | # kind: Certificate 66 | # group: cert-manager.io 67 | # version: v1 68 | # name: serving-cert # this name should match the one in certificate.yaml 69 | # fieldref: 70 | # fieldpath: metadata.namespace 71 | #- name: CERTIFICATE_NAME 72 | # objref: 73 | # kind: Certificate 74 | # group: cert-manager.io 75 | # version: v1 76 | # name: serving-cert # this name should match the one in certificate.yaml 77 | #- name: SERVICE_NAMESPACE # namespace of the service 78 | # objref: 79 | # kind: Service 80 | # version: v1 81 | # name: webhook-service 82 | # fieldref: 83 | # fieldpath: metadata.namespace 84 | #- name: SERVICE_NAME 85 | # objref: 86 | # kind: Service 87 | # version: v1 88 | # name: webhook-service 89 | -------------------------------------------------------------------------------- /config/overlays/production/manager_auth_proxy_patch.yaml: -------------------------------------------------------------------------------- 1 | # This patch injects a sidecar container which is an HTTP proxy for the 2 | # controller manager; it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
3 | apiVersion: apps/v1 4 | kind: Deployment 5 | metadata: 6 | name: controller-manager 7 | spec: 8 | template: 9 | spec: 10 | containers: 11 | - name: kube-rbac-proxy 12 | image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0 13 | args: 14 | - "--secure-listen-address=0.0.0.0:8443" 15 | - "--upstream=http://127.0.0.1:8080/" 16 | - "--logtostderr=true" 17 | - "--tls-cert-file=/var/run/secrets/serving-cert/tls.crt" 18 | - "--tls-private-key-file=/var/run/secrets/serving-cert/tls.key" 19 | - "--v=2" 20 | ports: 21 | - containerPort: 8443 22 | name: https 23 | volumeMounts: 24 | - mountPath: /var/run/secrets/serving-cert 25 | name: loki-operator-metrics-cert 26 | volumes: 27 | - name: loki-operator-metrics-cert 28 | secret: 29 | defaultMode: 420 30 | optional: true 31 | secretName: loki-operator-metrics 32 | -------------------------------------------------------------------------------- /config/overlays/production/manager_related_image_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | spec: 6 | template: 7 | spec: 8 | containers: 9 | - name: manager 10 | env: 11 | - name: RELATED_IMAGE_LOKI 12 | value: docker.io/grafana/loki:2.4.1 13 | - name: RELATED_IMAGE_GATEWAY 14 | value: quay.io/observatorium/api:latest 15 | -------------------------------------------------------------------------------- /config/overlays/production/manager_run_flags_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | spec: 6 | template: 7 | spec: 8 | containers: 9 | - name: manager 10 | args: 11 | - "--with-lokistack-gateway" 12 | -------------------------------------------------------------------------------- /config/overlays/production/prometheus_service_monitor_patch.yaml: 
-------------------------------------------------------------------------------- 1 | # Prometheus Monitor Service (Metrics) 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | labels: 6 | name: loki-operator 7 | name: metrics-monitor 8 | spec: 9 | endpoints: 10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 11 | path: /metrics 12 | targetPort: 8443 13 | scheme: https 14 | interval: 30s 15 | scrapeTimeout: 10s 16 | tlsConfig: 17 | caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt 18 | serverName: loki-operator-controller-manager-metrics-service.loki-operator.svc 19 | -------------------------------------------------------------------------------- /config/prometheus/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - monitor.yaml 3 | -------------------------------------------------------------------------------- /config/prometheus/monitor.yaml: -------------------------------------------------------------------------------- 1 | 2 | # Prometheus Monitor Service (Metrics) 3 | apiVersion: monitoring.coreos.com/v1 4 | kind: ServiceMonitor 5 | metadata: 6 | labels: 7 | name: loki-operator 8 | name: metrics-monitor 9 | spec: 10 | selector: 11 | matchLabels: 12 | app.kubernetes.io/name: loki-operator 13 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_client_clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: metrics-reader 5 | rules: 6 | - nonResourceURLs: ["/metrics"] 7 | verbs: ["get"] 8 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 
| kind: ClusterRole 3 | metadata: 4 | name: proxy-role 5 | rules: 6 | - apiGroups: ["authentication.k8s.io"] 7 | resources: 8 | - tokenreviews 9 | verbs: ["create"] 10 | - apiGroups: ["authorization.k8s.io"] 11 | resources: 12 | - subjectaccessreviews 13 | verbs: ["create"] 14 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: proxy-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: proxy-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: default 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | service.beta.openshift.io/serving-cert-secret-name: loki-operator-metrics 6 | labels: 7 | name: controller-manager-metrics-service 8 | spec: 9 | ports: 10 | - name: https 11 | port: 8443 12 | protocol: TCP 13 | targetPort: https 14 | selector: 15 | name: loki-operator-controller-manager 16 | -------------------------------------------------------------------------------- /config/rbac/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - role.yaml 3 | - role_binding.yaml 4 | - leader_election_role.yaml 5 | - leader_election_role_binding.yaml 6 | - auth_proxy_service.yaml 7 | - auth_proxy_role.yaml 8 | - auth_proxy_role_binding.yaml 9 | - auth_proxy_client_clusterrole.yaml 10 | - prometheus_role.yaml 11 | - prometheus_role_binding.yaml 12 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role.yaml: 
-------------------------------------------------------------------------------- 1 | # permissions to do leader election. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: leader-election-role 6 | rules: 7 | - apiGroups: 8 | - "" 9 | - coordination.k8s.io 10 | resources: 11 | - configmaps 12 | - leases 13 | verbs: 14 | - get 15 | - list 16 | - watch 17 | - create 18 | - update 19 | - patch 20 | - delete 21 | - apiGroups: 22 | - "" 23 | resources: 24 | - events 25 | verbs: 26 | - create 27 | - patch 28 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: leader-election-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: Role 8 | name: leader-election-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: default 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/lokistack_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit lokistacks. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: lokistack-editor-role 6 | rules: 7 | - apiGroups: 8 | - loki.openshift.io 9 | resources: 10 | - lokistacks 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - loki.openshift.io 21 | resources: 22 | - lokistacks/status 23 | verbs: 24 | - get 25 | -------------------------------------------------------------------------------- /config/rbac/lokistack_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view lokistacks. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: lokistack-viewer-role 6 | rules: 7 | - apiGroups: 8 | - loki.openshift.io 9 | resources: 10 | - lokistacks 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - loki.openshift.io 17 | resources: 18 | - lokistacks/status 19 | verbs: 20 | - get 21 | -------------------------------------------------------------------------------- /config/rbac/prometheus_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | annotations: 5 | include.release.openshift.io/self-managed-high-availability: "true" 6 | include.release.openshift.io/single-node-developer: "true" 7 | name: prometheus 8 | rules: 9 | - apiGroups: 10 | - "" 11 | resources: 12 | - services 13 | - endpoints 14 | - pods 15 | verbs: 16 | - get 17 | - list 18 | - watch 19 | -------------------------------------------------------------------------------- /config/rbac/prometheus_role_binding.yaml: -------------------------------------------------------------------------------- 1 | # Grant cluster-monitoring access to openshift-operators-redhat metrics 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: RoleBinding 4 | metadata: 5 | name: prometheus 6 | annotations: 7 | include.release.openshift.io/self-managed-high-availability: "true" 8 | include.release.openshift.io/single-node-developer: "true" 9 | roleRef: 10 | apiGroup: rbac.authorization.k8s.io 11 | kind: Role 12 | name: prometheus 13 | subjects: 14 | - kind: ServiceAccount 15 | name: prometheus-k8s 16 | namespace: openshift-monitoring 17 | -------------------------------------------------------------------------------- /config/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | 2 | --- 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRole 5 | metadata: 6 | creationTimestamp: null 
7 | name: manager-role 8 | rules: 9 | - apiGroups: 10 | - "" 11 | resources: 12 | - configmaps 13 | - endpoints 14 | - nodes 15 | - pods 16 | - serviceaccounts 17 | - services 18 | verbs: 19 | - create 20 | - delete 21 | - get 22 | - list 23 | - patch 24 | - update 25 | - watch 26 | - apiGroups: 27 | - "" 28 | resources: 29 | - secrets 30 | verbs: 31 | - get 32 | - list 33 | - watch 34 | - apiGroups: 35 | - apps 36 | resources: 37 | - deployments 38 | - statefulsets 39 | verbs: 40 | - create 41 | - delete 42 | - get 43 | - list 44 | - patch 45 | - update 46 | - watch 47 | - apiGroups: 48 | - config.openshift.io 49 | resources: 50 | - dnses 51 | verbs: 52 | - get 53 | - list 54 | - watch 55 | - apiGroups: 56 | - coordination.k8s.io 57 | resources: 58 | - leases 59 | verbs: 60 | - create 61 | - get 62 | - update 63 | - apiGroups: 64 | - loki.openshift.io 65 | resources: 66 | - lokistacks 67 | verbs: 68 | - create 69 | - delete 70 | - get 71 | - list 72 | - patch 73 | - update 74 | - watch 75 | - apiGroups: 76 | - loki.openshift.io 77 | resources: 78 | - lokistacks/finalizers 79 | verbs: 80 | - update 81 | - apiGroups: 82 | - loki.openshift.io 83 | resources: 84 | - lokistacks/status 85 | verbs: 86 | - get 87 | - patch 88 | - update 89 | - apiGroups: 90 | - monitoring.coreos.com 91 | resources: 92 | - servicemonitors 93 | verbs: 94 | - create 95 | - get 96 | - list 97 | - update 98 | - watch 99 | - apiGroups: 100 | - networking.k8s.io 101 | resources: 102 | - ingresses 103 | verbs: 104 | - create 105 | - get 106 | - list 107 | - update 108 | - watch 109 | - apiGroups: 110 | - rbac.authorization.k8s.io 111 | resources: 112 | - clusterrolebindings 113 | - clusterroles 114 | verbs: 115 | - create 116 | - delete 117 | - get 118 | - list 119 | - patch 120 | - update 121 | - watch 122 | - apiGroups: 123 | - route.openshift.io 124 | resources: 125 | - routes 126 | verbs: 127 | - create 128 | - get 129 | - list 130 | - update 131 | - watch 132 | 
-------------------------------------------------------------------------------- /config/rbac/role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: manager-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: manager-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: default 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/samples/kustomization.yaml: -------------------------------------------------------------------------------- 1 | ## Append samples you want in your CSV to this file as resources ## 2 | resources: 3 | - loki_v1beta1_lokistack.yaml 4 | # +kubebuilder:scaffold:manifestskustomizesamples 5 | -------------------------------------------------------------------------------- /config/samples/loki_v1beta1_lokistack.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: loki.openshift.io/v1beta1 2 | kind: LokiStack 3 | metadata: 4 | name: lokistack-sample 5 | spec: 6 | size: 1x.small 7 | replicationFactor: 2 8 | storage: 9 | secret: 10 | name: test 11 | storageClassName: standard 12 | -------------------------------------------------------------------------------- /config/scorecard/bases/config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: scorecard.operatorframework.io/v1alpha3 2 | kind: Configuration 3 | metadata: 4 | name: config 5 | stages: 6 | - parallel: true 7 | tests: [] 8 | -------------------------------------------------------------------------------- /config/scorecard/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - bases/config.yaml 3 | patchesJson6902: 4 | - path: patches/basic.config.yaml 5 | target: 6 | group: 
scorecard.operatorframework.io 7 | version: v1alpha3 8 | kind: Configuration 9 | name: config 10 | - path: patches/olm.config.yaml 11 | target: 12 | group: scorecard.operatorframework.io 13 | version: v1alpha3 14 | kind: Configuration 15 | name: config 16 | # +kubebuilder:scaffold:patchesJson6902 17 | -------------------------------------------------------------------------------- /config/scorecard/patches/basic.config.yaml: -------------------------------------------------------------------------------- 1 | - op: add 2 | path: /stages/0/tests/- 3 | value: 4 | entrypoint: 5 | - scorecard-test 6 | - basic-check-spec 7 | image: quay.io/operator-framework/scorecard-test:v1.4.0 8 | labels: 9 | suite: basic 10 | test: basic-check-spec-test 11 | -------------------------------------------------------------------------------- /config/scorecard/patches/olm.config.yaml: -------------------------------------------------------------------------------- 1 | - op: add 2 | path: /stages/0/tests/- 3 | value: 4 | entrypoint: 5 | - scorecard-test 6 | - olm-bundle-validation 7 | image: quay.io/operator-framework/scorecard-test:v1.4.0 8 | labels: 9 | suite: olm 10 | test: olm-bundle-validation-test 11 | - op: add 12 | path: /stages/0/tests/- 13 | value: 14 | entrypoint: 15 | - scorecard-test 16 | - olm-crds-have-validation 17 | image: quay.io/operator-framework/scorecard-test:v1.4.0 18 | labels: 19 | suite: olm 20 | test: olm-crds-have-validation-test 21 | - op: add 22 | path: /stages/0/tests/- 23 | value: 24 | entrypoint: 25 | - scorecard-test 26 | - olm-crds-have-resources 27 | image: quay.io/operator-framework/scorecard-test:v1.4.0 28 | labels: 29 | suite: olm 30 | test: olm-crds-have-resources-test 31 | - op: add 32 | path: /stages/0/tests/- 33 | value: 34 | entrypoint: 35 | - scorecard-test 36 | - olm-spec-descriptors 37 | image: quay.io/operator-framework/scorecard-test:v1.4.0 38 | labels: 39 | suite: olm 40 | test: olm-spec-descriptors-test 41 | - op: add 42 | path: 
/stages/0/tests/- 43 | value: 44 | entrypoint: 45 | - scorecard-test 46 | - olm-status-descriptors 47 | image: quay.io/operator-framework/scorecard-test:v1.4.0 48 | labels: 49 | suite: olm 50 | test: olm-status-descriptors-test 51 | -------------------------------------------------------------------------------- /controllers/internal/management/state/state.go: -------------------------------------------------------------------------------- 1 | package state 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/ViaQ/logerr/kverrors" 7 | "github.com/ViaQ/logerr/log" 8 | lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1" 9 | "github.com/ViaQ/loki-operator/internal/external/k8s" 10 | 11 | apierrors "k8s.io/apimachinery/pkg/api/errors" 12 | 13 | ctrl "sigs.k8s.io/controller-runtime" 14 | ) 15 | 16 | // IsManaged checks if the custom resource is configured with ManagementState Managed. 17 | func IsManaged(ctx context.Context, req ctrl.Request, k k8s.Client) (bool, error) { 18 | ll := log.WithValues("lokistack", req.NamespacedName) 19 | 20 | var stack lokiv1beta1.LokiStack 21 | if err := k.Get(ctx, req.NamespacedName, &stack); err != nil { 22 | if apierrors.IsNotFound(err) { 23 | // maybe the user deleted it before we could react? 
Either way this isn't an issue 24 | ll.Error(err, "could not find the requested loki stack", "name", req.NamespacedName) 25 | return false, nil 26 | } 27 | return false, kverrors.Wrap(err, "failed to lookup lokistack", "name", req.NamespacedName) 28 | } 29 | return stack.Spec.ManagementState == lokiv1beta1.ManagementStateManaged, nil 30 | } 31 | -------------------------------------------------------------------------------- /controllers/internal/management/state/state_test.go: -------------------------------------------------------------------------------- 1 | package state_test 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/ViaQ/logerr/kverrors" 8 | lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1" 9 | "github.com/ViaQ/loki-operator/controllers/internal/management/state" 10 | "github.com/ViaQ/loki-operator/internal/external/k8s/k8sfakes" 11 | "github.com/stretchr/testify/require" 12 | 13 | apierrors "k8s.io/apimachinery/pkg/api/errors" 14 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 15 | "k8s.io/apimachinery/pkg/runtime/schema" 16 | "k8s.io/apimachinery/pkg/types" 17 | 18 | ctrl "sigs.k8s.io/controller-runtime" 19 | "sigs.k8s.io/controller-runtime/pkg/client" 20 | ) 21 | 22 | func TestIsManaged(t *testing.T) { 23 | type test struct { 24 | name string 25 | stack lokiv1beta1.LokiStack 26 | wantOk bool 27 | } 28 | 29 | k := &k8sfakes.FakeClient{} 30 | r := ctrl.Request{ 31 | NamespacedName: types.NamespacedName{ 32 | Name: "my-stack", 33 | Namespace: "some-ns", 34 | }, 35 | } 36 | table := []test{ 37 | { 38 | name: "managed", 39 | stack: lokiv1beta1.LokiStack{ 40 | TypeMeta: metav1.TypeMeta{ 41 | Kind: "LokiStack", 42 | }, 43 | ObjectMeta: metav1.ObjectMeta{ 44 | Name: "my-stack", 45 | Namespace: "some-ns", 46 | UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", 47 | }, 48 | Spec: lokiv1beta1.LokiStackSpec{ 49 | ManagementState: lokiv1beta1.ManagementStateManaged, 50 | }, 51 | }, 52 | wantOk: true, 53 | }, 54 | { 55 | name: "unmanaged", 56 | 
stack: lokiv1beta1.LokiStack{ 57 | TypeMeta: metav1.TypeMeta{ 58 | Kind: "LokiStack", 59 | }, 60 | ObjectMeta: metav1.ObjectMeta{ 61 | Name: "my-stack", 62 | Namespace: "some-ns", 63 | UID: "b23f9a38-9672-499f-8c29-15ede74d3ece", 64 | }, 65 | Spec: lokiv1beta1.LokiStackSpec{ 66 | ManagementState: lokiv1beta1.ManagementStateUnmanaged, 67 | }, 68 | }, 69 | }, 70 | } 71 | for _, tst := range table { 72 | t.Run(tst.name, func(t *testing.T) { 73 | k.GetStub = func(_ context.Context, _ types.NamespacedName, object client.Object) error { 74 | k.SetClientObject(object, &tst.stack) 75 | return nil 76 | } 77 | ok, err := state.IsManaged(context.TODO(), r, k) 78 | require.NoError(t, err) 79 | require.Equal(t, ok, tst.wantOk) 80 | }) 81 | } 82 | } 83 | 84 | func TestIsManaged_WhenError_ReturnNotManagedWithError(t *testing.T) { 85 | type test struct { 86 | name string 87 | apierror error 88 | wantErr error 89 | } 90 | 91 | badReqErr := apierrors.NewBadRequest("bad request") 92 | k := &k8sfakes.FakeClient{} 93 | r := ctrl.Request{ 94 | NamespacedName: types.NamespacedName{ 95 | Name: "my-stack", 96 | Namespace: "some-ns", 97 | }, 98 | } 99 | table := []test{ 100 | { 101 | name: "stack not found error", 102 | apierror: apierrors.NewNotFound(schema.GroupResource{}, "something not found"), 103 | }, 104 | { 105 | name: "any other api error", 106 | apierror: badReqErr, 107 | wantErr: kverrors.Wrap(badReqErr, "failed to lookup lokistack", "name", r.NamespacedName), 108 | }, 109 | } 110 | for _, tst := range table { 111 | t.Run(tst.name, func(t *testing.T) { 112 | k.GetReturns(tst.apierror) 113 | ok, err := state.IsManaged(context.TODO(), r, k) 114 | require.Equal(t, tst.wantErr, err) 115 | require.False(t, ok) 116 | }) 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /controllers/lokistack_controller_test.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 
| "flag" 5 | "io/ioutil" 6 | "os" 7 | "testing" 8 | 9 | "github.com/ViaQ/logerr/log" 10 | lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1" 11 | "github.com/ViaQ/loki-operator/internal/external/k8s/k8sfakes" 12 | "github.com/ViaQ/loki-operator/internal/manifests" 13 | routev1 "github.com/openshift/api/route/v1" 14 | "github.com/stretchr/testify/require" 15 | 16 | appsv1 "k8s.io/api/apps/v1" 17 | corev1 "k8s.io/api/core/v1" 18 | networkingv1 "k8s.io/api/networking/v1" 19 | rbacv1 "k8s.io/api/rbac/v1" 20 | "k8s.io/apimachinery/pkg/runtime" 21 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 22 | clientgoscheme "k8s.io/client-go/kubernetes/scheme" 23 | "sigs.k8s.io/controller-runtime/pkg/builder" 24 | "sigs.k8s.io/controller-runtime/pkg/client" 25 | ) 26 | 27 | var scheme = runtime.NewScheme() 28 | 29 | func TestMain(m *testing.M) { 30 | testing.Init() 31 | flag.Parse() 32 | 33 | if testing.Verbose() { 34 | // set to the highest for verbose testing 35 | log.SetLogLevel(5) 36 | } else { 37 | if err := log.SetOutput(ioutil.Discard); err != nil { 38 | // This would only happen if the default logger was changed which it hasn't so 39 | // we can assume that a panic is necessary and the developer is to blame. 
40 | panic(err) 41 | } 42 | } 43 | 44 | // Register the clientgo and CRD schemes 45 | utilruntime.Must(clientgoscheme.AddToScheme(scheme)) 46 | utilruntime.Must(routev1.AddToScheme(scheme)) 47 | utilruntime.Must(lokiv1beta1.AddToScheme(scheme)) 48 | 49 | log.Init("testing") 50 | os.Exit(m.Run()) 51 | } 52 | 53 | func TestLokiStackController_RegistersCustomResourceForCreateOrUpdate(t *testing.T) { 54 | b := &k8sfakes.FakeBuilder{} 55 | k := &k8sfakes.FakeClient{} 56 | c := &LokiStackReconciler{Client: k, Scheme: scheme} 57 | 58 | b.ForReturns(b) 59 | b.OwnsReturns(b) 60 | 61 | err := c.buildController(b) 62 | require.NoError(t, err) 63 | 64 | // Require only one For-Call for the custom resource 65 | require.Equal(t, 1, b.ForCallCount()) 66 | 67 | // Require For-call options to have create and update predicates 68 | obj, opts := b.ForArgsForCall(0) 69 | require.Equal(t, &lokiv1beta1.LokiStack{}, obj) 70 | require.Equal(t, opts[0], createOrUpdateOnlyPred) 71 | } 72 | 73 | func TestLokiStackController_RegisterOwnedResourcesForUpdateOrDeleteOnly(t *testing.T) { 74 | k := &k8sfakes.FakeClient{} 75 | 76 | // Require owned resources 77 | type test struct { 78 | obj client.Object 79 | index int 80 | flags manifests.FeatureFlags 81 | pred builder.OwnsOption 82 | } 83 | table := []test{ 84 | { 85 | obj: &corev1.ConfigMap{}, 86 | index: 0, 87 | pred: updateOrDeleteOnlyPred, 88 | }, 89 | { 90 | obj: &corev1.ServiceAccount{}, 91 | index: 1, 92 | pred: updateOrDeleteOnlyPred, 93 | }, 94 | { 95 | obj: &corev1.Service{}, 96 | index: 2, 97 | pred: updateOrDeleteOnlyPred, 98 | }, 99 | { 100 | obj: &appsv1.Deployment{}, 101 | index: 3, 102 | pred: updateOrDeleteOnlyPred, 103 | }, 104 | { 105 | obj: &appsv1.StatefulSet{}, 106 | index: 4, 107 | pred: updateOrDeleteOnlyPred, 108 | }, 109 | { 110 | obj: &rbacv1.ClusterRole{}, 111 | index: 5, 112 | pred: updateOrDeleteOnlyPred, 113 | }, 114 | { 115 | obj: &rbacv1.ClusterRoleBinding{}, 116 | index: 6, 117 | pred: updateOrDeleteOnlyPred, 118 
| }, 119 | { 120 | obj: &networkingv1.Ingress{}, 121 | index: 7, 122 | flags: manifests.FeatureFlags{ 123 | EnableGatewayRoute: false, 124 | }, 125 | pred: updateOrDeleteOnlyPred, 126 | }, 127 | { 128 | obj: &routev1.Route{}, 129 | index: 7, 130 | flags: manifests.FeatureFlags{ 131 | EnableGatewayRoute: true, 132 | }, 133 | pred: updateOrDeleteOnlyPred, 134 | }, 135 | } 136 | for _, tst := range table { 137 | b := &k8sfakes.FakeBuilder{} 138 | b.ForReturns(b) 139 | b.OwnsReturns(b) 140 | 141 | c := &LokiStackReconciler{Client: k, Scheme: scheme, Flags: tst.flags} 142 | err := c.buildController(b) 143 | require.NoError(t, err) 144 | 145 | // Require Owns-Calls for all owned resources 146 | require.Equal(t, 8, b.OwnsCallCount()) 147 | 148 | // Require Owns-call options to have delete predicate only 149 | obj, opts := b.OwnsArgsForCall(tst.index) 150 | require.Equal(t, tst.obj, obj) 151 | require.Equal(t, tst.pred, opts[0]) 152 | } 153 | } 154 | -------------------------------------------------------------------------------- /docs/storage_size_calculator.md: -------------------------------------------------------------------------------- 1 | # Install Storage Size Calculator on OpenShift 2 | 3 | This document demonstrates how to install storage size calculator for loki on OpenShift. 4 | 5 | _Note:_ The storage size calculator works out of the box on OpenShift. 6 | 7 | ## Introduction 8 | 9 | Storage Size Calculator is used to have an idea on how to properly size a Loki cluster. It spins up a log collector that is used to collect metrics for a period of time and based on the amount of logs being collected, extrapolate the amount of logs required for a day and from that recommend a t-shirt size. 10 | 11 | ## Requirements 12 | 13 | * Create a running OpenShift cluster. 14 | * A container registry that you and your OpenShift cluster can reach. We recommend [quay.io](https://quay.io/signin/). 
15 | 16 | ## Installation 17 | 18 | * Deploy the [Loki Operator](https://github.com/ViaQ/loki-operator/blob/master/docs/hack_loki_operator.md#hacking-on-loki-operator-on-openshift) to the cluster. 19 | 20 | * Deploy the storage size calculator by executing following command in the terminal: 21 | 22 | ```console 23 | make deploy-size-calculator REGISTRY_ORG=$YOUR_QUAY_ORG 24 | ``` 25 | 26 | where `$YOUR_QUAY_ORG` is your personal [quay.io](http://quay.io/) account where you can push container images. 27 | 28 | You should see `log-file-metric-exporter-xxx` and `storage-size-calculator-xxx` pods running. 29 | 30 | _Note:_ [log-file-metric-exporter](https://github.com/ViaQ/log-file-metric-exporter) is used to collect metrics about container logs being produced in a kubernetes environment. It publishes `log_logged_bytes_total` metric to prometheus. 31 | 32 | * Now you can check the logs to see the recommended t-shirt size for your cluster: 33 | 34 | ```console 35 | kubectl -n openshift-logging logs <pod_name> 36 | ``` 37 | 38 | where `<pod_name>` is the name of the storage size calculator pod and can be found using: 39 | 40 | ```console 41 | kubectl -n openshift-logging get pods 42 | ``` 43 | 44 | _Note:_ The storage size calculator logs the recommended t-shirt size every minute. 45 | 46 | ## Cleanup 47 | 48 | To cleanup the deployment you can use: 49 | 50 | ```console 51 | make undeploy-size-calculator 52 | ``` 53 | 54 | This will cleanup the resources related to storage size calculator. However, the Loki Operator would still be running. 55 | 56 | ## Contribution 57 | 58 | If you want to contribute to the storage size calculator, you can follow this local development and testing process. 59 | 60 | * Fork and clone the [Loki Operator](https://github.com/ViaQ/loki-operator) repo. 
61 | 62 | * All the files related to storage size calculator exist at [`config/overlays/openshift/size-calculator`](https://github.com/ViaQ/loki-operator/tree/master/config/overlays/openshift/size-calculator) and the main file is at [`cmd/size-calculator`](https://github.com/ViaQ/loki-operator/tree/master/cmd/size-calculator). 63 | 64 | * Update the code to fix a bug or add a new feature. 65 | 66 | * To test the changes made, build the image and push it to quay. Replace [here](https://github.com/ViaQ/loki-operator/blob/master/config/overlays/openshift/size-calculator/storage_size_calculator.yaml#L18) with your quay image to test the changes. 67 | 68 | Build the image using: 69 | 70 | ```console 71 | make oci-build-calculator 72 | ``` 73 | 74 | This will build the storage size calculator image using [dockerfile](https://github.com/ViaQ/loki-operator/blob/master/calculator.Dockerfile). 75 | 76 | Push the image to quay using: 77 | 78 | ```console 79 | make oci-push-calculator 80 | ``` 81 | 82 | After replacing the image name, deploy the storage size calculator to test your changes: 83 | 84 | ```console 85 | make deploy-size-calculator REGISTRY_ORG=$YOUR_QUAY_ORG 86 | ``` 87 | 88 | where `$YOUR_QUAY_ORG` is your personal [quay.io](http://quay.io/) account where you pushed your container image. 89 | 90 | * If everything works fine then create a pull request. 91 | 92 | ## Troubleshooting 93 | 94 | ### Permission denied on deploying prometheus secret 95 | 96 | If you get a `permission denied` error while running `make deploy-size-calculator` then make [this](https://github.com/ViaQ/loki-operator/blob/master/hack/deploy-prometheus-secret.sh) file executable by running: 97 | 98 | ```console 99 | chmod +x hack/deploy-prometheus-secret.sh 100 | ``` 101 | 102 | Now rerun the same `make deploy-size-calculator` again and it should work fine. 
103 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/ViaQ/loki-operator 2 | 3 | go 1.16 4 | 5 | require ( 6 | github.com/ViaQ/logerr v1.0.10 7 | github.com/go-logr/logr v0.4.0 8 | github.com/google/uuid v1.1.2 9 | github.com/imdario/mergo v0.3.12 10 | github.com/maxbrunsfeld/counterfeiter/v6 v6.3.0 11 | github.com/openshift/api v0.0.0-20210901140736-d8ed1449662d // release-4.9 12 | github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.48.0 13 | github.com/prometheus/client_golang v1.11.0 14 | github.com/prometheus/common v0.32.0 15 | github.com/stretchr/testify v1.7.0 16 | k8s.io/api v0.22.1 17 | k8s.io/apimachinery v0.22.1 18 | k8s.io/client-go v0.22.1 19 | k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 20 | sigs.k8s.io/controller-runtime v0.9.2 21 | sigs.k8s.io/yaml v1.2.0 22 | ) 23 | -------------------------------------------------------------------------------- /hack/boilerplate.go.txt: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ -------------------------------------------------------------------------------- /hack/deploy-example-secret.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eou pipefail 4 | 5 | NAMESPACE=$1 6 | 7 | REGION="" 8 | ENDPOINT="" 9 | ACCESS_KEY_ID="" 10 | SECRET_ACCESS_KEY="" 11 | LOKI_BUCKET_NAME="${LOKI_BUCKET_NAME:-loki}" 12 | 13 | set_credentials_from_aws() { 14 | REGION="$(aws configure get region)" 15 | ACCESS_KEY_ID="$(aws configure get aws_access_key_id)" 16 | SECRET_ACCESS_KEY="$(aws configure get aws_secret_access_key)" 17 | ENDPOINT="https://s3.${REGION}.amazonaws.com" 18 | } 19 | 20 | create_secret() { 21 | kubectl -n $NAMESPACE delete secret test ||: 22 | kubectl -n $NAMESPACE create secret generic test \ 23 | --from-literal=endpoint=$(echo -n "$ENDPOINT") \ 24 | --from-literal=region=$(echo -n "$REGION") \ 25 | --from-literal=bucketnames=$(echo -n "$LOKI_BUCKET_NAME") \ 26 | --from-literal=access_key_id=$(echo -n "$ACCESS_KEY_ID") \ 27 | --from-literal=access_key_secret=$(echo -n "$SECRET_ACCESS_KEY") 28 | } 29 | 30 | main() { 31 | set_credentials_from_aws 32 | create_secret 33 | } 34 | 35 | main 36 | -------------------------------------------------------------------------------- /hack/deploy-prometheus-secret.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eou pipefail 4 | 5 | USER_WORKLOAD_NAMESPACE=openshift-user-workload-monitoring 6 | NAMESPACE=openshift-logging 7 | 8 | secret=$(kubectl -n $USER_WORKLOAD_NAMESPACE get secret | grep prometheus-user-workload-token | head -n 1 | awk '{print $1 }') 9 | PROMETHEUS_URL="https://$(kubectl -n openshift-monitoring get route thanos-querier -o json | jq -r '.spec.host')" 10 | PROMETHEUS_TOKEN=$(kubectl -n $USER_WORKLOAD_NAMESPACE get secret $secret -o json | jq -r '.data.token' | base64 -d) 11 | 12 | kubectl -n $NAMESPACE delete secret promsecret ||: 13 | kubectl -n 
$NAMESPACE create secret generic promsecret \ 14 | --from-literal=prometheus_url=$(echo -n "$PROMETHEUS_URL") \ 15 | --from-literal=prometheus_token=$(echo -n "$PROMETHEUS_TOKEN") 16 | -------------------------------------------------------------------------------- /hack/lokistack_dev.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: loki.openshift.io/v1beta1 2 | kind: LokiStack 3 | metadata: 4 | name: lokistack-dev 5 | spec: 6 | size: 1x.extra-small 7 | replicationFactor: 1 8 | storage: 9 | secret: 10 | name: test 11 | storageClassName: standard 12 | -------------------------------------------------------------------------------- /hack/lokistack_gateway_dev.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: loki.openshift.io/v1beta1 2 | kind: LokiStack 3 | metadata: 4 | name: lokistack-dev 5 | spec: 6 | size: 1x.extra-small 7 | replicationFactor: 1 8 | storage: 9 | secret: 10 | name: test 11 | storageClassName: gp2 12 | tenants: 13 | mode: static 14 | authentication: 15 | - tenantName: tenant-a 16 | tenantId: test 17 | oidc: 18 | secret: 19 | name: test1 20 | issuerURL: https://127.0.0.1:5556/dex 21 | redirectURL: https://localhost:8443/oidc/tenant-a/callback 22 | usernameClaim: test 23 | groupClaim: test 24 | authorization: 25 | roleBindings: 26 | - name: tenant-a 27 | roles: 28 | - read-write 29 | subjects: 30 | - kind: user 31 | name: admin@example.com 32 | roles: 33 | - name: read-write 34 | permissions: 35 | - read 36 | - write 37 | resources: 38 | - metrics 39 | tenants: 40 | - tenant-a 41 | -------------------------------------------------------------------------------- /hack/lokistack_gateway_ocp.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: loki.openshift.io/v1beta1 2 | kind: LokiStack 3 | metadata: 4 | name: lokistack-dev 5 | spec: 6 | size: 1x.extra-small 7 | replicationFactor: 1 8 | storage: 9 
| secret: 10 | name: test 11 | storageClassName: gp2 12 | tenants: 13 | mode: openshift-logging 14 | -------------------------------------------------------------------------------- /img/loki-operator.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ViaQ/loki-operator/8c8647d81953a8c948f5d66c0aa5dedce654c13e/img/loki-operator.png -------------------------------------------------------------------------------- /index.md: -------------------------------------------------------------------------------- 1 | ## Welcome to Loki Operator 2 | 3 | This is the Kubernetes Operator for Loki provided by the Red Hat OpenShift engineering team. This is currently a work in progress and is subject to large scale changes that will break any dependencies. Do not use this in any production environment. 4 | 5 | ### Hacking on Loki Operator on kind or OpenShift 6 | 7 | * If you want to contribute to this repository, you might need a step-by-step guide on how to start [hacking on Loki-operator with kind](https://github.com/ViaQ/loki-operator/blob/master/docs/hack_loki_operator.md#hacking-on-loki-operator-using-kind). 8 | * Also, there is a step-by-step guide on how to test Loki-operator on [OpenShift](https://github.com/ViaQ/loki-operator/blob/master/docs/hack_loki_operator.md#hacking-on-loki-operator-on-openshift). 9 | * There is also a [basic troubleshooting guide](https://github.com/ViaQ/loki-operator/blob/master/docs/hack_loki_operator.md#basic-troubleshooting-on-hacking-on-loki-operator) if you run into some common problems. 10 | * There is also a [document](https://github.com/ViaQ/loki-operator/blob/master/docs/hack_operator_make_run.md) which demonstrates how to use Loki Operator for development and testing locally without deploying the operator each time on Kind and OpenShift using the `make run` command. 
11 | 12 | ### Sending Logs to Loki through the Gateway Component 13 | 14 | * The [forwarding logs to LokiStack guide](https://github.com/ViaQ/loki-operator/tree/master/docs/forwarding_logs_to_gateway.md) provides instructions for configuring forwarding clients to ship logs to Loki through the gateway component. 15 | * This section details [how to connect a Promtail](https://github.com/ViaQ/loki-operator/tree/master/docs/forwarding_logs_to_gateway.md#promtail) installation to the gateway. 16 | * This section details [how to connect a Grafana Fluentd plugin](https://github.com/ViaQ/loki-operator/tree/master/docs/forwarding_logs_to_gateway.md#fluentd) installation to the gateway. 17 | 18 | ### Installation of Storage Size Calculator on OpenShift 19 | 20 | * Storage size calculator works out of the box on OpenShift. For non-openshift distributions you will need to create services like prometheus, serviceMonitor, scrape configuration for log-file-metric exporter, promsecret to access the custom prometheus URL, token. 21 | * The step-by-step guide on how to install [storage size calculator](https://github.com/ViaQ/loki-operator/blob/master/docs/storage_size_calculator.md) on OpenShift is available. 22 | * Also, there is a step-by-step guide on how to [contribute](https://github.com/ViaQ/loki-operator/blob/master/docs/storage_size_calculator.md#contribution) to this along with local development and testing procedure. 23 | * There is also a [basic troubleshooting guide](https://github.com/ViaQ/loki-operator/blob/master/docs/storage_size_calculator.md#troubleshooting) if you run into some common problems. 
24 | -------------------------------------------------------------------------------- /internal/external/k8s/builder.go: -------------------------------------------------------------------------------- 1 | package k8s 2 | 3 | import ( 4 | "github.com/go-logr/logr" 5 | "sigs.k8s.io/controller-runtime/pkg/builder" 6 | "sigs.k8s.io/controller-runtime/pkg/client" 7 | "sigs.k8s.io/controller-runtime/pkg/controller" 8 | "sigs.k8s.io/controller-runtime/pkg/predicate" 9 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 10 | ) 11 | 12 | // Builder is a controller-runtime interface used internally. It copies function from 13 | // sigs.k8s.io/controller-runtime/pkg/builder 14 | // 15 | //counterfeiter:generate . Builder 16 | type Builder interface { 17 | For(object client.Object, opts ...builder.ForOption) Builder 18 | Owns(object client.Object, opts ...builder.OwnsOption) Builder 19 | WithEventFilter(p predicate.Predicate) Builder 20 | WithOptions(options controller.Options) Builder 21 | WithLogger(log logr.Logger) Builder 22 | Named(name string) Builder 23 | Complete(r reconcile.Reconciler) error 24 | Build(r reconcile.Reconciler) (controller.Controller, error) 25 | } 26 | 27 | type ctrlBuilder struct { 28 | bld *builder.Builder 29 | } 30 | 31 | // NewCtrlBuilder returns a self-referencing controlled builder 32 | // passthrough wrapper implementing the Builder interface above. 
33 | func NewCtrlBuilder(b *builder.Builder) Builder { 34 | return &ctrlBuilder{bld: b} 35 | } 36 | 37 | func (b *ctrlBuilder) For(object client.Object, opts ...builder.ForOption) Builder { 38 | return &ctrlBuilder{bld: b.bld.For(object, opts...)} 39 | } 40 | 41 | func (b *ctrlBuilder) Owns(object client.Object, opts ...builder.OwnsOption) Builder { 42 | return &ctrlBuilder{bld: b.bld.Owns(object, opts...)} 43 | } 44 | 45 | func (b *ctrlBuilder) WithEventFilter(p predicate.Predicate) Builder { 46 | return &ctrlBuilder{bld: b.bld.WithEventFilter(p)} 47 | } 48 | 49 | func (b *ctrlBuilder) WithOptions(opts controller.Options) Builder { 50 | return &ctrlBuilder{bld: b.bld.WithOptions(opts)} 51 | } 52 | 53 | func (b *ctrlBuilder) WithLogger(log logr.Logger) Builder { 54 | return &ctrlBuilder{bld: b.bld.WithLogger(log)} 55 | } 56 | 57 | func (b *ctrlBuilder) Named(name string) Builder { 58 | return &ctrlBuilder{bld: b.bld.Named(name)} 59 | } 60 | 61 | func (b *ctrlBuilder) Complete(r reconcile.Reconciler) error { 62 | return b.bld.Complete(r) 63 | } 64 | 65 | func (b *ctrlBuilder) Build(r reconcile.Reconciler) (controller.Controller, error) { 66 | return b.bld.Build(r) 67 | } 68 | -------------------------------------------------------------------------------- /internal/external/k8s/client.go: -------------------------------------------------------------------------------- 1 | package k8s 2 | 3 | import ( 4 | "context" 5 | 6 | "k8s.io/apimachinery/pkg/api/meta" 7 | "k8s.io/apimachinery/pkg/runtime" 8 | 9 | "sigs.k8s.io/controller-runtime/pkg/client" 10 | ) 11 | 12 | //go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 -generate 13 | 14 | // Client is a kubernetes client interface used internally. It copies functions from 15 | // sigs.k8s.io/controller-runtime/pkg/client 16 | // 17 | //counterfeiter:generate . 
Client 18 | type Client interface { 19 | Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error 20 | Get(ctx context.Context, key client.ObjectKey, obj client.Object) error 21 | 22 | Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error 23 | Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error 24 | DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error 25 | List(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error 26 | Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error 27 | 28 | RESTMapper() meta.RESTMapper 29 | Scheme() *runtime.Scheme 30 | 31 | Status() client.StatusWriter 32 | } 33 | 34 | // StatusWriter is a kubernetes status writer interface used internally. It copies functions from 35 | // sigs.k8s.io/controller-runtime/pkg/client 36 | // 37 | //counterfeiter:generate . StatusWriter 38 | type StatusWriter interface { 39 | Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error 40 | Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error 41 | } 42 | -------------------------------------------------------------------------------- /internal/external/k8s/k8sfakes/fake_client_extensions.go: -------------------------------------------------------------------------------- 1 | package k8sfakes 2 | 3 | import ( 4 | "reflect" 5 | 6 | "sigs.k8s.io/controller-runtime/pkg/client" 7 | ) 8 | 9 | // SetClientObject sets out to v. 
10 | // This is primarily used within the GetStub to fake the object returned from the API to the value of v 11 | // 12 | // Examples: 13 | // 14 | // k.GetStub = func(_ context.Context, _ types.NamespacedName, object client.Object) error { 15 | // k.SetClientObject(object, &stack) 16 | // return nil 17 | // } 18 | func (fake *FakeClient) SetClientObject(out, v client.Object) { 19 | reflect.Indirect(reflect.ValueOf(out)).Set(reflect.ValueOf(v).Elem()) 20 | } 21 | 22 | // SetClientObjectList sets out list to v. 23 | // This is primarily used within the GetStub to fake the object returned from the API to the value of v 24 | // 25 | // Examples: 26 | // 27 | // k.GetStub = func(_ context.Context, _ types.NamespacedName, list client.ObjectList) error { 28 | // k.SetClientObjectList(list, &podList) 29 | // return nil 30 | // } 31 | func (fake *FakeClient) SetClientObjectList(out, v client.ObjectList) { 32 | reflect.Indirect(reflect.ValueOf(out)).Set(reflect.ValueOf(v).Elem()) 33 | } 34 | -------------------------------------------------------------------------------- /internal/handlers/internal/gateway/base_domain.go: -------------------------------------------------------------------------------- 1 | package gateway 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/ViaQ/logerr/kverrors" 7 | lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1" 8 | "github.com/ViaQ/loki-operator/internal/external/k8s" 9 | "github.com/ViaQ/loki-operator/internal/status" 10 | configv1 "github.com/openshift/api/config/v1" 11 | apierrors "k8s.io/apimachinery/pkg/api/errors" 12 | ctrl "sigs.k8s.io/controller-runtime" 13 | "sigs.k8s.io/controller-runtime/pkg/client" 14 | ) 15 | 16 | // GetOpenShiftBaseDomain returns the cluster DNS base domain on OpenShift 17 | // clusters to auto-create redirect URLs for OpenShift Auth or an error. 18 | // If the config.openshift.io/DNS object is not found the whole lokistack 19 | resource is set to a degraded state. 
20 | func GetOpenShiftBaseDomain(ctx context.Context, k k8s.Client, req ctrl.Request) (string, error) { 21 | var cluster configv1.DNS 22 | key := client.ObjectKey{Name: "cluster"} 23 | if err := k.Get(ctx, key, &cluster); err != nil { 24 | 25 | if apierrors.IsNotFound(err) { 26 | statusErr := status.SetDegradedCondition(ctx, k, req, 27 | "Missing cluster DNS configuration to read base domain", 28 | lokiv1beta1.ReasonMissingGatewayOpenShiftBaseDomain, 29 | ) 30 | if statusErr != nil { 31 | return "", statusErr 32 | } 33 | 34 | return "", kverrors.Wrap(err, "Missing cluster DNS configuration to read base domain") 35 | } 36 | return "", kverrors.Wrap(err, "failed to lookup lokistack gateway base domain", 37 | "name", key) 38 | } 39 | 40 | return cluster.Spec.BaseDomain, nil 41 | } 42 | -------------------------------------------------------------------------------- /internal/handlers/internal/gateway/modes.go: -------------------------------------------------------------------------------- 1 | package gateway 2 | 3 | import ( 4 | "github.com/ViaQ/logerr/kverrors" 5 | lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1" 6 | ) 7 | 8 | // ValidateModes validates the tenants mode specification. 
9 | func ValidateModes(stack lokiv1beta1.LokiStack) error { 10 | if stack.Spec.Tenants.Mode == lokiv1beta1.Static { 11 | if stack.Spec.Tenants.Authentication == nil { 12 | return kverrors.New("mandatory configuration - missing tenants' authentication configuration") 13 | } 14 | 15 | if stack.Spec.Tenants.Authorization == nil || stack.Spec.Tenants.Authorization.Roles == nil { 16 | return kverrors.New("mandatory configuration - missing roles configuration") 17 | } 18 | 19 | if stack.Spec.Tenants.Authorization == nil || stack.Spec.Tenants.Authorization.RoleBindings == nil { 20 | return kverrors.New("mandatory configuration - missing role bindings configuration") 21 | } 22 | 23 | if stack.Spec.Tenants.Authorization != nil && stack.Spec.Tenants.Authorization.OPA != nil { 24 | return kverrors.New("incompatible configuration - OPA URL not required for mode static") 25 | } 26 | } 27 | 28 | if stack.Spec.Tenants.Mode == lokiv1beta1.Dynamic { 29 | if stack.Spec.Tenants.Authentication == nil { 30 | return kverrors.New("mandatory configuration - missing tenants configuration") 31 | } 32 | 33 | if stack.Spec.Tenants.Authorization == nil || stack.Spec.Tenants.Authorization.OPA == nil { 34 | return kverrors.New("mandatory configuration - missing OPA Url") 35 | } 36 | 37 | if stack.Spec.Tenants.Authorization != nil && stack.Spec.Tenants.Authorization.Roles != nil { 38 | return kverrors.New("incompatible configuration - static roles not required for mode dynamic") 39 | } 40 | 41 | if stack.Spec.Tenants.Authorization != nil && stack.Spec.Tenants.Authorization.RoleBindings != nil { 42 | return kverrors.New("incompatible configuration - static roleBindings not required for mode dynamic") 43 | } 44 | } 45 | 46 | if stack.Spec.Tenants.Mode == lokiv1beta1.OpenshiftLogging { 47 | if stack.Spec.Tenants.Authentication != nil { 48 | return kverrors.New("incompatible configuration - custom tenants configuration not required") 49 | } 50 | 51 | if stack.Spec.Tenants.Authorization != nil { 52 | 
return kverrors.New("incompatible configuration - custom tenants configuration not required") 53 | } 54 | } 55 | 56 | return nil 57 | } 58 | -------------------------------------------------------------------------------- /internal/handlers/internal/gateway/tenant_configmap.go: -------------------------------------------------------------------------------- 1 | package gateway 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/ViaQ/loki-operator/internal/manifests/openshift" 7 | 8 | "github.com/ViaQ/logerr/log" 9 | 10 | "github.com/ViaQ/logerr/kverrors" 11 | "github.com/ViaQ/loki-operator/internal/manifests" 12 | corev1 "k8s.io/api/core/v1" 13 | "k8s.io/apimachinery/pkg/util/json" 14 | "sigs.k8s.io/yaml" 15 | 16 | "github.com/ViaQ/loki-operator/internal/external/k8s" 17 | ctrl "sigs.k8s.io/controller-runtime" 18 | "sigs.k8s.io/controller-runtime/pkg/client" 19 | ) 20 | 21 | const ( 22 | // LokiGatewayTenantFileName is the name of the tenant config file in the configmap 23 | LokiGatewayTenantFileName = "tenants.yaml" 24 | ) 25 | 26 | type tenantsConfigJSON struct { 27 | Tenants []tenantsSpec `json:"tenants,omitempty"` 28 | } 29 | 30 | type tenantsSpec struct { 31 | Name string `json:"name"` 32 | ID string `json:"id"` 33 | OpenShift *openShiftSpec `json:"openshift"` 34 | } 35 | 36 | type openShiftSpec struct { 37 | ServiceAccount string `json:"serviceAccount"` 38 | RedirectURL string `json:"redirectURL"` 39 | CookieSecret string `json:"cookieSecret"` 40 | } 41 | 42 | // GetTenantConfigMapData returns the tenantName, tenantId, cookieSecret 43 | // clusters to auto-create redirect URLs for OpenShift Auth or an error. 
44 | func GetTenantConfigMapData(ctx context.Context, k k8s.Client, req ctrl.Request) map[string]openshift.TenantData { 45 | var tenantConfigMap corev1.ConfigMap 46 | key := client.ObjectKey{Name: manifests.LabelGatewayComponent, Namespace: req.Namespace} 47 | if err := k.Get(ctx, key, &tenantConfigMap); err != nil { 48 | log.Error(err, "couldn't find") 49 | return nil 50 | } 51 | 52 | tcm, err := extractTenantConfigMap(&tenantConfigMap) 53 | if err != nil { 54 | log.Error(err, "error occurred in extracting tenants.yaml configMap.") 55 | return nil 56 | } 57 | 58 | tcmMap := make(map[string]openshift.TenantData) 59 | for _, tenant := range tcm.Tenants { 60 | tcmMap[tenant.Name] = openshift.TenantData{ 61 | TenantID: tenant.ID, 62 | CookieSecret: tenant.OpenShift.CookieSecret, 63 | } 64 | } 65 | 66 | return tcmMap 67 | } 68 | 69 | // extractTenantConfigMap extracts tenants.yaml data if valid. 70 | // This is to be used to configure tenant's authentication spec when exists. 71 | func extractTenantConfigMap(cm *corev1.ConfigMap) (*tenantsConfigJSON, error) { 72 | // Extract required fields from tenants.yaml 73 | tenantConfigYAML, ok := cm.BinaryData[LokiGatewayTenantFileName] 74 | if !ok { 75 | return nil, kverrors.New("missing tenants.yaml file in configMap.") 76 | } 77 | 78 | tenantConfigJSON, err := yaml.YAMLToJSON(tenantConfigYAML) 79 | if err != nil { 80 | return nil, kverrors.New("error in converting tenant config yaml to json.") 81 | } 82 | 83 | var tenantConfig tenantsConfigJSON 84 | err = json.Unmarshal(tenantConfigJSON, &tenantConfig) 85 | if err != nil { 86 | return nil, kverrors.New("error in unmarshalling tenant config to struct.") 87 | } 88 | 89 | return &tenantConfig, nil 90 | } 91 | -------------------------------------------------------------------------------- /internal/handlers/internal/gateway/tenant_configmap_test.go: -------------------------------------------------------------------------------- 1 | package gateway 2 | 3 | import ( 4 | "context" 
5 | "testing" 6 | 7 | "github.com/ViaQ/loki-operator/internal/manifests/openshift" 8 | 9 | "github.com/ViaQ/loki-operator/internal/external/k8s/k8sfakes" 10 | "github.com/stretchr/testify/require" 11 | corev1 "k8s.io/api/core/v1" 12 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 13 | "k8s.io/apimachinery/pkg/types" 14 | ctrl "sigs.k8s.io/controller-runtime" 15 | "sigs.k8s.io/controller-runtime/pkg/client" 16 | ) 17 | 18 | var tenantConfigData = []byte(` 19 | tenants: 20 | - name: application 21 | id: test-123 22 | openshift: 23 | serviceAccount: lokistack-gateway-lokistack-dev 24 | cookieSecret: test123 25 | - name: infrastructure 26 | id: test-456 27 | openshift: 28 | serviceAccount: lokistack-gateway-lokistack-dev 29 | cookieSecret: test456 30 | - name: audit 31 | id: test-789 32 | openshift: 33 | serviceAccount: lokistack-gateway-lokistack-dev 34 | cookieSecret: test789 35 | `) 36 | 37 | func TestGetTenantConfigMapData_ConfigMapExist(t *testing.T) { 38 | k := &k8sfakes.FakeClient{} 39 | r := ctrl.Request{ 40 | NamespacedName: types.NamespacedName{ 41 | Name: "lokistack-gateway", 42 | Namespace: "some-ns", 43 | }, 44 | } 45 | 46 | k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { 47 | if name.Name == "lokistack-gateway" && name.Namespace == "some-ns" { 48 | k.SetClientObject(object, &corev1.ConfigMap{ 49 | ObjectMeta: metav1.ObjectMeta{ 50 | Name: "lokistack-gateway", 51 | Namespace: "some-ns", 52 | }, 53 | BinaryData: map[string][]byte{ 54 | "tenants.yaml": tenantConfigData, 55 | }, 56 | }) 57 | } 58 | return nil 59 | } 60 | 61 | ts := GetTenantConfigMapData(context.TODO(), k, r) 62 | require.NotNil(t, ts) 63 | 64 | expected := map[string]openshift.TenantData{ 65 | "application": { 66 | TenantID: "test-123", 67 | CookieSecret: "test123", 68 | }, 69 | "infrastructure": { 70 | TenantID: "test-456", 71 | CookieSecret: "test456", 72 | }, 73 | "audit": { 74 | TenantID: "test-789", 75 | CookieSecret: "test789", 76 | }, 77 | 
} 78 | require.Equal(t, expected, ts) 79 | } 80 | 81 | func TestGetTenantConfigMapData_ConfigMapNotExist(t *testing.T) { 82 | k := &k8sfakes.FakeClient{} 83 | r := ctrl.Request{ 84 | NamespacedName: types.NamespacedName{ 85 | Name: "lokistack-gateway", 86 | Namespace: "some-ns", 87 | }, 88 | } 89 | 90 | k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { 91 | return nil 92 | } 93 | 94 | ts := GetTenantConfigMapData(context.TODO(), k, r) 95 | require.Nil(t, ts) 96 | } 97 | -------------------------------------------------------------------------------- /internal/handlers/internal/gateway/tenant_secrets.go: -------------------------------------------------------------------------------- 1 | package gateway 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/ViaQ/logerr/kverrors" 8 | 9 | lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1" 10 | "github.com/ViaQ/loki-operator/internal/external/k8s" 11 | "github.com/ViaQ/loki-operator/internal/handlers/internal/secrets" 12 | "github.com/ViaQ/loki-operator/internal/manifests" 13 | "github.com/ViaQ/loki-operator/internal/status" 14 | 15 | corev1 "k8s.io/api/core/v1" 16 | apierrors "k8s.io/apimachinery/pkg/api/errors" 17 | ctrl "sigs.k8s.io/controller-runtime" 18 | "sigs.k8s.io/controller-runtime/pkg/client" 19 | ) 20 | 21 | // GetTenantSecrets returns the list to gateway tenant secrets for a tenant mode. 22 | // For modes static and dynamic the secrets are fetched from external provided 23 | // secrets. For mode openshift-logging a secret per default tenants are created. 24 | // All secrets live in the same namespace as the lokistack request. 
25 | func GetTenantSecrets( 26 | ctx context.Context, 27 | k k8s.Client, 28 | req ctrl.Request, 29 | stack *lokiv1beta1.LokiStack, 30 | ) ([]*manifests.TenantSecrets, error) { 31 | var ( 32 | tenantSecrets []*manifests.TenantSecrets 33 | gatewaySecret corev1.Secret 34 | ) 35 | 36 | for _, tenant := range stack.Spec.Tenants.Authentication { 37 | key := client.ObjectKey{Name: tenant.OIDC.Secret.Name, Namespace: req.Namespace} 38 | if err := k.Get(ctx, key, &gatewaySecret); err != nil { 39 | if apierrors.IsNotFound(err) { 40 | statusErr := status.SetDegradedCondition(ctx, k, req, 41 | fmt.Sprintf("Missing secrets for tenant %s", tenant.TenantName), 42 | lokiv1beta1.ReasonMissingGatewayTenantSecret, 43 | ) 44 | if statusErr != nil { 45 | return nil, statusErr 46 | } 47 | 48 | return nil, kverrors.Wrap(err, "Missing gateway secrets") 49 | } 50 | return nil, kverrors.Wrap(err, "failed to lookup lokistack gateway tenant secret", 51 | "name", key) 52 | } 53 | 54 | var ts *manifests.TenantSecrets 55 | ts, err := secrets.ExtractGatewaySecret(&gatewaySecret, tenant.TenantName) 56 | if err != nil { 57 | statusErr := status.SetDegradedCondition(ctx, k, req, 58 | "Invalid gateway tenant secret contents", 59 | lokiv1beta1.ReasonInvalidGatewayTenantSecret, 60 | ) 61 | if statusErr != nil { 62 | return nil, statusErr 63 | } 64 | 65 | return nil, kverrors.Wrap(err, "Invalid gateway tenant secret") 66 | } 67 | tenantSecrets = append(tenantSecrets, ts) 68 | } 69 | 70 | return tenantSecrets, nil 71 | } 72 | -------------------------------------------------------------------------------- /internal/handlers/internal/gateway/tenant_secrets_test.go: -------------------------------------------------------------------------------- 1 | package gateway 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/require" 8 | 9 | lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1" 10 | "github.com/ViaQ/loki-operator/internal/external/k8s/k8sfakes" 11 | 
"github.com/ViaQ/loki-operator/internal/manifests" 12 | 13 | corev1 "k8s.io/api/core/v1" 14 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 15 | "k8s.io/apimachinery/pkg/types" 16 | ctrl "sigs.k8s.io/controller-runtime" 17 | "sigs.k8s.io/controller-runtime/pkg/client" 18 | ) 19 | 20 | func TestGetTenantSecrets_StaticMode(t *testing.T) { 21 | k := &k8sfakes.FakeClient{} 22 | r := ctrl.Request{ 23 | NamespacedName: types.NamespacedName{ 24 | Name: "my-stack", 25 | Namespace: "some-ns", 26 | }, 27 | } 28 | 29 | s := &lokiv1beta1.LokiStack{ 30 | ObjectMeta: metav1.ObjectMeta{ 31 | Name: "mystack", 32 | Namespace: "some-ns", 33 | }, 34 | Spec: lokiv1beta1.LokiStackSpec{ 35 | Tenants: &lokiv1beta1.TenantsSpec{ 36 | Mode: lokiv1beta1.Static, 37 | Authentication: []lokiv1beta1.AuthenticationSpec{ 38 | { 39 | TenantName: "test", 40 | TenantID: "test", 41 | OIDC: &lokiv1beta1.OIDCSpec{ 42 | Secret: &lokiv1beta1.TenantSecretSpec{ 43 | Name: "test", 44 | }, 45 | }, 46 | }, 47 | }, 48 | }, 49 | }, 50 | } 51 | 52 | k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { 53 | if name.Name == "test" && name.Namespace == "some-ns" { 54 | k.SetClientObject(object, &corev1.Secret{ 55 | ObjectMeta: metav1.ObjectMeta{ 56 | Name: "test", 57 | Namespace: "some-ns", 58 | }, 59 | Data: map[string][]byte{ 60 | "clientID": []byte("test"), 61 | "clientSecret": []byte("test"), 62 | "issuerCAPath": []byte("/path/to/ca/file"), 63 | }, 64 | }) 65 | } 66 | return nil 67 | } 68 | 69 | ts, err := GetTenantSecrets(context.TODO(), k, r, s) 70 | require.NoError(t, err) 71 | 72 | expected := []*manifests.TenantSecrets{ 73 | { 74 | TenantName: "test", 75 | ClientID: "test", 76 | ClientSecret: "test", 77 | IssuerCAPath: "/path/to/ca/file", 78 | }, 79 | } 80 | require.ElementsMatch(t, ts, expected) 81 | } 82 | 83 | func TestGetTenantSecrets_DynamicMode(t *testing.T) { 84 | k := &k8sfakes.FakeClient{} 85 | r := ctrl.Request{ 86 | NamespacedName: types.NamespacedName{ 87 
| Name: "my-stack", 88 | Namespace: "some-ns", 89 | }, 90 | } 91 | 92 | s := &lokiv1beta1.LokiStack{ 93 | ObjectMeta: metav1.ObjectMeta{ 94 | Name: "mystack", 95 | Namespace: "some-ns", 96 | }, 97 | Spec: lokiv1beta1.LokiStackSpec{ 98 | Tenants: &lokiv1beta1.TenantsSpec{ 99 | Mode: lokiv1beta1.Dynamic, 100 | Authentication: []lokiv1beta1.AuthenticationSpec{ 101 | { 102 | TenantName: "test", 103 | TenantID: "test", 104 | OIDC: &lokiv1beta1.OIDCSpec{ 105 | Secret: &lokiv1beta1.TenantSecretSpec{ 106 | Name: "test", 107 | }, 108 | }, 109 | }, 110 | }, 111 | }, 112 | }, 113 | } 114 | 115 | k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error { 116 | if name.Name == "test" && name.Namespace == "some-ns" { 117 | k.SetClientObject(object, &corev1.Secret{ 118 | ObjectMeta: metav1.ObjectMeta{ 119 | Name: "test", 120 | Namespace: "some-ns", 121 | }, 122 | Data: map[string][]byte{ 123 | "clientID": []byte("test"), 124 | "clientSecret": []byte("test"), 125 | "issuerCAPath": []byte("/path/to/ca/file"), 126 | }, 127 | }) 128 | } 129 | return nil 130 | } 131 | 132 | ts, err := GetTenantSecrets(context.TODO(), k, r, s) 133 | require.NoError(t, err) 134 | 135 | expected := []*manifests.TenantSecrets{ 136 | { 137 | TenantName: "test", 138 | ClientID: "test", 139 | ClientSecret: "test", 140 | IssuerCAPath: "/path/to/ca/file", 141 | }, 142 | } 143 | require.ElementsMatch(t, ts, expected) 144 | } 145 | -------------------------------------------------------------------------------- /internal/handlers/internal/secrets/secrets.go: -------------------------------------------------------------------------------- 1 | package secrets 2 | 3 | import ( 4 | "github.com/ViaQ/logerr/kverrors" 5 | "github.com/ViaQ/loki-operator/internal/manifests" 6 | 7 | corev1 "k8s.io/api/core/v1" 8 | ) 9 | 10 | // Extract reads a k8s secret into a manifest object storage struct if valid. 
11 | func Extract(s *corev1.Secret) (*manifests.ObjectStorage, error) { 12 | // Extract and validate mandatory fields 13 | endpoint, ok := s.Data["endpoint"] 14 | if !ok { 15 | return nil, kverrors.New("missing secret field", "field", "endpoint") 16 | } 17 | buckets, ok := s.Data["bucketnames"] 18 | if !ok { 19 | return nil, kverrors.New("missing secret field", "field", "bucketnames") 20 | } 21 | // TODO buckets are comma-separated list 22 | id, ok := s.Data["access_key_id"] 23 | if !ok { 24 | return nil, kverrors.New("missing secret field", "field", "access_key_id") 25 | } 26 | secret, ok := s.Data["access_key_secret"] 27 | if !ok { 28 | return nil, kverrors.New("missing secret field", "field", "access_key_secret") 29 | } 30 | 31 | // Extract and validate optional fields 32 | region, ok := s.Data["region"] 33 | if !ok { 34 | region = []byte("") 35 | } 36 | 37 | return &manifests.ObjectStorage{ 38 | Endpoint: string(endpoint), 39 | Buckets: string(buckets), 40 | AccessKeyID: string(id), 41 | AccessKeySecret: string(secret), 42 | Region: string(region), 43 | }, nil 44 | } 45 | 46 | // ExtractGatewaySecret reads a k8s secret into a manifest tenant secret struct if valid. 
47 | func ExtractGatewaySecret(s *corev1.Secret, tenantName string) (*manifests.TenantSecrets, error) { 48 | // Extract and validate mandatory fields 49 | clientID, ok := s.Data["clientID"] 50 | if !ok { 51 | return nil, kverrors.New("missing clientID field", "field", "clientID") 52 | } 53 | clientSecret, ok := s.Data["clientSecret"] 54 | if !ok { 55 | return nil, kverrors.New("missing clientSecret field", "field", "clientSecret") 56 | } 57 | issuerCAPath, ok := s.Data["issuerCAPath"] 58 | if !ok { 59 | return nil, kverrors.New("missing issuerCAPath field", "field", "issuerCAPath") 60 | } 61 | 62 | return &manifests.TenantSecrets{ 63 | TenantName: tenantName, 64 | ClientID: string(clientID), 65 | ClientSecret: string(clientSecret), 66 | IssuerCAPath: string(issuerCAPath), 67 | }, nil 68 | } 69 | -------------------------------------------------------------------------------- /internal/handlers/internal/secrets/secrets_test.go: -------------------------------------------------------------------------------- 1 | package secrets_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/ViaQ/loki-operator/internal/handlers/internal/secrets" 7 | "github.com/stretchr/testify/require" 8 | corev1 "k8s.io/api/core/v1" 9 | ) 10 | 11 | func TestExtract(t *testing.T) { 12 | type test struct { 13 | name string 14 | secret *corev1.Secret 15 | wantErr bool 16 | } 17 | table := []test{ 18 | { 19 | name: "missing endpoint", 20 | secret: &corev1.Secret{}, 21 | wantErr: true, 22 | }, 23 | { 24 | name: "missing bucketnames", 25 | secret: &corev1.Secret{ 26 | Data: map[string][]byte{ 27 | "endpoint": []byte("here"), 28 | }, 29 | }, 30 | wantErr: true, 31 | }, 32 | { 33 | name: "missing access_key_id", 34 | secret: &corev1.Secret{ 35 | Data: map[string][]byte{ 36 | "endpoint": []byte("here"), 37 | "bucketnames": []byte("this,that"), 38 | }, 39 | }, 40 | wantErr: true, 41 | }, 42 | { 43 | name: "missing access_key_secret", 44 | secret: &corev1.Secret{ 45 | Data: map[string][]byte{ 46 | 
"endpoint": []byte("here"), 47 | "bucketnames": []byte("this,that"), 48 | "access_key_id": []byte("id"), 49 | }, 50 | }, 51 | wantErr: true, 52 | }, 53 | { 54 | name: "all set", 55 | secret: &corev1.Secret{ 56 | Data: map[string][]byte{ 57 | "endpoint": []byte("here"), 58 | "bucketnames": []byte("this,that"), 59 | "access_key_id": []byte("id"), 60 | "access_key_secret": []byte("secret"), 61 | }, 62 | }, 63 | }, 64 | } 65 | for _, tst := range table { 66 | tst := tst 67 | t.Run(tst.name, func(t *testing.T) { 68 | t.Parallel() 69 | 70 | _, err := secrets.Extract(tst.secret) 71 | if !tst.wantErr { 72 | require.NoError(t, err) 73 | } 74 | if tst.wantErr { 75 | require.NotNil(t, err) 76 | } 77 | }) 78 | } 79 | } 80 | 81 | func TestExtractGatewaySecret(t *testing.T) { 82 | type test struct { 83 | name string 84 | tenantName string 85 | secret *corev1.Secret 86 | wantErr bool 87 | } 88 | table := []test{ 89 | { 90 | name: "missing clientID", 91 | tenantName: "tenant-a", 92 | secret: &corev1.Secret{}, 93 | wantErr: true, 94 | }, 95 | { 96 | name: "missing clientSecret", 97 | tenantName: "tenant-a", 98 | secret: &corev1.Secret{ 99 | Data: map[string][]byte{ 100 | "clientID": []byte("test"), 101 | }, 102 | }, 103 | wantErr: true, 104 | }, 105 | { 106 | name: "missing issuerCAPath", 107 | tenantName: "tenant-a", 108 | secret: &corev1.Secret{ 109 | Data: map[string][]byte{ 110 | "clientID": []byte("test"), 111 | "clientSecret": []byte("test"), 112 | }, 113 | }, 114 | wantErr: true, 115 | }, 116 | { 117 | name: "all set", 118 | tenantName: "tenant-a", 119 | secret: &corev1.Secret{ 120 | Data: map[string][]byte{ 121 | "clientID": []byte("test"), 122 | "clientSecret": []byte("test"), 123 | "issuerCAPath": []byte("/tmp/test"), 124 | }, 125 | }, 126 | }, 127 | } 128 | for _, tst := range table { 129 | tst := tst 130 | t.Run(tst.name, func(t *testing.T) { 131 | t.Parallel() 132 | 133 | _, err := secrets.ExtractGatewaySecret(tst.secret, tst.tenantName) 134 | if !tst.wantErr { 135 | 
require.NoError(t, err) 136 | } 137 | if tst.wantErr { 138 | require.NotNil(t, err) 139 | } 140 | }) 141 | } 142 | } 143 | -------------------------------------------------------------------------------- /internal/manifests/build.go: -------------------------------------------------------------------------------- 1 | package manifests 2 | 3 | import ( 4 | "github.com/ViaQ/logerr/kverrors" 5 | lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1" 6 | "github.com/ViaQ/loki-operator/internal/manifests/internal" 7 | 8 | "github.com/imdario/mergo" 9 | "sigs.k8s.io/controller-runtime/pkg/client" 10 | ) 11 | 12 | // BuildAll builds all manifests required to run a Loki Stack 13 | func BuildAll(opts Options) ([]client.Object, error) { 14 | res := make([]client.Object, 0) 15 | 16 | cm, sha1C, mapErr := LokiConfigMap(opts) 17 | if mapErr != nil { 18 | return nil, mapErr 19 | } 20 | opts.ConfigSHA1 = sha1C 21 | 22 | distributorObjs, err := BuildDistributor(opts) 23 | if err != nil { 24 | return nil, err 25 | } 26 | 27 | ingesterObjs, err := BuildIngester(opts) 28 | if err != nil { 29 | return nil, err 30 | } 31 | 32 | querierObjs, err := BuildQuerier(opts) 33 | if err != nil { 34 | return nil, err 35 | } 36 | 37 | compactorObjs, err := BuildCompactor(opts) 38 | if err != nil { 39 | return nil, err 40 | } 41 | 42 | queryFrontendObjs, err := BuildQueryFrontend(opts) 43 | if err != nil { 44 | return nil, err 45 | } 46 | 47 | indexGatewayObjs, err := BuildIndexGateway(opts) 48 | if err != nil { 49 | return nil, err 50 | } 51 | 52 | res = append(res, cm) 53 | res = append(res, distributorObjs...) 54 | res = append(res, ingesterObjs...) 55 | res = append(res, querierObjs...) 56 | res = append(res, compactorObjs...) 57 | res = append(res, queryFrontendObjs...) 58 | res = append(res, indexGatewayObjs...) 
59 | res = append(res, BuildLokiGossipRingService(opts.Name)) 60 | 61 | if opts.Flags.EnableGateway { 62 | gatewayObjects, err := BuildGateway(opts) 63 | if err != nil { 64 | return nil, err 65 | } 66 | 67 | res = append(res, gatewayObjects...) 68 | } 69 | 70 | if opts.Flags.EnableServiceMonitors { 71 | res = append(res, BuildServiceMonitors(opts)...) 72 | } 73 | 74 | return res, nil 75 | } 76 | 77 | // DefaultLokiStackSpec returns the default configuration for a LokiStack of 78 | // the specified size 79 | func DefaultLokiStackSpec(size lokiv1beta1.LokiStackSizeType) *lokiv1beta1.LokiStackSpec { 80 | defaults := internal.StackSizeTable[size] 81 | return (&defaults).DeepCopy() 82 | } 83 | 84 | // ApplyDefaultSettings manipulates the options to conform to 85 | // build specifications 86 | func ApplyDefaultSettings(opts *Options) error { 87 | spec := DefaultLokiStackSpec(opts.Stack.Size) 88 | 89 | if err := mergo.Merge(spec, opts.Stack, mergo.WithOverride); err != nil { 90 | return kverrors.Wrap(err, "failed merging stack user options", "name", opts.Name) 91 | } 92 | 93 | strictOverrides := lokiv1beta1.LokiStackSpec{ 94 | Template: &lokiv1beta1.LokiTemplateSpec{ 95 | Compactor: &lokiv1beta1.LokiComponentSpec{ 96 | // Compactor is a singelton application. 97 | // Only one replica allowed!!! 
98 | Replicas: 1, 99 | }, 100 | }, 101 | } 102 | 103 | if err := mergo.Merge(spec, strictOverrides, mergo.WithOverride); err != nil { 104 | return kverrors.Wrap(err, "failed to merge strict defaults") 105 | } 106 | 107 | opts.ResourceRequirements = internal.ResourceRequirementsTable[opts.Stack.Size] 108 | opts.Stack = *spec 109 | 110 | return nil 111 | } 112 | -------------------------------------------------------------------------------- /internal/manifests/compactor_test.go: -------------------------------------------------------------------------------- 1 | package manifests_test 2 | 3 | import ( 4 | "testing" 5 | 6 | lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1" 7 | "github.com/ViaQ/loki-operator/internal/manifests" 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func TestNewCompactorStatefulSet_SelectorMatchesLabels(t *testing.T) { 12 | // You must set the .spec.selector field of a StatefulSet to match the labels of 13 | // its .spec.template.metadata.labels. Prior to Kubernetes 1.8, the 14 | // .spec.selector field was defaulted when omitted. In 1.8 and later versions, 15 | // failing to specify a matching Pod Selector will result in a validation error 16 | // during StatefulSet creation. 
17 | // See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-selector 18 | sts := manifests.NewCompactorStatefulSet(manifests.Options{ 19 | Name: "abcd", 20 | Namespace: "efgh", 21 | Stack: lokiv1beta1.LokiStackSpec{ 22 | StorageClassName: "standard", 23 | Template: &lokiv1beta1.LokiTemplateSpec{ 24 | Compactor: &lokiv1beta1.LokiComponentSpec{ 25 | Replicas: 1, 26 | }, 27 | }, 28 | }, 29 | }) 30 | 31 | l := sts.Spec.Template.GetObjectMeta().GetLabels() 32 | for key, value := range sts.Spec.Selector.MatchLabels { 33 | require.Contains(t, l, key) 34 | require.Equal(t, l[key], value) 35 | } 36 | } 37 | 38 | func TestNewCompactorStatefulSet_HasTemplateConfigHashAnnotation(t *testing.T) { 39 | ss := manifests.NewCompactorStatefulSet(manifests.Options{ 40 | Name: "abcd", 41 | Namespace: "efgh", 42 | ConfigSHA1: "deadbeef", 43 | Stack: lokiv1beta1.LokiStackSpec{ 44 | StorageClassName: "standard", 45 | Template: &lokiv1beta1.LokiTemplateSpec{ 46 | Compactor: &lokiv1beta1.LokiComponentSpec{ 47 | Replicas: 1, 48 | }, 49 | }, 50 | }, 51 | }) 52 | expected := "loki.openshift.io/config-hash" 53 | annotations := ss.Spec.Template.Annotations 54 | require.Contains(t, annotations, expected) 55 | require.Equal(t, annotations[expected], "deadbeef") 56 | } 57 | -------------------------------------------------------------------------------- /internal/manifests/config.go: -------------------------------------------------------------------------------- 1 | package manifests 2 | 3 | import ( 4 | "crypto/sha1" 5 | "fmt" 6 | 7 | "github.com/ViaQ/loki-operator/internal/manifests/internal/config" 8 | corev1 "k8s.io/api/core/v1" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | ) 11 | 12 | // LokiConfigMap creates the single configmap containing the loki configuration for the whole cluster 13 | func LokiConfigMap(opt Options) (*corev1.ConfigMap, string, error) { 14 | cfg := ConfigOptions(opt) 15 | c, rc, err := config.Build(cfg) 16 | if err != nil { 17 | return 
nil, "", err 18 | } 19 | 20 | s := sha1.New() 21 | _, err = s.Write(c) 22 | if err != nil { 23 | return nil, "", err 24 | } 25 | sha1C := fmt.Sprintf("%x", s.Sum(nil)) 26 | 27 | return &corev1.ConfigMap{ 28 | TypeMeta: metav1.TypeMeta{ 29 | Kind: "ConfigMap", 30 | APIVersion: corev1.SchemeGroupVersion.String(), 31 | }, 32 | ObjectMeta: metav1.ObjectMeta{ 33 | Name: lokiConfigMapName(opt.Name), 34 | Labels: commonLabels(opt.Name), 35 | }, 36 | BinaryData: map[string][]byte{ 37 | config.LokiConfigFileName: c, 38 | config.LokiRuntimeConfigFileName: rc, 39 | }, 40 | }, sha1C, nil 41 | } 42 | 43 | // ConfigOptions converts Options to config.Options 44 | func ConfigOptions(opt Options) config.Options { 45 | return config.Options{ 46 | Stack: opt.Stack, 47 | Namespace: opt.Namespace, 48 | Name: opt.Name, 49 | FrontendWorker: config.Address{ 50 | FQDN: fqdn(NewQueryFrontendGRPCService(opt).GetName(), opt.Namespace), 51 | Port: grpcPort, 52 | }, 53 | GossipRing: config.Address{ 54 | FQDN: fqdn(BuildLokiGossipRingService(opt.Name).GetName(), opt.Namespace), 55 | Port: gossipPort, 56 | }, 57 | Querier: config.Address{ 58 | FQDN: fqdn(NewQuerierHTTPService(opt).GetName(), opt.Namespace), 59 | Port: httpPort, 60 | }, 61 | IndexGateway: config.Address{ 62 | FQDN: fqdn(NewIndexGatewayGRPCService(opt).GetName(), opt.Namespace), 63 | Port: grpcPort, 64 | }, 65 | StorageDirectory: dataDirectory, 66 | ObjectStorage: config.ObjectStorage{ 67 | Endpoint: opt.ObjectStorage.Endpoint, 68 | Buckets: opt.ObjectStorage.Buckets, 69 | Region: opt.ObjectStorage.Region, 70 | AccessKeyID: opt.ObjectStorage.AccessKeyID, 71 | AccessKeySecret: opt.ObjectStorage.AccessKeySecret, 72 | }, 73 | QueryParallelism: config.Parallelism{ 74 | QuerierCPULimits: opt.ResourceRequirements.Querier.Requests.Cpu().Value(), 75 | QueryFrontendReplicas: opt.Stack.Template.QueryFrontend.Replicas, 76 | }, 77 | WriteAheadLog: config.WriteAheadLog{ 78 | Directory: walDirectory, 79 | IngesterMemoryRequest: 
opt.ResourceRequirements.Ingester.Requests.Memory().Value(), 80 | }, 81 | } 82 | } 83 | 84 | func lokiConfigMapName(stackName string) string { 85 | return fmt.Sprintf("loki-config-%s", stackName) 86 | } 87 | -------------------------------------------------------------------------------- /internal/manifests/distributor_test.go: -------------------------------------------------------------------------------- 1 | package manifests_test 2 | 3 | import ( 4 | "testing" 5 | 6 | lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1" 7 | "github.com/ViaQ/loki-operator/internal/manifests" 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func TestNewDistributorDeployment_SelectorMatchesLabels(t *testing.T) { 12 | dpl := manifests.NewDistributorDeployment(manifests.Options{ 13 | Name: "abcd", 14 | Namespace: "efgh", 15 | Stack: lokiv1beta1.LokiStackSpec{ 16 | Template: &lokiv1beta1.LokiTemplateSpec{ 17 | Distributor: &lokiv1beta1.LokiComponentSpec{ 18 | Replicas: 1, 19 | }, 20 | }, 21 | }, 22 | }) 23 | 24 | l := dpl.Spec.Template.GetObjectMeta().GetLabels() 25 | for key, value := range dpl.Spec.Selector.MatchLabels { 26 | require.Contains(t, l, key) 27 | require.Equal(t, l[key], value) 28 | } 29 | } 30 | 31 | func TestNewDistributorDeployme_HasTemplateConfigHashAnnotation(t *testing.T) { 32 | ss := manifests.NewDistributorDeployment(manifests.Options{ 33 | Name: "abcd", 34 | Namespace: "efgh", 35 | ConfigSHA1: "deadbeef", 36 | Stack: lokiv1beta1.LokiStackSpec{ 37 | Template: &lokiv1beta1.LokiTemplateSpec{ 38 | Distributor: &lokiv1beta1.LokiComponentSpec{ 39 | Replicas: 1, 40 | }, 41 | }, 42 | }, 43 | }) 44 | 45 | expected := "loki.openshift.io/config-hash" 46 | annotations := ss.Spec.Template.Annotations 47 | require.Contains(t, annotations, expected) 48 | require.Equal(t, annotations[expected], "deadbeef") 49 | } 50 | -------------------------------------------------------------------------------- /internal/manifests/gateway_tenants.go: 
-------------------------------------------------------------------------------- 1 | package manifests 2 | 3 | import ( 4 | "github.com/ViaQ/logerr/kverrors" 5 | lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1" 6 | "github.com/ViaQ/loki-operator/internal/manifests/internal/gateway" 7 | "github.com/ViaQ/loki-operator/internal/manifests/openshift" 8 | "github.com/imdario/mergo" 9 | monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" 10 | 11 | appsv1 "k8s.io/api/apps/v1" 12 | corev1 "k8s.io/api/core/v1" 13 | networkingv1 "k8s.io/api/networking/v1" 14 | "sigs.k8s.io/controller-runtime/pkg/client" 15 | ) 16 | 17 | // ApplyGatewayDefaultOptions applies defaults on the LokiStackSpec depending on selected 18 | // tenant mode. Currently nothing is applied for modes static and dynamic. For mode openshift-logging 19 | // the tenant spec is filled with defaults for authentication and authorization. 20 | func ApplyGatewayDefaultOptions(opts *Options) error { 21 | if opts.Stack.Tenants == nil { 22 | return nil 23 | } 24 | 25 | switch opts.Stack.Tenants.Mode { 26 | case lokiv1beta1.Static, lokiv1beta1.Dynamic: 27 | return nil // continue using user input 28 | 29 | case lokiv1beta1.OpenshiftLogging: 30 | defaults := openshift.NewOptions( 31 | opts.Name, 32 | GatewayName(opts.Name), 33 | opts.Namespace, 34 | opts.GatewayBaseDomain, 35 | serviceNameGatewayHTTP(opts.Name), 36 | gatewayHTTPPortName, 37 | ComponentLabels(LabelGatewayComponent, opts.Name), 38 | opts.Flags.EnableCertificateSigningService, 39 | opts.TenantConfigMap, 40 | ) 41 | 42 | if err := mergo.Merge(&opts.OpenShiftOptions, &defaults, mergo.WithOverride); err != nil { 43 | return kverrors.Wrap(err, "failed to merge defaults for mode openshift logging") 44 | } 45 | 46 | } 47 | 48 | return nil 49 | } 50 | 51 | func configureDeploymentForMode(d *appsv1.Deployment, mode lokiv1beta1.ModeType, flags FeatureFlags) error { 52 | switch mode { 53 | case lokiv1beta1.Static, 
lokiv1beta1.Dynamic: 54 | return nil // nothing to configure 55 | case lokiv1beta1.OpenshiftLogging: 56 | return openshift.ConfigureGatewayDeployment( 57 | d, 58 | gatewayContainerName, 59 | tlsMetricsSercetVolume, 60 | gateway.LokiGatewayTLSDir, 61 | gateway.LokiGatewayCertFile, 62 | gateway.LokiGatewayKeyFile, 63 | gateway.LokiGatewayCABundleDir, 64 | gateway.LokiGatewayCAFile, 65 | flags.EnableTLSServiceMonitorConfig, 66 | flags.EnableCertificateSigningService, 67 | ) 68 | } 69 | 70 | return nil 71 | } 72 | 73 | func configureServiceForMode(s *corev1.ServiceSpec, mode lokiv1beta1.ModeType) error { 74 | switch mode { 75 | case lokiv1beta1.Static, lokiv1beta1.Dynamic: 76 | return nil // nothing to configure 77 | case lokiv1beta1.OpenshiftLogging: 78 | return openshift.ConfigureGatewayService(s) 79 | } 80 | 81 | return nil 82 | } 83 | 84 | func configureGatewayObjsForMode(objs []client.Object, opts Options) []client.Object { 85 | switch opts.Stack.Tenants.Mode { 86 | case lokiv1beta1.Static, lokiv1beta1.Dynamic: 87 | // nothing to configure 88 | case lokiv1beta1.OpenshiftLogging: 89 | openShiftObjs := openshift.Build(opts.OpenShiftOptions) 90 | 91 | var cObjs []client.Object 92 | for _, o := range objs { 93 | switch o.(type) { 94 | // Drop Ingress in favor of Route in OpenShift. 95 | // Ingress is not supported as OAuthRedirectReference 96 | // in ServiceAccounts used as OAuthClient in OpenShift. 97 | case *networkingv1.Ingress: 98 | continue 99 | } 100 | 101 | cObjs = append(cObjs, o) 102 | } 103 | 104 | objs = append(cObjs, openShiftObjs...) 
105 | } 106 | 107 | return objs 108 | } 109 | 110 | func configureServiceMonitorForMode(sm *monitoringv1.ServiceMonitor, mode lokiv1beta1.ModeType, flags FeatureFlags) error { 111 | switch mode { 112 | case lokiv1beta1.Static, lokiv1beta1.Dynamic: 113 | return nil // nothing to configure 114 | case lokiv1beta1.OpenshiftLogging: 115 | return openshift.ConfigureGatewayServiceMonitor(sm, flags.EnableTLSServiceMonitorConfig) 116 | } 117 | 118 | return nil 119 | } 120 | -------------------------------------------------------------------------------- /internal/manifests/indexgateway_test.go: -------------------------------------------------------------------------------- 1 | package manifests_test 2 | 3 | import ( 4 | "testing" 5 | 6 | lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1" 7 | "github.com/ViaQ/loki-operator/internal/manifests" 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func TestNewIndexGatewayStatefulSet_HasTemplateConfigHashAnnotation(t *testing.T) { 12 | ss := manifests.NewIndexGatewayStatefulSet(manifests.Options{ 13 | Name: "abcd", 14 | Namespace: "efgh", 15 | ConfigSHA1: "deadbeef", 16 | Stack: lokiv1beta1.LokiStackSpec{ 17 | StorageClassName: "standard", 18 | Template: &lokiv1beta1.LokiTemplateSpec{ 19 | IndexGateway: &lokiv1beta1.LokiComponentSpec{ 20 | Replicas: 1, 21 | }, 22 | }, 23 | }, 24 | }) 25 | 26 | expected := "loki.openshift.io/config-hash" 27 | annotations := ss.Spec.Template.Annotations 28 | require.Contains(t, annotations, expected) 29 | require.Equal(t, annotations[expected], "deadbeef") 30 | } 31 | 32 | func TestNewIndexGatewayStatefulSet_SelectorMatchesLabels(t *testing.T) { 33 | // You must set the .spec.selector field of a StatefulSet to match the labels of 34 | // its .spec.template.metadata.labels. Prior to Kubernetes 1.8, the 35 | // .spec.selector field was defaulted when omitted. 
In 1.8 and later versions, 36 | // failing to specify a matching Pod Selector will result in a validation error 37 | // during StatefulSet creation. 38 | // See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-selector 39 | ss := manifests.NewIndexGatewayStatefulSet(manifests.Options{ 40 | Name: "abcd", 41 | Namespace: "efgh", 42 | Stack: lokiv1beta1.LokiStackSpec{ 43 | StorageClassName: "standard", 44 | Template: &lokiv1beta1.LokiTemplateSpec{ 45 | IndexGateway: &lokiv1beta1.LokiComponentSpec{ 46 | Replicas: 1, 47 | }, 48 | }, 49 | }, 50 | }) 51 | 52 | l := ss.Spec.Template.GetObjectMeta().GetLabels() 53 | for key, value := range ss.Spec.Selector.MatchLabels { 54 | require.Contains(t, l, key) 55 | require.Equal(t, l[key], value) 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /internal/manifests/ingester_test.go: -------------------------------------------------------------------------------- 1 | package manifests_test 2 | 3 | import ( 4 | "testing" 5 | 6 | lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1" 7 | "github.com/ViaQ/loki-operator/internal/manifests" 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func TestNewIngesterStatefulSet_HasTemplateConfigHashAnnotation(t *testing.T) { 12 | ss := manifests.NewIngesterStatefulSet(manifests.Options{ 13 | Name: "abcd", 14 | Namespace: "efgh", 15 | ConfigSHA1: "deadbeef", 16 | Stack: lokiv1beta1.LokiStackSpec{ 17 | StorageClassName: "standard", 18 | Template: &lokiv1beta1.LokiTemplateSpec{ 19 | Ingester: &lokiv1beta1.LokiComponentSpec{ 20 | Replicas: 1, 21 | }, 22 | }, 23 | }, 24 | }) 25 | 26 | expected := "loki.openshift.io/config-hash" 27 | annotations := ss.Spec.Template.Annotations 28 | require.Contains(t, annotations, expected) 29 | require.Equal(t, annotations[expected], "deadbeef") 30 | } 31 | 32 | func TestNewIngesterStatefulSet_SelectorMatchesLabels(t *testing.T) { 33 | // You must set the .spec.selector field of a 
StatefulSet to match the labels of 34 | // its .spec.template.metadata.labels. Prior to Kubernetes 1.8, the 35 | // .spec.selector field was defaulted when omitted. In 1.8 and later versions, 36 | // failing to specify a matching Pod Selector will result in a validation error 37 | // during StatefulSet creation. 38 | // See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-selector 39 | sts := manifests.NewIngesterStatefulSet(manifests.Options{ 40 | Name: "abcd", 41 | Namespace: "efgh", 42 | Stack: lokiv1beta1.LokiStackSpec{ 43 | StorageClassName: "standard", 44 | Template: &lokiv1beta1.LokiTemplateSpec{ 45 | Ingester: &lokiv1beta1.LokiComponentSpec{ 46 | Replicas: 1, 47 | }, 48 | }, 49 | }, 50 | }) 51 | 52 | l := sts.Spec.Template.GetObjectMeta().GetLabels() 53 | for key, value := range sts.Spec.Selector.MatchLabels { 54 | require.Contains(t, l, key) 55 | require.Equal(t, l[key], value) 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /internal/manifests/internal/config/build.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "bytes" 5 | "embed" 6 | "io/ioutil" 7 | "text/template" 8 | 9 | "github.com/ViaQ/logerr/kverrors" 10 | ) 11 | 12 | const ( 13 | // LokiConfigFileName is the name of the config file in the configmap 14 | LokiConfigFileName = "config.yaml" 15 | // LokiRuntimeConfigFileName is the name of the runtime config file in the configmap 16 | LokiRuntimeConfigFileName = "runtime-config.yaml" 17 | // LokiConfigMountDir is the path that is mounted from the configmap 18 | LokiConfigMountDir = "/etc/loki/config" 19 | ) 20 | 21 | var ( 22 | //go:embed loki-config.yaml 23 | lokiConfigYAMLTmplFile embed.FS 24 | 25 | //go:embed loki-runtime-config.yaml 26 | lokiRuntimeConfigYAMLTmplFile embed.FS 27 | 28 | lokiConfigYAMLTmpl = template.Must(template.ParseFS(lokiConfigYAMLTmplFile, "loki-config.yaml")) 29 | 30 | 
lokiRuntimeConfigYAMLTmpl = template.Must(template.ParseFS(lokiRuntimeConfigYAMLTmplFile, "loki-runtime-config.yaml")) 31 | ) 32 | 33 | // Build builds a loki stack configuration files 34 | func Build(opts Options) ([]byte, []byte, error) { 35 | // Build loki config yaml 36 | w := bytes.NewBuffer(nil) 37 | err := lokiConfigYAMLTmpl.Execute(w, opts) 38 | if err != nil { 39 | return nil, nil, kverrors.Wrap(err, "failed to create loki configuration") 40 | } 41 | cfg, err := ioutil.ReadAll(w) 42 | if err != nil { 43 | return nil, nil, kverrors.Wrap(err, "failed to read configuration from buffer") 44 | } 45 | // Build loki runtime config yaml 46 | w = bytes.NewBuffer(nil) 47 | err = lokiRuntimeConfigYAMLTmpl.Execute(w, opts) 48 | if err != nil { 49 | return nil, nil, kverrors.Wrap(err, "failed to create loki runtime configuration") 50 | } 51 | rcfg, err := ioutil.ReadAll(w) 52 | if err != nil { 53 | return nil, nil, kverrors.Wrap(err, "failed to read configuration from buffer") 54 | } 55 | return cfg, rcfg, nil 56 | } 57 | -------------------------------------------------------------------------------- /internal/manifests/internal/config/loki-runtime-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | overrides: 3 | {{- range $tenant, $spec := .Stack.Limits.Tenants }} 4 | {{ $tenant }}: 5 | {{- if $l := $spec.IngestionLimits -}} 6 | {{ if $l.IngestionRate }} 7 | ingestion_rate_mb: {{ $l.IngestionRate }} 8 | {{- end -}} 9 | {{ if $l.IngestionBurstSize }} 10 | ingestion_burst_size_mb: {{ $l.IngestionBurstSize }} 11 | {{- end -}} 12 | {{ if $l.MaxLabelNameLength }} 13 | max_label_name_length: {{ $l.MaxLabelNameLength }} 14 | {{- end -}} 15 | {{ if $l.MaxLabelValueLength }} 16 | max_label_value_length: {{ $l.MaxLabelValueLength }} 17 | {{- end -}} 18 | {{ if $l.MaxLabelNamesPerSeries }} 19 | max_label_names_per_series: {{ $l.MaxLabelNamesPerSeries }} 20 | {{- end -}} 21 | {{ if $l.MaxLineSize }} 22 | max_line_size: {{ 
$l.MaxLineSize }} 23 | {{- end -}} 24 | {{ if $l.MaxGlobalStreamsPerTenant }} 25 | max_global_streams_per_user: {{ $l.MaxGlobalStreamsPerTenant }} 26 | {{- end -}} 27 | {{- end -}} 28 | {{- if $l := $spec.QueryLimits -}} 29 | {{ if $l.MaxEntriesLimitPerQuery }} 30 | max_entries_limit_per_query: {{ $spec.QueryLimits.MaxEntriesLimitPerQuery }} 31 | {{- end -}} 32 | {{ if $spec.QueryLimits.MaxChunksPerQuery }} 33 | max_chunks_per_query: {{ $spec.QueryLimits.MaxChunksPerQuery }} 34 | {{- end -}} 35 | {{ if $spec.QueryLimits.MaxQuerySeries }} 36 | max_query_series: {{ $spec.QueryLimits.MaxQuerySeries }} 37 | {{- end -}} 38 | {{- end -}} 39 | {{- end -}} 40 | -------------------------------------------------------------------------------- /internal/manifests/internal/config/options.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "fmt" 5 | "math" 6 | 7 | lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1" 8 | ) 9 | 10 | // Options is used to render the loki-config.yaml file template 11 | type Options struct { 12 | Stack lokiv1beta1.LokiStackSpec 13 | 14 | Namespace string 15 | Name string 16 | FrontendWorker Address 17 | GossipRing Address 18 | Querier Address 19 | IndexGateway Address 20 | StorageDirectory string 21 | ObjectStorage ObjectStorage 22 | QueryParallelism Parallelism 23 | WriteAheadLog WriteAheadLog 24 | } 25 | 26 | // Address FQDN and port for a k8s service. 27 | type Address struct { 28 | // FQDN is required 29 | FQDN string 30 | // Port is required 31 | Port int 32 | } 33 | 34 | // ObjectStorage for storage config. 35 | type ObjectStorage struct { 36 | Endpoint string 37 | Region string 38 | Buckets string 39 | AccessKeyID string 40 | AccessKeySecret string 41 | } 42 | 43 | // Parallelism for query processing parallelism 44 | // and rate limiting. 
type Parallelism struct {
	QuerierCPULimits      int64
	QueryFrontendReplicas int32
}

// WriteAheadLog for ingester processing
type WriteAheadLog struct {
	Directory             string
	IngesterMemoryRequest int64
}

// Value calculates the floor of the division of
// querier cpu limits to the query frontend replicas
// available. It returns 0 when no query frontend replicas
// are configured: the previous unconditional division yielded
// +Inf for zero replicas, and converting a float that cannot be
// represented to int32 has an implementation-specific result
// per the Go specification.
func (p Parallelism) Value() int32 {
	if p.QueryFrontendReplicas <= 0 {
		return 0
	}

	return int32(math.Floor(float64(p.QuerierCPULimits) / float64(p.QueryFrontendReplicas)))
}

// ReplayMemoryCeiling calculates 50% of the ingester memory
// for the ingester to use for the write-ahead-log capability.
func (w WriteAheadLog) ReplayMemoryCeiling() string {
	value := int64(math.Ceil(float64(w.IngesterMemoryRequest) * 0.5))
	return fmt.Sprintf("%d", value)
}

/* [packed-dump fragment — head of /internal/manifests/internal/gateway/build.go, continued on the next chunk line; preserved verbatim:]
-------------------------------------------------------------------------------- /internal/manifests/internal/gateway/build.go: -------------------------------------------------------------------------------- 1 | package gateway 2 | 3 | import ( 4 | "bytes" 5 | "embed" 6 | "io/ioutil" 7 | "text/template" 8 | 9 | lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1" 10 | 11 | "github.com/ViaQ/logerr/kverrors" 12 | ) 13 | 14 | const ( 15 | // LokiGatewayTenantFileName is the name of the tenant config file in the configmap 16 | LokiGatewayTenantFileName = "tenants.yaml" 17 | // LokiGatewayRbacFileName is the name of the rbac config file in the configmap 18 | LokiGatewayRbacFileName = "rbac.yaml" 19 | // LokiGatewayRegoFileName is the name of the lokistack-gateway rego config file in the configmap 20 | LokiGatewayRegoFileName = "lokistack-gateway.rego" 21 | // LokiGatewayMountDir is the path that is mounted from the configmap 22 | LokiGatewayMountDir = "/etc/lokistack-gateway" 23 | // LokiGatewayTLSDir is the path that is mounted from the configmap for TLS 24 | LokiGatewayTLSDir = "/var/run/tls" 25 | // LokiGatewayCABundleDir is
*/
the path that is mounted from the configmap for TLS 26 | LokiGatewayCABundleDir = "/var/run/ca" 27 | // LokiGatewayCAFile is the file name of the certificate authority file 28 | LokiGatewayCAFile = "service-ca.crt" 29 | // LokiGatewayCertFile is the file of the X509 server certificate file 30 | LokiGatewayCertFile = "tls.crt" 31 | // LokiGatewayKeyFile is the file name of the server private key 32 | LokiGatewayKeyFile = "tls.key" 33 | ) 34 | 35 | var ( 36 | //go:embed gateway-rbac.yaml 37 | lokiGatewayRbacYAMLTmplFile embed.FS 38 | 39 | //go:embed gateway-tenants.yaml 40 | lokiGatewayTenantsYAMLTmplFile embed.FS 41 | 42 | //go:embed lokistack-gateway.rego 43 | lokiStackGatewayRegoTmplFile embed.FS 44 | 45 | lokiGatewayRbacYAMLTmpl = template.Must(template.ParseFS(lokiGatewayRbacYAMLTmplFile, "gateway-rbac.yaml")) 46 | 47 | lokiGatewayTenantsYAMLTmpl = template.Must(template.ParseFS(lokiGatewayTenantsYAMLTmplFile, "gateway-tenants.yaml")) 48 | 49 | lokiStackGatewayRegoTmpl = template.Must(template.ParseFS(lokiStackGatewayRegoTmplFile, "lokistack-gateway.rego")) 50 | ) 51 | 52 | // Build builds a loki gateway configuration files 53 | func Build(opts Options) (rbacCfg []byte, tenantsCfg []byte, regoCfg []byte, err error) { 54 | // Build loki gateway rbac yaml 55 | w := bytes.NewBuffer(nil) 56 | err = lokiGatewayRbacYAMLTmpl.Execute(w, opts) 57 | if err != nil { 58 | return nil, nil, nil, kverrors.Wrap(err, "failed to create loki gateway rbac configuration") 59 | } 60 | rbacCfg, err = ioutil.ReadAll(w) 61 | if err != nil { 62 | return nil, nil, nil, kverrors.Wrap(err, "failed to read configuration from buffer") 63 | } 64 | // Build loki gateway tenants yaml 65 | w = bytes.NewBuffer(nil) 66 | err = lokiGatewayTenantsYAMLTmpl.Execute(w, opts) 67 | if err != nil { 68 | return nil, nil, nil, kverrors.Wrap(err, "failed to create loki gateway tenants configuration") 69 | } 70 | tenantsCfg, err = ioutil.ReadAll(w) 71 | if err != nil { 72 | return nil, nil, nil, 
kverrors.Wrap(err, "failed to read configuration from buffer") 73 | } 74 | // Build loki gateway observatorium rego for static mode 75 | if opts.Stack.Tenants.Mode == lokiv1beta1.Static { 76 | w = bytes.NewBuffer(nil) 77 | err = lokiStackGatewayRegoTmpl.Execute(w, opts) 78 | if err != nil { 79 | return nil, nil, nil, kverrors.Wrap(err, "failed to create lokistack gateway rego configuration") 80 | } 81 | regoCfg, err = ioutil.ReadAll(w) 82 | if err != nil { 83 | return nil, nil, nil, kverrors.Wrap(err, "failed to read configuration from buffer") 84 | } 85 | return rbacCfg, tenantsCfg, regoCfg, nil 86 | } 87 | return rbacCfg, tenantsCfg, nil, nil 88 | } 89 | -------------------------------------------------------------------------------- /internal/manifests/internal/gateway/gateway-rbac.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Stack.Tenants.Mode "static" -}} 2 | roleBindings: 3 | {{- range $spec := .Stack.Tenants.Authorization.RoleBindings }} 4 | - name: {{ $spec.Name }} 5 | roles: 6 | {{- range $role := $spec.Roles }} 7 | - {{ $role }} 8 | {{- end -}} 9 | {{ print "\n" }} 10 | subjects: 11 | {{- range $subject := $spec.Subjects }} 12 | - kind: {{ $subject.Kind }} 13 | name: {{ $subject.Name }} 14 | {{- end -}} 15 | {{- end -}} 16 | {{ print "\n" }} 17 | roles: 18 | {{- range $spec := .Stack.Tenants.Authorization.Roles }} 19 | - name: {{ $spec.Name }} 20 | permissions: 21 | {{- range $permission := $spec.Permissions }} 22 | - {{ $permission }} 23 | {{- end -}} 24 | {{ print "\n" }} 25 | resources: 26 | {{- range $resource := $spec.Resources }} 27 | - {{ $resource }} 28 | {{- end -}} 29 | {{ print "\n" }} 30 | tenants: 31 | {{- range $tenant := $spec.Tenants }} 32 | - {{ $tenant }} 33 | {{- end -}} 34 | {{- end -}} 35 | {{- end -}} 36 | -------------------------------------------------------------------------------- /internal/manifests/internal/gateway/gateway-tenants.yaml: 
-------------------------------------------------------------------------------- 1 | tenants: 2 | {{- if $l := . -}} 3 | {{- if eq $l.Stack.Tenants.Mode "static" -}} 4 | {{- range $spec := $l.Stack.Tenants.Authentication }} 5 | - name: {{ $spec.TenantName }} 6 | id: {{ $spec.TenantID }} 7 | oidc: 8 | {{- range $secret := $l.TenantSecrets }} 9 | {{- if eq $secret.TenantName $spec.TenantName -}} 10 | {{ if $secret.ClientID }} 11 | clientID: {{ $secret.ClientID }} 12 | {{- end -}} 13 | {{ if $secret.ClientSecret }} 14 | clientSecret: {{ $secret.ClientSecret }} 15 | {{- end -}} 16 | {{ if $secret.IssuerCAPath }} 17 | issuerCAPath: {{ $secret.IssuerCAPath }} 18 | {{- end -}} 19 | {{- end -}} 20 | {{- end }} 21 | issuerURL: {{ $spec.OIDC.IssuerURL }} 22 | redirectURL: {{ $spec.OIDC.RedirectURL }} 23 | {{ if $spec.OIDC.UsernameClaim }} 24 | usernameClaim: {{ $spec.OIDC.UsernameClaim }} 25 | {{- end -}} 26 | {{- if $spec.OIDC.GroupClaim }} 27 | groupClaim: {{ $spec.OIDC.GroupClaim }} 28 | {{- end }} 29 | opa: 30 | query: data.lokistack.allow 31 | paths: 32 | - /etc/lokistack-gateway/rbac.yaml 33 | - /etc/lokistack-gateway/lokistack-gateway.rego 34 | {{- end -}} 35 | {{- else if eq $l.Stack.Tenants.Mode "dynamic" -}} 36 | {{- if $tenant := $l.Stack.Tenants -}} 37 | {{- range $spec := $tenant.Authentication }} 38 | - name: {{ $spec.TenantName }} 39 | id: {{ $spec.TenantID }} 40 | oidc: 41 | {{- range $secret := $l.TenantSecrets }} 42 | {{- if eq $secret.TenantName $spec.TenantName -}} 43 | {{ if $secret.ClientID }} 44 | clientID: {{ $secret.ClientID }} 45 | {{- end -}} 46 | {{ if $secret.ClientSecret }} 47 | clientSecret: {{ $secret.ClientSecret }} 48 | {{- end -}} 49 | {{ if $secret.IssuerCAPath }} 50 | issuerCAPath: {{ $secret.IssuerCAPath }} 51 | {{- end -}} 52 | {{- end -}} 53 | {{- end }} 54 | issuerURL: {{ $spec.OIDC.IssuerURL }} 55 | redirectURL: {{ $spec.OIDC.RedirectURL }} 56 | {{- if $spec.OIDC.UsernameClaim }} 57 | usernameClaim: {{ $spec.OIDC.UsernameClaim }} 58 
| {{- end -}} 59 | {{- if $spec.OIDC.GroupClaim }} 60 | groupClaim: {{ $spec.OIDC.GroupClaim }} 61 | {{- end }} 62 | opa: 63 | url: {{ $tenant.Authorization.OPA.URL }} 64 | {{- end -}} 65 | {{- end -}} 66 | {{- else if eq $l.Stack.Tenants.Mode "openshift-logging" -}} 67 | {{- if $tenant := $l.OpenShiftOptions.Authentication -}} 68 | {{- range $spec := $l.OpenShiftOptions.Authentication }} 69 | - name: {{ $spec.TenantName }} 70 | id: {{ $spec.TenantID }} 71 | openshift: 72 | serviceAccount: {{ $spec.ServiceAccount }} 73 | redirectURL: {{ $spec.RedirectURL }} 74 | cookieSecret: {{ $spec.CookieSecret }} 75 | opa: 76 | url: {{ $l.OpenShiftOptions.Authorization.OPAUrl }} 77 | withAccessToken: true 78 | {{- end -}} 79 | {{- end -}} 80 | {{- end -}} 81 | {{- end -}} 82 | -------------------------------------------------------------------------------- /internal/manifests/internal/gateway/lokistack-gateway.rego: -------------------------------------------------------------------------------- 1 | package lokistack 2 | 3 | import input 4 | import data.roles 5 | import data.roleBindings 6 | 7 | default allow = false 8 | 9 | allow { 10 | some roleNames 11 | roleNames = roleBindings[matched_role_binding[_]].roles 12 | roles[i].name == roleNames[_] 13 | roles[i].resources[_] = input.resource 14 | roles[i].permissions[_] = input.permission 15 | roles[i].tenants[_] = input.tenant 16 | } 17 | 18 | matched_role_binding[i] { 19 | roleBindings[i].subjects[_] == {"name": input.subject, "kind": "user"} 20 | } 21 | 22 | matched_role_binding[i] { 23 | roleBindings[i].subjects[_] == {"name": input.groups[_], "kind": "group"} 24 | } 25 | -------------------------------------------------------------------------------- /internal/manifests/internal/gateway/options.go: -------------------------------------------------------------------------------- 1 | package gateway 2 | 3 | import ( 4 | lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1" 5 | 
"github.com/ViaQ/loki-operator/internal/manifests/openshift" 6 | ) 7 | 8 | // Options is used to render the rbac.yaml and tenants.yaml file template 9 | type Options struct { 10 | Stack lokiv1beta1.LokiStackSpec 11 | 12 | Namespace string 13 | Name string 14 | StorageDirectory string 15 | 16 | OpenShiftOptions openshift.Options 17 | TenantSecrets []*Secret 18 | TenantConfigMap map[string]TenantData 19 | } 20 | 21 | // Secret for clientID, clientSecret and issuerCAPath for tenant's authentication. 22 | type Secret struct { 23 | TenantName string 24 | ClientID string 25 | ClientSecret string 26 | IssuerCAPath string 27 | } 28 | 29 | // TenantData defines the existing tenantID and cookieSecret for lokistack reconcile. 30 | type TenantData struct { 31 | TenantID string 32 | CookieSecret string 33 | } 34 | -------------------------------------------------------------------------------- /internal/manifests/memberlist.go: -------------------------------------------------------------------------------- 1 | package manifests 2 | 3 | import ( 4 | "fmt" 5 | 6 | "k8s.io/apimachinery/pkg/util/intstr" 7 | 8 | corev1 "k8s.io/api/core/v1" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | ) 11 | 12 | // BuildLokiGossipRingService creates a k8s service for the gossip/memberlist members of the cluster 13 | func BuildLokiGossipRingService(stackName string) *corev1.Service { 14 | return &corev1.Service{ 15 | TypeMeta: metav1.TypeMeta{ 16 | Kind: "Service", 17 | APIVersion: corev1.SchemeGroupVersion.String(), 18 | }, 19 | ObjectMeta: metav1.ObjectMeta{ 20 | Name: fmt.Sprintf("loki-gossip-ring-%s", stackName), 21 | Labels: commonLabels(stackName), 22 | }, 23 | Spec: corev1.ServiceSpec{ 24 | ClusterIP: "None", 25 | Ports: []corev1.ServicePort{ 26 | { 27 | Name: lokiGossipPortName, 28 | Port: gossipPort, 29 | Protocol: protocolTCP, 30 | TargetPort: intstr.IntOrString{IntVal: gossipPort}, 31 | }, 32 | }, 33 | Selector: commonLabels(stackName), 34 | }, 35 | } 36 | } 37 | 
-------------------------------------------------------------------------------- /internal/manifests/openshift/build.go: -------------------------------------------------------------------------------- 1 | package openshift 2 | 3 | import "sigs.k8s.io/controller-runtime/pkg/client" 4 | 5 | // Build returns a list of auxiliary openshift/k8s objects 6 | // for lokistack gateway deployments on OpenShift. 7 | func Build(opts Options) []client.Object { 8 | objs := []client.Object{ 9 | BuildRoute(opts), 10 | BuildServiceAccount(opts), 11 | BuildClusterRole(opts), 12 | BuildClusterRoleBinding(opts), 13 | } 14 | 15 | if opts.BuildOpts.EnableCertificateSigningService { 16 | objs = append(objs, BuildServiceCAConfigMap(opts)) 17 | } 18 | 19 | return objs 20 | } 21 | -------------------------------------------------------------------------------- /internal/manifests/openshift/build_test.go: -------------------------------------------------------------------------------- 1 | package openshift 2 | 3 | import ( 4 | "encoding/json" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/require" 8 | 9 | routev1 "github.com/openshift/api/route/v1" 10 | corev1 "k8s.io/api/core/v1" 11 | rbacv1 "k8s.io/api/rbac/v1" 12 | ) 13 | 14 | func TestBuild_ServiceAccountRefMatches(t *testing.T) { 15 | opts := NewOptions("abc", "abc", "efgh", "example.com", "abc", "abc", map[string]string{}, false, map[string]TenantData{}) 16 | 17 | objs := Build(opts) 18 | sa := objs[1].(*corev1.ServiceAccount) 19 | rb := objs[3].(*rbacv1.ClusterRoleBinding) 20 | 21 | require.Equal(t, sa.Kind, rb.Subjects[0].Kind) 22 | require.Equal(t, sa.Name, rb.Subjects[0].Name) 23 | require.Equal(t, sa.Namespace, rb.Subjects[0].Namespace) 24 | } 25 | 26 | func TestBuild_ClusterRoleRefMatches(t *testing.T) { 27 | opts := NewOptions("abc", "abc", "efgh", "example.com", "abc", "abc", map[string]string{}, false, map[string]TenantData{}) 28 | 29 | objs := Build(opts) 30 | cr := objs[2].(*rbacv1.ClusterRole) 31 | rb := 
objs[3].(*rbacv1.ClusterRoleBinding) 32 | 33 | require.Equal(t, cr.Kind, rb.RoleRef.Kind) 34 | require.Equal(t, cr.Name, rb.RoleRef.Name) 35 | } 36 | 37 | func TestBuild_ServiceAccountAnnotationsRouteRefMatches(t *testing.T) { 38 | opts := NewOptions("abc", "abc", "efgh", "example.com", "abc", "abc", map[string]string{}, false, map[string]TenantData{}) 39 | 40 | objs := Build(opts) 41 | rt := objs[0].(*routev1.Route) 42 | sa := objs[1].(*corev1.ServiceAccount) 43 | 44 | type oauthRedirectReference struct { 45 | Kind string `json:"kind"` 46 | APIVersion string `json:"apiVersion"` 47 | Ref *struct { 48 | Kind string `json:"kind"` 49 | Name string `json:"name"` 50 | } `json:"reference"` 51 | } 52 | 53 | for _, a := range sa.Annotations { 54 | oauthRef := oauthRedirectReference{} 55 | err := json.Unmarshal([]byte(a), &oauthRef) 56 | require.NoError(t, err) 57 | 58 | require.Equal(t, rt.Name, oauthRef.Ref.Name) 59 | require.Equal(t, rt.Kind, oauthRef.Ref.Kind) 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /internal/manifests/openshift/configure.go: -------------------------------------------------------------------------------- 1 | package openshift 2 | 3 | import ( 4 | "fmt" 5 | "regexp" 6 | "strings" 7 | 8 | "github.com/ViaQ/logerr/kverrors" 9 | "github.com/imdario/mergo" 10 | 11 | monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" 12 | appsv1 "k8s.io/api/apps/v1" 13 | corev1 "k8s.io/api/core/v1" 14 | ) 15 | 16 | const ( 17 | // tenantApplication is the name of the tenant holding application logs. 18 | tenantApplication = "application" 19 | // tenantInfrastructure is the name of the tenant holding infrastructure logs. 20 | tenantInfrastructure = "infrastructure" 21 | // tenantAudit is the name of the tenant holding audit logs. 22 | tenantAudit = "audit" 23 | ) 24 | 25 | var ( 26 | // defaultTenants represents the slice of all supported LokiStack on OpenShift. 
27 | defaultTenants = []string{ 28 | tenantApplication, 29 | tenantInfrastructure, 30 | tenantAudit, 31 | } 32 | 33 | logsEndpointRe = regexp.MustCompile(`.*logs..*.endpoint.*`) 34 | ) 35 | 36 | // ConfigureGatewayDeployment merges an OpenPolicyAgent sidecar into the deployment spec. 37 | // With this, the deployment will route authorization request to the OpenShift 38 | // apiserver through the sidecar. 39 | func ConfigureGatewayDeployment( 40 | d *appsv1.Deployment, 41 | gwContainerName string, 42 | sercretVolumeName, tlsDir, certFile, keyFile string, 43 | caDir, caFile string, 44 | withTLS, withCertSigningService bool, 45 | ) error { 46 | var gwIndex int 47 | for i, c := range d.Spec.Template.Spec.Containers { 48 | if c.Name == gwContainerName { 49 | gwIndex = i 50 | break 51 | } 52 | } 53 | 54 | gwContainer := d.Spec.Template.Spec.Containers[gwIndex].DeepCopy() 55 | gwArgs := gwContainer.Args 56 | gwVolumes := d.Spec.Template.Spec.Volumes 57 | 58 | if withCertSigningService { 59 | for i, a := range gwArgs { 60 | if logsEndpointRe.MatchString(a) { 61 | gwContainer.Args[i] = strings.Replace(a, "http", "https", 1) 62 | } 63 | } 64 | 65 | gwArgs = append(gwArgs, fmt.Sprintf("--logs.tls.ca-file=%s/%s", caDir, caFile)) 66 | 67 | caBundleVolumeName := serviceCABundleName(Options{ 68 | BuildOpts: BuildOptions{ 69 | GatewayName: d.GetName(), 70 | }, 71 | }) 72 | 73 | gwContainer.VolumeMounts = append(gwContainer.VolumeMounts, corev1.VolumeMount{ 74 | Name: caBundleVolumeName, 75 | ReadOnly: true, 76 | MountPath: caDir, 77 | }) 78 | 79 | gwVolumes = append(gwVolumes, corev1.Volume{ 80 | Name: caBundleVolumeName, 81 | VolumeSource: corev1.VolumeSource{ 82 | ConfigMap: &corev1.ConfigMapVolumeSource{ 83 | DefaultMode: &defaultConfigMapMode, 84 | LocalObjectReference: corev1.LocalObjectReference{ 85 | Name: caBundleVolumeName, 86 | }, 87 | }, 88 | }, 89 | }) 90 | } 91 | 92 | gwContainer.Args = gwArgs 93 | 94 | p := corev1.PodSpec{ 95 | ServiceAccountName: d.GetName(), 96 | 
Containers: []corev1.Container{ 97 | *gwContainer, 98 | newOPAOpenShiftContainer(sercretVolumeName, tlsDir, certFile, keyFile, withTLS), 99 | }, 100 | Volumes: gwVolumes, 101 | } 102 | 103 | if err := mergo.Merge(&d.Spec.Template.Spec, p, mergo.WithOverride); err != nil { 104 | return kverrors.Wrap(err, "failed to merge sidecar container spec ") 105 | } 106 | 107 | return nil 108 | } 109 | 110 | // ConfigureGatewayService merges the OpenPolicyAgent sidecar metrics port into 111 | // the service spec. With this the metrics are exposed through the same service. 112 | func ConfigureGatewayService(s *corev1.ServiceSpec) error { 113 | spec := corev1.ServiceSpec{ 114 | Ports: []corev1.ServicePort{ 115 | { 116 | Name: opaMetricsPortName, 117 | Port: GatewayOPAInternalPort, 118 | }, 119 | }, 120 | } 121 | 122 | if err := mergo.Merge(s, spec, mergo.WithAppendSlice); err != nil { 123 | return kverrors.Wrap(err, "failed to merge sidecar service ports") 124 | } 125 | 126 | return nil 127 | } 128 | 129 | // ConfigureGatewayServiceMonitor merges the OpenPolicyAgent sidecar endpoint into 130 | // the service monitor. With this cluster-monitoring prometheus can scrape 131 | // the sidecar metrics. 
132 | func ConfigureGatewayServiceMonitor(sm *monitoringv1.ServiceMonitor, withTLS bool) error { 133 | var opaEndpoint monitoringv1.Endpoint 134 | 135 | if withTLS { 136 | tlsConfig := sm.Spec.Endpoints[0].TLSConfig 137 | opaEndpoint = monitoringv1.Endpoint{ 138 | Port: opaMetricsPortName, 139 | Path: "/metrics", 140 | Scheme: "https", 141 | BearerTokenFile: bearerTokenFile, 142 | TLSConfig: tlsConfig, 143 | } 144 | } else { 145 | opaEndpoint = monitoringv1.Endpoint{ 146 | Port: opaMetricsPortName, 147 | Path: "/metrics", 148 | Scheme: "http", 149 | } 150 | } 151 | 152 | spec := monitoringv1.ServiceMonitorSpec{ 153 | Endpoints: []monitoringv1.Endpoint{opaEndpoint}, 154 | } 155 | 156 | if err := mergo.Merge(&sm.Spec, spec, mergo.WithAppendSlice); err != nil { 157 | return kverrors.Wrap(err, "failed to merge sidecar service monitor endpoints") 158 | } 159 | 160 | return nil 161 | } 162 | -------------------------------------------------------------------------------- /internal/manifests/openshift/opa_openshift.go: -------------------------------------------------------------------------------- 1 | package openshift 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path" 7 | 8 | corev1 "k8s.io/api/core/v1" 9 | "k8s.io/apimachinery/pkg/util/intstr" 10 | ) 11 | 12 | const ( 13 | envRelatedImageOPA = "RELATED_IMAGE_OPA" 14 | defaultOPAImage = "quay.io/observatorium/opa-openshift:latest" 15 | opaContainerName = "opa" 16 | opaDefaultPackage = "lokistack" 17 | opaDefaultAPIGroup = "loki.openshift.io" 18 | opaMetricsPortName = "opa-metrics" 19 | ) 20 | 21 | func newOPAOpenShiftContainer(sercretVolumeName, tlsDir, certFile, keyFile string, withTLS bool) corev1.Container { 22 | var ( 23 | image string 24 | args []string 25 | uriScheme corev1.URIScheme 26 | volumeMounts []corev1.VolumeMount 27 | ) 28 | 29 | image = os.Getenv(envRelatedImageOPA) 30 | if image == "" { 31 | image = defaultOPAImage 32 | } 33 | 34 | uriScheme = corev1.URISchemeHTTP 35 | args = []string{ 36 | 
"--log.level=warn", 37 | fmt.Sprintf("--opa.package=%s", opaDefaultPackage), 38 | fmt.Sprintf("--web.listen=:%d", GatewayOPAHTTPPort), 39 | fmt.Sprintf("--web.internal.listen=:%d", GatewayOPAInternalPort), 40 | fmt.Sprintf("--web.healthchecks.url=http://localhost:%d", GatewayOPAHTTPPort), 41 | } 42 | 43 | if withTLS { 44 | certFilePath := path.Join(tlsDir, certFile) 45 | keyFilePath := path.Join(tlsDir, keyFile) 46 | 47 | args = append(args, []string{ 48 | fmt.Sprintf("--tls.internal.server.cert-file=%s", certFilePath), 49 | fmt.Sprintf("--tls.internal.server.key-file=%s", keyFilePath), 50 | }...) 51 | 52 | uriScheme = corev1.URISchemeHTTPS 53 | 54 | volumeMounts = []corev1.VolumeMount{ 55 | { 56 | Name: sercretVolumeName, 57 | ReadOnly: true, 58 | MountPath: tlsDir, 59 | }, 60 | } 61 | } 62 | 63 | for _, t := range defaultTenants { 64 | args = append(args, fmt.Sprintf(`--openshift.mappings=%s=%s`, t, opaDefaultAPIGroup)) 65 | } 66 | 67 | return corev1.Container{ 68 | Name: opaContainerName, 69 | Image: image, 70 | Args: args, 71 | Ports: []corev1.ContainerPort{ 72 | { 73 | Name: GatewayOPAHTTPPortName, 74 | ContainerPort: GatewayOPAHTTPPort, 75 | Protocol: corev1.ProtocolTCP, 76 | }, 77 | { 78 | Name: GatewayOPAInternalPortName, 79 | ContainerPort: GatewayOPAInternalPort, 80 | Protocol: corev1.ProtocolTCP, 81 | }, 82 | }, 83 | LivenessProbe: &corev1.Probe{ 84 | Handler: corev1.Handler{ 85 | HTTPGet: &corev1.HTTPGetAction{ 86 | Path: "/live", 87 | Port: intstr.FromInt(int(GatewayOPAInternalPort)), 88 | Scheme: uriScheme, 89 | }, 90 | }, 91 | TimeoutSeconds: 2, 92 | PeriodSeconds: 30, 93 | FailureThreshold: 10, 94 | }, 95 | ReadinessProbe: &corev1.Probe{ 96 | Handler: corev1.Handler{ 97 | HTTPGet: &corev1.HTTPGetAction{ 98 | Path: "/ready", 99 | Port: intstr.FromInt(int(GatewayOPAInternalPort)), 100 | Scheme: uriScheme, 101 | }, 102 | }, 103 | TimeoutSeconds: 1, 104 | PeriodSeconds: 5, 105 | FailureThreshold: 12, 106 | }, 107 | VolumeMounts: volumeMounts, 108 | } 
109 | } 110 | -------------------------------------------------------------------------------- /internal/manifests/openshift/options.go: -------------------------------------------------------------------------------- 1 | package openshift 2 | 3 | import ( 4 | "fmt" 5 | "math/rand" 6 | 7 | "github.com/google/uuid" 8 | ) 9 | 10 | // Options is the set of internal template options for rendering 11 | // the lokistack-gateway tenants configuration file when mode openshift-logging. 12 | type Options struct { 13 | BuildOpts BuildOptions 14 | Authentication []AuthenticationSpec 15 | Authorization AuthorizationSpec 16 | } 17 | 18 | // AuthenticationSpec describes the authentication specification 19 | // for a single tenant to authenticate it's subjects through OpenShift Auth. 20 | type AuthenticationSpec struct { 21 | TenantName string 22 | TenantID string 23 | ServiceAccount string 24 | RedirectURL string 25 | CookieSecret string 26 | } 27 | 28 | // AuthorizationSpec describes the authorization specification 29 | // for all tenants to authorize access for it's subjects through the 30 | // opa-openshift sidecar. 31 | type AuthorizationSpec struct { 32 | OPAUrl string 33 | } 34 | 35 | // BuildOptions represents the set of options required to build 36 | // extra lokistack gateway k8s objects (e.g. ServiceAccount, Route, RBAC) 37 | // on openshift. 38 | type BuildOptions struct { 39 | LokiStackName string 40 | GatewayName string 41 | GatewayNamespace string 42 | GatewaySvcName string 43 | GatewaySvcTargetPort string 44 | Labels map[string]string 45 | EnableCertificateSigningService bool 46 | } 47 | 48 | // TenantData defines the existing tenantID and cookieSecret for lokistack reconcile. 49 | type TenantData struct { 50 | TenantID string 51 | CookieSecret string 52 | } 53 | 54 | // NewOptions returns an openshift options struct. 
55 | func NewOptions( 56 | stackName string, 57 | gwName, gwNamespace, gwBaseDomain, gwSvcName, gwPortName string, 58 | gwLabels map[string]string, 59 | enableCertSigningService bool, 60 | tenantConfigMap map[string]TenantData, 61 | ) Options { 62 | host := ingressHost(stackName, gwNamespace, gwBaseDomain) 63 | 64 | var authn []AuthenticationSpec 65 | for _, name := range defaultTenants { 66 | if tenantConfigMap != nil { 67 | authn = append(authn, AuthenticationSpec{ 68 | TenantName: name, 69 | TenantID: tenantConfigMap[name].TenantID, 70 | ServiceAccount: gwName, 71 | RedirectURL: fmt.Sprintf("http://%s/openshift/%s/callback", host, name), 72 | CookieSecret: tenantConfigMap[name].CookieSecret, 73 | }) 74 | } else { 75 | authn = append(authn, AuthenticationSpec{ 76 | TenantName: name, 77 | TenantID: uuid.New().String(), 78 | ServiceAccount: gwName, 79 | RedirectURL: fmt.Sprintf("http://%s/openshift/%s/callback", host, name), 80 | CookieSecret: newCookieSecret(), 81 | }) 82 | } 83 | } 84 | 85 | return Options{ 86 | BuildOpts: BuildOptions{ 87 | LokiStackName: stackName, 88 | GatewayName: gwName, 89 | GatewayNamespace: gwNamespace, 90 | GatewaySvcName: gwSvcName, 91 | GatewaySvcTargetPort: gwPortName, 92 | Labels: gwLabels, 93 | EnableCertificateSigningService: enableCertSigningService, 94 | }, 95 | Authentication: authn, 96 | Authorization: AuthorizationSpec{ 97 | OPAUrl: fmt.Sprintf("http://localhost:%d/v1/data/%s/allow", GatewayOPAHTTPPort, opaDefaultPackage), 98 | }, 99 | } 100 | } 101 | 102 | func newCookieSecret() string { 103 | b := make([]rune, cookieSecretLength) 104 | for i := range b { 105 | b[i] = allowedRunes[rand.Intn(len(allowedRunes))] 106 | } 107 | 108 | return string(b) 109 | } 110 | -------------------------------------------------------------------------------- /internal/manifests/openshift/rbac.go: -------------------------------------------------------------------------------- 1 | package openshift 2 | 3 | import ( 4 | rbacv1 
"k8s.io/api/rbac/v1" 5 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 6 | ) 7 | 8 | // BuildClusterRole returns a k8s ClusterRole object for the 9 | // lokistack gateway serviceaccount to allow creating: 10 | // - TokenReviews to authenticate the user by bearer token. 11 | // - SubjectAccessReview to authorize the user by bearer token. 12 | // if having access to read/create logs. 13 | func BuildClusterRole(opts Options) *rbacv1.ClusterRole { 14 | return &rbacv1.ClusterRole{ 15 | TypeMeta: metav1.TypeMeta{ 16 | Kind: "ClusterRole", 17 | APIVersion: rbacv1.SchemeGroupVersion.String(), 18 | }, 19 | ObjectMeta: metav1.ObjectMeta{ 20 | Name: clusterRoleName(opts), 21 | Labels: opts.BuildOpts.Labels, 22 | }, 23 | Rules: []rbacv1.PolicyRule{ 24 | { 25 | APIGroups: []string{ 26 | "authentication.k8s.io", 27 | }, 28 | Resources: []string{ 29 | "tokenreviews", 30 | }, 31 | Verbs: []string{ 32 | "create", 33 | }, 34 | }, 35 | { 36 | APIGroups: []string{ 37 | "authorization.k8s.io", 38 | }, 39 | Resources: []string{ 40 | "subjectaccessreviews", 41 | }, 42 | Verbs: []string{ 43 | "create", 44 | }, 45 | }, 46 | }, 47 | } 48 | } 49 | 50 | // BuildClusterRoleBinding returns a k8s ClusterRoleBinding object for 51 | // the lokistack gateway serviceaccount to grant access to: 52 | // - rbac.authentication.k8s.io/TokenReviews 53 | // - rbac.authorization.k8s.io/SubjectAccessReviews 54 | func BuildClusterRoleBinding(opts Options) *rbacv1.ClusterRoleBinding { 55 | return &rbacv1.ClusterRoleBinding{ 56 | TypeMeta: metav1.TypeMeta{ 57 | Kind: "ClusterRoleBinding", 58 | APIVersion: rbacv1.SchemeGroupVersion.String(), 59 | }, 60 | ObjectMeta: metav1.ObjectMeta{ 61 | Name: opts.BuildOpts.GatewayName, 62 | Labels: opts.BuildOpts.Labels, 63 | }, 64 | RoleRef: rbacv1.RoleRef{ 65 | APIGroup: "rbac.authorization.k8s.io", 66 | Kind: "ClusterRole", 67 | Name: clusterRoleName(opts), 68 | }, 69 | Subjects: []rbacv1.Subject{ 70 | { 71 | Kind: rbacv1.ServiceAccountKind, 72 | Name: 
serviceAccountName(opts), 73 | Namespace: opts.BuildOpts.GatewayNamespace, 74 | }, 75 | }, 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /internal/manifests/openshift/route.go: -------------------------------------------------------------------------------- 1 | package openshift 2 | 3 | import ( 4 | routev1 "github.com/openshift/api/route/v1" 5 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 6 | "k8s.io/apimachinery/pkg/util/intstr" 7 | "k8s.io/utils/pointer" 8 | "sigs.k8s.io/controller-runtime/pkg/client" 9 | ) 10 | 11 | // BuildRoute builds an OpenShift route object for the LokiStack Gateway 12 | func BuildRoute(opts Options) client.Object { 13 | return &routev1.Route{ 14 | TypeMeta: metav1.TypeMeta{ 15 | Kind: "Route", 16 | APIVersion: routev1.SchemeGroupVersion.String(), 17 | }, 18 | ObjectMeta: metav1.ObjectMeta{ 19 | Name: routeName(opts), 20 | Namespace: opts.BuildOpts.GatewayNamespace, 21 | Labels: opts.BuildOpts.Labels, 22 | }, 23 | Spec: routev1.RouteSpec{ 24 | To: routev1.RouteTargetReference{ 25 | Kind: "Service", 26 | Name: opts.BuildOpts.GatewaySvcName, 27 | Weight: pointer.Int32(100), 28 | }, 29 | Port: &routev1.RoutePort{ 30 | TargetPort: intstr.FromString(opts.BuildOpts.GatewaySvcTargetPort), 31 | }, 32 | WildcardPolicy: routev1.WildcardPolicyNone, 33 | }, 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /internal/manifests/openshift/service_ca.go: -------------------------------------------------------------------------------- 1 | package openshift 2 | 3 | import ( 4 | corev1 "k8s.io/api/core/v1" 5 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 6 | ) 7 | 8 | // BuildServiceCAConfigMap returns a k8s configmap for the LokiStack 9 | // gateway serviceCA configmap. This configmap is used to configure 10 | // the gateway to proxy server-side TLS encrypted requests to Loki. 
11 | func BuildServiceCAConfigMap(opts Options) *corev1.ConfigMap { 12 | return &corev1.ConfigMap{ 13 | TypeMeta: metav1.TypeMeta{ 14 | Kind: "ConfigMap", 15 | APIVersion: corev1.SchemeGroupVersion.String(), 16 | }, 17 | ObjectMeta: metav1.ObjectMeta{ 18 | Annotations: map[string]string{ 19 | InjectCABundleKey: "true", 20 | }, 21 | Labels: opts.BuildOpts.Labels, 22 | Name: serviceCABundleName(opts), 23 | Namespace: opts.BuildOpts.GatewayNamespace, 24 | }, 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /internal/manifests/openshift/serviceaccount.go: -------------------------------------------------------------------------------- 1 | package openshift 2 | 3 | import ( 4 | corev1 "k8s.io/api/core/v1" 5 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 6 | "k8s.io/utils/pointer" 7 | "sigs.k8s.io/controller-runtime/pkg/client" 8 | ) 9 | 10 | // BuildServiceAccount returns a k8s object for the LokiStack Gateway 11 | // serviceaccount. This ServiceAccount is used in parallel as an 12 | // OpenShift OAuth Client. 
func BuildServiceAccount(opts Options) client.Object {
	return &corev1.ServiceAccount{
		TypeMeta: metav1.TypeMeta{
			Kind:       "ServiceAccount",
			APIVersion: corev1.SchemeGroupVersion.String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			// One OAuth redirect-reference annotation per configured tenant,
			// see serviceAccountAnnotations.
			Annotations: serviceAccountAnnotations(opts),
			Labels:      opts.BuildOpts.Labels,
			Name:        serviceAccountName(opts),
			Namespace:   opts.BuildOpts.GatewayNamespace,
		},
		AutomountServiceAccountToken: pointer.Bool(true),
	}
}
--------------------------------------------------------------------------------
/internal/manifests/openshift/serviceaccount_test.go:
--------------------------------------------------------------------------------
package openshift

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
)

// TestBuildServiceAccount_AnnotationsMatchDefaultTenants verifies that the
// serviceaccount carries exactly one oauth-redirectreference annotation per
// default tenant.
func TestBuildServiceAccount_AnnotationsMatchDefaultTenants(t *testing.T) {
	opts := NewOptions("abc", "abc", "efgh", "example.com", "abc", "abc", map[string]string{}, false, map[string]TenantData{})

	sa := BuildServiceAccount(opts)
	require.Len(t, sa.GetAnnotations(), len(defaultTenants))

	var keys []string
	for key := range sa.GetAnnotations() {
		keys = append(keys, key)
	}

	for _, name := range defaultTenants {
		v := fmt.Sprintf("serviceaccounts.openshift.io/oauth-redirectreference.%s", name)
		require.Contains(t, keys, v)
	}
}
--------------------------------------------------------------------------------
/internal/manifests/openshift/var.go:
--------------------------------------------------------------------------------
package openshift

import (
	"fmt"
)

var (
	// GatewayOPAHTTPPort is the HTTP port of the OpenPolicyAgent sidecar.
	GatewayOPAHTTPPort int32 = 8082
	// GatewayOPAInternalPort is the HTTP metrics port of the OpenPolicyAgent sidecar.
	GatewayOPAInternalPort int32 = 8083

	// GatewayOPAHTTPPortName is the HTTP container port name of the OpenPolicyAgent sidecar.
	GatewayOPAHTTPPortName = "public"
	// GatewayOPAInternalPortName is the HTTP container metrics port name of the OpenPolicyAgent sidecar.
	GatewayOPAInternalPortName = "opa-metrics"

	// bearerTokenFile is the in-pod path of the serviceaccount token used
	// by the ServiceMonitor endpoint when scraping with TLS.
	bearerTokenFile string = "/var/run/secrets/kubernetes.io/serviceaccount/token"

	// cookieSecretLength/allowedRunes drive newCookieSecret generation.
	cookieSecretLength = 32
	allowedRunes       = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")

	// defaultConfigMapMode is 0644 in octal (420 decimal).
	defaultConfigMapMode = int32(420)

	// ServingCertKey is the annotation key for services used the
	// cert-signing service to create a new key/cert pair signed
	// by the service CA stored in a secret with the same name
	// as the annotated service.
	ServingCertKey = "service.beta.openshift.io/serving-cert-secret-name"
	// InjectCABundleKey is the annotation key for configmaps used by the
	// cert-signing service to inject the service CA into the annotated
	// configmap.
	InjectCABundleKey = "service.beta.openshift.io/inject-cabundle"
)

// clusterRoleName returns the gateway name; the ClusterRole is named after it.
func clusterRoleName(opts Options) string {
	return opts.BuildOpts.GatewayName
}

// ingressHost builds the external route host: <stack>-<namespace>.apps.<baseDomain>.
func ingressHost(stackName, namespace, baseDomain string) string {
	return fmt.Sprintf("%s-%s.apps.%s", stackName, namespace, baseDomain)
}

// routeName returns the LokiStack name; the Route is named after it.
func routeName(opts Options) string {
	return opts.BuildOpts.LokiStackName
}

// serviceAccountName returns the gateway name; the ServiceAccount is named after it.
func serviceAccountName(opts Options) string {
	return opts.BuildOpts.GatewayName
}

// serviceCABundleName returns the name of the service CA bundle configmap.
func serviceCABundleName(opts Options) string {
	return fmt.Sprintf("%s-ca-bundle", opts.BuildOpts.GatewayName)
}

// serviceAccountAnnotations returns one OAuth redirect-reference annotation
// per authenticated tenant, each pointing at the gateway Route. This is what
// makes the ServiceAccount usable as an OpenShift OAuth client.
func serviceAccountAnnotations(opts Options) map[string]string {
	a := make(map[string]string, len(opts.Authentication))
	for _, auth := range opts.Authentication {
		key := fmt.Sprintf("serviceaccounts.openshift.io/oauth-redirectreference.%s", auth.TenantName)
		value := fmt.Sprintf("{\"kind\":\"OAuthRedirectReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"Route\",\"name\":\"%s\"}}", routeName(opts))
		a[key] = value
	}

	return a
}
--------------------------------------------------------------------------------
/internal/manifests/options.go:
--------------------------------------------------------------------------------
package manifests

import (
	lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1"
	"github.com/ViaQ/loki-operator/internal/manifests/internal"
	"github.com/ViaQ/loki-operator/internal/manifests/openshift"
)

// Options is a set of configuration values to use when building manifests such as resource sizes, etc.
// Most of this should be provided - either directly or indirectly - by the user.
type Options struct {
	// Name and Namespace identify the LokiStack instance the manifests belong to.
	Name      string
	Namespace string
	// Image is the Loki container image.
	Image string
	// GatewayImage is the lokistack-gateway container image.
	GatewayImage string
	// GatewayBaseDomain is the cluster base domain used to build the gateway ingress host.
	GatewayBaseDomain string
	// ConfigSHA1 is a checksum of the generated Loki configuration; it is
	// surfaced as the "loki.openshift.io/config-hash" pod template annotation.
	ConfigSHA1 string

	Flags FeatureFlags

	Stack                lokiv1beta1.LokiStackSpec
	ResourceRequirements internal.ComponentResources

	ObjectStorage ObjectStorage

	// OpenShiftOptions, TenantSecrets and TenantConfigMap carry the
	// openshift-logging tenancy configuration.
	OpenShiftOptions openshift.Options
	TenantSecrets    []*TenantSecrets
	TenantConfigMap  map[string]openshift.TenantData
}

// ObjectStorage for storage config.
type ObjectStorage struct {
	Endpoint        string
	Region          string
	Buckets         string
	AccessKeyID     string
	AccessKeySecret string
}

// FeatureFlags contains flags that activate various features
type FeatureFlags struct {
	EnableCertificateSigningService bool
	EnableServiceMonitors           bool
	EnableTLSServiceMonitorConfig   bool
	EnableGateway                   bool
	EnableGatewayRoute              bool
}

// TenantSecrets for clientID, clientSecret and issuerCAPath for tenant's authentication.
type TenantSecrets struct {
	TenantName   string
	ClientID     string
	ClientSecret string
	IssuerCAPath string
}
--------------------------------------------------------------------------------
/internal/manifests/querier_test.go:
--------------------------------------------------------------------------------
package manifests_test

import (
	"testing"

	lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1"
	"github.com/ViaQ/loki-operator/internal/manifests"
	"github.com/stretchr/testify/require"
)

// Verifies that ConfigSHA1 is propagated into the pod template's
// config-hash annotation.
func TestNewQuerierDeployment_HasTemplateConfigHashAnnotation(t *testing.T) {
	ss := manifests.NewQuerierDeployment(manifests.Options{
		Name:       "abcd",
		Namespace:  "efgh",
		ConfigSHA1: "deadbeef",
		Stack: lokiv1beta1.LokiStackSpec{
			StorageClassName: "standard",
			Template: &lokiv1beta1.LokiTemplateSpec{
				Querier: &lokiv1beta1.LokiComponentSpec{
					Replicas: 1,
				},
			},
		},
	})

	expected := "loki.openshift.io/config-hash"
	annotations := ss.Spec.Template.Annotations
	require.Contains(t, annotations, expected)
	require.Equal(t, annotations[expected], "deadbeef")
}

func TestNewQuerierDeployment_SelectorMatchesLabels(t *testing.T) {
	// You must set the .spec.selector field of a Deployment to match the labels of
	// its .spec.template.metadata.labels. Prior to Kubernetes 1.8, the
	// .spec.selector field was defaulted when omitted. In 1.8 and later versions,
	// failing to specify a matching Pod Selector will result in a validation error
	// during Deployment creation.
	// See https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#selector
	ss := manifests.NewQuerierDeployment(manifests.Options{
		Name:      "abcd",
		Namespace: "efgh",
		Stack: lokiv1beta1.LokiStackSpec{
			StorageClassName: "standard",
			Template: &lokiv1beta1.LokiTemplateSpec{
				Querier: &lokiv1beta1.LokiComponentSpec{
					Replicas: 1,
				},
			},
		},
	})

	l := ss.Spec.Template.GetObjectMeta().GetLabels()
	for key, value := range ss.Spec.Selector.MatchLabels {
		require.Contains(t, l, key)
		require.Equal(t, l[key], value)
	}
}
--------------------------------------------------------------------------------
/internal/manifests/query-frontend_test.go:
--------------------------------------------------------------------------------
package manifests_test

import (
	"testing"

	lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1"
	"github.com/ViaQ/loki-operator/internal/manifests"
	"github.com/stretchr/testify/require"
)

func TestNewQueryFrontendDeployment_SelectorMatchesLabels(t *testing.T) {
	ss := manifests.NewQueryFrontendDeployment(manifests.Options{
		Name:      "abcd",
		Namespace: "efgh",
		Stack: lokiv1beta1.LokiStackSpec{
			Template: &lokiv1beta1.LokiTemplateSpec{
				QueryFrontend: &lokiv1beta1.LokiComponentSpec{
					Replicas: 1,
				},
			},
		},
	})
	l := ss.Spec.Template.GetObjectMeta().GetLabels()
	for key, value := range ss.Spec.Selector.MatchLabels {
		require.Contains(t, l, key)
		require.Equal(t, l[key], value)
	}
}

func TestNewQueryFrontendDeployment_HasTemplateConfigHashAnnotation(t *testing.T) {
	ss := manifests.NewQueryFrontendDeployment(manifests.Options{
		Name:       "abcd",
		Namespace:  "efgh",
		ConfigSHA1: "deadbeef",
		Stack: lokiv1beta1.LokiStackSpec{
			Template: &lokiv1beta1.LokiTemplateSpec{
				QueryFrontend: &lokiv1beta1.LokiComponentSpec{
					Replicas: 1,
				},
			},
		},
	})
	expected := "loki.openshift.io/config-hash"
	annotations := ss.Spec.Template.Annotations
	require.Contains(t, annotations, expected)
	require.Equal(t, annotations[expected], "deadbeef")
}
--------------------------------------------------------------------------------
/internal/manifests/service_monitor_test.go:
--------------------------------------------------------------------------------
package manifests

import (
	"fmt"
	"testing"

	corev1 "k8s.io/api/core/v1"

	lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1"
	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// Test that all serviceMonitor match the labels of their services so that we know all serviceMonitor
// will work when deployed.
func TestServiceMonitorMatchLabels(t *testing.T) {
	type test struct {
		Service        *corev1.Service
		ServiceMonitor *monitoringv1.ServiceMonitor
	}

	flags := FeatureFlags{
		EnableCertificateSigningService: true,
		EnableServiceMonitors:           true,
		EnableTLSServiceMonitorConfig:   true,
	}

	opt := Options{
		Name:      "test",
		Namespace: "test",
		Image:     "test",
		Flags:     flags,
		Stack: lokiv1beta1.LokiStackSpec{
			Size: lokiv1beta1.SizeOneXExtraSmall,
			Template: &lokiv1beta1.LokiTemplateSpec{
				Compactor: &lokiv1beta1.LokiComponentSpec{
					Replicas: 1,
				},
				Distributor: &lokiv1beta1.LokiComponentSpec{
					Replicas: 1,
				},
				Ingester: &lokiv1beta1.LokiComponentSpec{
					Replicas: 1,
				},
				Querier: &lokiv1beta1.LokiComponentSpec{
					Replicas: 1,
				},
				QueryFrontend: &lokiv1beta1.LokiComponentSpec{
					Replicas: 1,
				},
				Gateway: &lokiv1beta1.LokiComponentSpec{
					Replicas: 1,
				},
				IndexGateway: &lokiv1beta1.LokiComponentSpec{
					Replicas: 1,
				},
			},
		},
	}

	table := []test{
		{
			Service:        NewDistributorHTTPService(opt),
			ServiceMonitor: NewDistributorServiceMonitor(opt),
		},
		{
			Service:        NewIngesterHTTPService(opt),
			ServiceMonitor: NewIngesterServiceMonitor(opt),
		},
		{
			Service:        NewQuerierHTTPService(opt),
			ServiceMonitor: NewQuerierServiceMonitor(opt),
		},
		{
			Service:        NewQueryFrontendHTTPService(opt),
			ServiceMonitor: NewQueryFrontendServiceMonitor(opt),
		},
		{
			Service:        NewCompactorHTTPService(opt),
			ServiceMonitor: NewCompactorServiceMonitor(opt),
		},
		{
			Service:        NewGatewayHTTPService(opt),
			ServiceMonitor: NewGatewayServiceMonitor(opt),
		},
		{
			Service:        NewIndexGatewayHTTPService(opt),
			ServiceMonitor: NewIndexGatewayServiceMonitor(opt),
		},
	}

	for _, tst := range table {
		testName := fmt.Sprintf("%s_%s", tst.Service.GetName(), tst.ServiceMonitor.GetName())
		t.Run(testName, func(t *testing.T) {
			t.Parallel()
			for k, v := range tst.ServiceMonitor.Spec.Selector.MatchLabels {
				if assert.Contains(t, tst.Service.Spec.Selector, k) {
					// only assert Equal if the previous assertion is successful or this will panic
					assert.Equal(t, v, tst.Service.Spec.Selector[k])
				}
			}
		})
	}
}

// In openshift-logging mode the gateway service monitor gains a second
// endpoint for the OPA sidecar metrics.
func TestServiceMonitorEndpoints_ForOpenShiftLoggingMode(t *testing.T) {
	flags := FeatureFlags{
		EnableGateway:                   true,
		EnableCertificateSigningService: true,
		EnableServiceMonitors:           true,
		EnableTLSServiceMonitorConfig:   true,
	}

	opt := Options{
		Name:      "test",
		Namespace: "test",
		Image:     "test",
		Flags:     flags,
		Stack: lokiv1beta1.LokiStackSpec{
			Size: lokiv1beta1.SizeOneXExtraSmall,
			Tenants: &lokiv1beta1.TenantsSpec{
				Mode: lokiv1beta1.OpenshiftLogging,
			},
			Template: &lokiv1beta1.LokiTemplateSpec{
				Gateway: &lokiv1beta1.LokiComponentSpec{
					Replicas: 1,
				},
			},
		},
	}

	sm := NewGatewayServiceMonitor(opt)
	require.Len(t, sm.Spec.Endpoints, 2)
}
--------------------------------------------------------------------------------
/internal/sizes/predict.go:
--------------------------------------------------------------------------------
package sizes

import (
	"context"
	"fmt"
	"os"
	"time"

	"github.com/ViaQ/logerr/kverrors"

	"github.com/prometheus/client_golang/api"
	promv1 "github.com/prometheus/client_golang/api/prometheus/v1"
	"github.com/prometheus/common/config"
	"github.com/prometheus/common/model"
)

var (
	metricsClient Client

	// durationDataHour is the default time duration to consider for metric scraping.
	// It is passed as first parameter to predict_linear.
	durationDataHour = "1h"
	// timeoutClient is the timeout duration for prometheus client.
	timeoutClient = 10 * time.Second

	// promURL is the URL of the prometheus thanos querier
	promURL string
	// promToken is the token to connect to prometheus thanos querier.
	promToken string
)

// client wraps a prometheus v1 API with a per-query timeout.
type client struct {
	api     promv1.API
	timeout time.Duration
}

// Client is the interface which contains methods for querying and extracting metrics.
type Client interface {
	LogLoggedBytesReceivedTotal(duration model.Duration) (float64, error)
}

// newClient builds a prometheus API client that authenticates with the given
// bearer token. NOTE(review): TLS verification is disabled
// (InsecureSkipVerify: true) — confirm this is intended for the target cluster.
func newClient(url, token string) (*client, error) {
	httpConfig := config.HTTPClientConfig{
		BearerToken: config.Secret(token),
		TLSConfig: config.TLSConfig{
			InsecureSkipVerify: true,
		},
	}

	rt, rtErr := config.NewRoundTripperFromConfig(httpConfig, "size-calculator-metrics")

	if rtErr != nil {
		return nil, kverrors.Wrap(rtErr, "failed creating prometheus configuration")
	}

	pc, err := api.NewClient(api.Config{
		Address:      url,
		RoundTripper: rt,
	})
	if err != nil {
		return nil, kverrors.Wrap(err, "failed creating prometheus client")
	}

	return &client{
		api:     promv1.NewAPI(pc),
		timeout: timeoutClient,
	}, nil
}

// executeScalarQuery runs an instant query and reduces the result to a single
// float64: a scalar's value, the first sample of a vector, or 0.0 for an
// empty vector. Any other result type is reported as an error.
func (c *client) executeScalarQuery(query string) (float64, error) {
	ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
	defer cancel()

	res, _, err := c.api.Query(ctx, query, time.Now())
	if err != nil {
		return 0.0, kverrors.Wrap(err, "failed executing query",
			"query", query)
	}

	if res.Type() == model.ValScalar {
		value := res.(*model.Scalar)
		return float64(value.Value), nil
	}

	if res.Type() == model.ValVector {
		vec := res.(model.Vector)
		if vec.Len() == 0 {
			return 0.0, nil
		}

		return float64(vec[0].Value), nil
	}

	return 0.0, kverrors.Wrap(nil, "failed to parse result for query",
		"query", query)
}

// LogLoggedBytesReceivedTotal extrapolates the log_logged_bytes_total counter
// over the given duration with predict_linear, based on the last
// durationDataHour of samples.
func (c *client) LogLoggedBytesReceivedTotal(duration model.Duration) (float64, error) {
	query := fmt.Sprintf(
		`sum(predict_linear(log_logged_bytes_total[%s], %d))`,
		durationDataHour,
		int(time.Duration(duration).Seconds()),
	)

	return c.executeScalarQuery(query)
}

// PredictFor takes the default duration and predicts
// the amount of logs expected in 1 day
func PredictFor(duration model.Duration) (logsCollected float64, err error) {
	promURL = os.Getenv("PROMETHEUS_URL")
	promToken = os.Getenv("PROMETHEUS_TOKEN")

	// Create a client to collect metrics
	metricsClient, err = newClient(promURL, promToken)
	if err != nil {
		return 0, kverrors.Wrap(err, "Failed to create metrics client")
	}

	logsCollected, err = metricsClient.LogLoggedBytesReceivedTotal(duration)
	if err != nil {
		return 0, err
	}

	return logsCollected, nil
}
--------------------------------------------------------------------------------
/internal/status/components.go:
--------------------------------------------------------------------------------
package status

import (
	"context"

	"github.com/ViaQ/logerr/kverrors"
	lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1"
	"github.com/ViaQ/loki-operator/internal/external/k8s"
	"github.com/ViaQ/loki-operator/internal/manifests"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// SetComponentsStatus updates the pod status map component
// for every LokiStack component, then persists the status subresource.
// A missing LokiStack is treated as a no-op (returns nil).
func SetComponentsStatus(ctx context.Context, k k8s.Client, req ctrl.Request) error {
	var s lokiv1beta1.LokiStack
	if err := k.Get(ctx, req.NamespacedName, &s); err != nil {
		if apierrors.IsNotFound(err) {
			return nil
		}
		return kverrors.Wrap(err, "failed to lookup lokistack", "name", req.NamespacedName)
	}

	var err error
	s.Status.Components = lokiv1beta1.LokiStackComponentStatus{}
	s.Status.Components.Compactor, err = appendPodStatus(ctx, k, manifests.LabelCompactorComponent, s.Name, s.Namespace)
	if err != nil {
		return kverrors.Wrap(err, "failed lookup LokiStack component pods status", "name", manifests.LabelCompactorComponent)
	}

	s.Status.Components.Querier, err = appendPodStatus(ctx, k, manifests.LabelQuerierComponent, s.Name, s.Namespace)
	if err != nil {
		return kverrors.Wrap(err, "failed lookup LokiStack component pods status", "name", manifests.LabelQuerierComponent)
	}

	s.Status.Components.Distributor, err = appendPodStatus(ctx, k, manifests.LabelDistributorComponent, s.Name, s.Namespace)
	if err != nil {
		return kverrors.Wrap(err, "failed lookup LokiStack component pods status", "name", manifests.LabelDistributorComponent)
	}

	s.Status.Components.QueryFrontend, err = appendPodStatus(ctx, k, manifests.LabelQueryFrontendComponent, s.Name, s.Namespace)
	if err != nil {
		return kverrors.Wrap(err, "failed lookup LokiStack component pods status", "name", manifests.LabelQueryFrontendComponent)
	}

	s.Status.Components.Ingester, err = appendPodStatus(ctx, k, manifests.LabelIngesterComponent, s.Name, s.Namespace)
	if err != nil {
		return kverrors.Wrap(err, "failed lookup LokiStack component pods status", "name", manifests.LabelIngesterComponent)
	}

	s.Status.Components.Gateway, err = appendPodStatus(ctx, k, manifests.LabelGatewayComponent, s.Name, s.Namespace)
	if err != nil {
		return kverrors.Wrap(err, "failed lookup LokiStack component pods status", "name", manifests.LabelGatewayComponent)
	}
	return k.Status().Update(ctx, &s, &client.UpdateOptions{})
}

// appendPodStatus lists the pods of one LokiStack component (matched by
// component labels within the stack's namespace) and groups their names
// by pod phase.
func appendPodStatus(ctx context.Context, k k8s.Client, component, stack, ns string) (lokiv1beta1.PodStatusMap, error) {
	psm := lokiv1beta1.PodStatusMap{}
	pods := &corev1.PodList{}
	opts := []client.ListOption{
		client.MatchingLabels(manifests.ComponentLabels(component, stack)),
		client.InNamespace(ns),
	}
	if err := k.List(ctx, pods, opts...); err != nil {
		return nil, kverrors.Wrap(err, "failed to list pods for LokiStack component", "name", stack, "component", component)
	}
	for _, pod := range pods.Items {
		phase := pod.Status.Phase
		psm[phase] = append(psm[phase], pod.Name)
	}
	return psm, nil
}
--------------------------------------------------------------------------------
/internal/status/components_test.go:
--------------------------------------------------------------------------------
package status_test

import (
	"context"
	"testing"

	lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1"
	"github.com/ViaQ/loki-operator/internal/external/k8s/k8sfakes"
	"github.com/ViaQ/loki-operator/internal/status"
	"github.com/stretchr/testify/require"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func TestSetComponentsStatus_WhenGetLokiStackReturnsError_ReturnError(t *testing.T) {
	k := &k8sfakes.FakeClient{}

	r := ctrl.Request{
		NamespacedName: types.NamespacedName{
			Name:      "my-stack",
			Namespace: "some-ns",
		},
	}

	k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error {
		return apierrors.NewBadRequest("something wasn't found")
	}

	err := status.SetComponentsStatus(context.TODO(), k, r)
	require.Error(t, err)
}

func TestSetComponentsStatus_WhenGetLokiStackReturnsNotFound_DoNothing(t *testing.T) {
	k := &k8sfakes.FakeClient{}

	r := ctrl.Request{
		NamespacedName: types.NamespacedName{
			Name:      "my-stack",
			Namespace: "some-ns",
		},
	}

	k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error {
		return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found")
	}

	err := status.SetComponentsStatus(context.TODO(), k, r)
	require.NoError(t, err)
}

func TestSetComponentsStatus_WhenListReturnError_ReturnError(t *testing.T) {
	sw := &k8sfakes.FakeStatusWriter{}
	k := &k8sfakes.FakeClient{}

	k.StatusStub = func() client.StatusWriter { return sw }

	s := lokiv1beta1.LokiStack{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-stack",
			Namespace: "some-ns",
		},
	}

	r := ctrl.Request{
		NamespacedName: types.NamespacedName{
			Name:      "my-stack",
			Namespace: "some-ns",
		},
	}

	k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error {
		if r.Name == name.Name && r.Namespace == name.Namespace {
			k.SetClientObject(object, &s)
			return nil
		}
		return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found")
	}

	k.ListStub = func(_ context.Context, l client.ObjectList, opts ...client.ListOption) error {
		return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found")
	}

	err := status.SetComponentsStatus(context.TODO(), k, r)
	require.Error(t, err)
}

func TestSetComponentsStatus_WhenPodListExisting_SetPodStatusMap(t *testing.T) {
	sw := &k8sfakes.FakeStatusWriter{}
	k := &k8sfakes.FakeClient{}

	k.StatusStub = func() client.StatusWriter { return sw }

	s := lokiv1beta1.LokiStack{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-stack",
			Namespace: "some-ns",
		},
	}

	r := ctrl.Request{
		NamespacedName: types.NamespacedName{
			Name:      "my-stack",
			Namespace: "some-ns",
		},
	}

	k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object) error {
		if r.Name == name.Name && r.Namespace == name.Namespace {
			k.SetClientObject(object, &s)
			return nil
		}
		return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found")
	}

	k.ListStub = func(_ context.Context, l client.ObjectList, _ ...client.ListOption) error {
		pods := v1.PodList{
			Items: []v1.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "pod-a",
					},
					Status: v1.PodStatus{
						Phase: v1.PodPending,
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "pod-b",
					},
					Status: v1.PodStatus{
						Phase: v1.PodRunning,
					},
				},
			},
		}
		k.SetClientObjectList(l, &pods)
		return nil
	}

	expected := lokiv1beta1.PodStatusMap{
		"Pending": []string{"pod-a"},
		"Running": []string{"pod-b"},
	}

	sw.UpdateStub = func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error {
		stack := obj.(*lokiv1beta1.LokiStack)
		require.Equal(t, expected, stack.Status.Components.Compactor)
		return nil
	}

	err := status.SetComponentsStatus(context.TODO(), k, r)
	require.NoError(t, err)
	require.NotZero(t, k.ListCallCount())
	require.NotZero(t, k.StatusCallCount())
	require.NotZero(t, sw.UpdateCallCount())
}
--------------------------------------------------------------------------------
/internal/status/status.go:
--------------------------------------------------------------------------------
package status

import (
	"context"

	"github.com/ViaQ/logerr/kverrors"
	lokiv1beta1 "github.com/ViaQ/loki-operator/api/v1beta1"
	"github.com/ViaQ/loki-operator/internal/external/k8s"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	ctrl "sigs.k8s.io/controller-runtime"
)

// Refresh executes an aggregate update of the LokiStack Status struct, i.e.
// - It recreates the Status.Components pod status map per component.
17 | // - It sets the appropriate Status.Condition to true that matches the pod status maps. 18 | func Refresh(ctx context.Context, k k8s.Client, req ctrl.Request) error { 19 | if err := SetComponentsStatus(ctx, k, req); err != nil { 20 | return err 21 | } 22 | 23 | var s lokiv1beta1.LokiStack 24 | if err := k.Get(ctx, req.NamespacedName, &s); err != nil { 25 | if apierrors.IsNotFound(err) { 26 | return nil 27 | } 28 | return kverrors.Wrap(err, "failed to lookup lokistack", "name", req.NamespacedName) 29 | } 30 | 31 | cs := s.Status.Components 32 | 33 | // Check for failed pods first 34 | failed := len(cs.Compactor[corev1.PodFailed]) + 35 | len(cs.Distributor[corev1.PodFailed]) + 36 | len(cs.Ingester[corev1.PodFailed]) + 37 | len(cs.Querier[corev1.PodFailed]) + 38 | len(cs.QueryFrontend[corev1.PodFailed]) 39 | 40 | unknown := len(cs.Compactor[corev1.PodUnknown]) + 41 | len(cs.Distributor[corev1.PodUnknown]) + 42 | len(cs.Ingester[corev1.PodUnknown]) + 43 | len(cs.Querier[corev1.PodUnknown]) + 44 | len(cs.QueryFrontend[corev1.PodUnknown]) 45 | 46 | if failed != 0 || unknown != 0 { 47 | return SetFailedCondition(ctx, k, req) 48 | } 49 | 50 | // Check for pending pods 51 | pending := len(cs.Compactor[corev1.PodPending]) + 52 | len(cs.Distributor[corev1.PodPending]) + 53 | len(cs.Ingester[corev1.PodPending]) + 54 | len(cs.Querier[corev1.PodPending]) + 55 | len(cs.QueryFrontend[corev1.PodPending]) 56 | 57 | if pending != 0 { 58 | return SetPendingCondition(ctx, k, req) 59 | } 60 | return SetReadyCondition(ctx, k, req) 61 | } 62 | -------------------------------------------------------------------------------- /tools/tools.go: -------------------------------------------------------------------------------- 1 | // +build tools 2 | 3 | package tools 4 | 5 | import ( 6 | _ "github.com/maxbrunsfeld/counterfeiter/v6" 7 | ) 8 | 9 | // This file imports packages that are used when running go generate, or used 10 | // during the development process but not otherwise depended on 
by built code. 11 | --------------------------------------------------------------------------------