├── .github ├── ISSUE_TEMPLATE │ ├── bug.yaml │ ├── config.yml │ └── feat_req.yaml ├── workflows │ ├── e2e-test.yaml │ ├── golangci-lint.yaml │ ├── helm-lint.yaml │ ├── helm-release.yaml │ ├── helm-test.yaml │ ├── push.yaml │ ├── unit-test.yaml │ └── yamllint.yaml └── zizmor.yml ├── .gitignore ├── .yamllint.yaml ├── Dockerfile.controller ├── Dockerfile.runner ├── Dockerfile.starter ├── LICENSE ├── Makefile ├── PROJECT ├── README.md ├── api └── v1alpha1 │ ├── groupversion_info.go │ ├── k6conditions.go │ ├── plzconditions.go │ ├── privateloadzone_types.go │ ├── testrun_types.go │ └── zz_generated.deepcopy.go ├── assets └── data-flow.png ├── bundle.yaml ├── charts ├── .helmdocsignore ├── .helmignore ├── .yamllint ├── cr.yaml ├── ct.yaml └── k6-operator │ ├── Chart.yaml │ ├── README.md │ ├── README.md.gotmpl │ ├── samples │ ├── customAnnotationsAndLabels.yaml │ └── serviceMonitorCustomLabels.yaml │ ├── templates │ ├── NOTES.txt │ ├── _helpers.tpl │ ├── clusterRole.yaml │ ├── clusterRoleBinding.yaml │ ├── crds │ │ ├── plz.yaml │ │ └── testrun.yaml │ ├── deployment.yaml │ ├── metrics │ │ └── serviceMonitor.yaml │ ├── namespace.yaml │ ├── role.yaml │ ├── roleBinding.yaml │ ├── service.yaml │ └── serviceAccount.yaml │ ├── values.schema.json │ └── values.yaml ├── config ├── certmanager │ ├── certificate.yaml │ ├── kustomization.yaml │ └── kustomizeconfig.yaml ├── crd │ ├── bases │ │ ├── k6.io_privateloadzones.yaml │ │ └── k6.io_testruns.yaml │ ├── kustomization.yaml │ ├── kustomizeconfig.yaml │ └── patches │ │ ├── cainjection_in_privateloadzones.yaml │ │ ├── cainjection_in_testruns.yaml │ │ ├── webhook_in_privateloadzones.yaml │ │ └── webhook_in_testruns.yaml ├── default │ ├── kustomization.yaml │ ├── manager_auth_proxy_patch.yaml │ ├── manager_webhook_patch.yaml │ └── webhookcainjection_patch.yaml ├── manager │ ├── kustomization.yaml │ └── manager.yaml ├── manifests │ ├── bases │ │ └── k6-operator.clusterserviceversion.yaml │ └── kustomization.yaml ├── prometheus │ ├── kustomization.yaml │ └── monitor.yaml ├── rbac │ ├── auth_proxy_client_clusterrole.yaml │ ├── auth_proxy_role.yaml │ ├── auth_proxy_role_binding.yaml │ ├── auth_proxy_service.yaml │ ├── k6_editor_role.yaml │ ├── k6_viewer_role.yaml │ ├── kustomization.yaml │ ├── leader_election_role.yaml │ ├── leader_election_role_binding.yaml │ ├── privateloadzone_editor_role.yaml │ ├── privateloadzone_viewer_role.yaml │ ├── role.yaml │ ├── role_binding.yaml │ ├── service_account.yaml │ ├── testrun_editor_role.yaml │ └── testrun_viewer_role.yaml ├── samples │ ├── k6_v1alpha1_configmap.yaml │ ├── k6_v1alpha1_k6.yaml │ ├── k6_v1alpha1_k6_with_initContainers.yaml │ ├── k6_v1alpha1_k6_with_localfile.yaml │ ├── k6_v1alpha1_k6_with_output.yaml │ ├── k6_v1alpha1_k6_with_readOnlyVolumeClaim.yaml │ ├── k6_v1alpha1_k6_with_securitycontext.yaml │ ├── k6_v1alpha1_k6_with_topologyspreadconstraints.yaml │ ├── k6_v1alpha1_k6_with_volumeClaim.yaml │ ├── k6_v1alpha1_privateloadzone.yaml │ ├── k6_v1alpha1_testrun.yaml │ └── kustomization.yaml ├── scorecard │ ├── bases │ │ └── config.yaml │ ├── kustomization.yaml │ └── patches │ │ ├── basic.config.yaml │ │ └── olm.config.yaml └── webhook │ ├── kustomization.yaml │ ├── kustomizeconfig.yaml │ └── service.yaml ├── controllers ├── common.go ├── k6_create.go ├── k6_finish.go ├── k6_initialize.go ├── k6_start.go ├── k6_stop.go ├── k6_stopped_jobs.go ├── plz_controller.go ├── suite_test.go └── testrun_controller.go ├── docs ├── env-vars.md ├── on-native-distributed-execution.md ├── plz.md ├── releases.md └── versioning.md 
├── e2e ├── README.md ├── basic-testrun-1 │ ├── manifests │ │ ├── configmap.yaml │ │ └── kustomization.yaml │ ├── test.js │ └── testrun.yaml ├── basic-testrun-4 │ ├── manifests │ │ ├── configmap.yaml │ │ ├── kustomization.yaml │ │ └── namespace.yaml │ ├── test.js │ └── testrun.yaml ├── error-stage │ ├── manifests │ │ ├── configmap.yaml │ │ └── kustomization.yaml │ ├── test.js │ └── testrun.yaml ├── init-container-volume │ ├── manifests │ │ └── kustomization.yaml │ ├── test.js │ └── testrun.yaml ├── ipv6 │ └── kind-ipv6.yaml ├── latest │ ├── ClusterRole-k6-operator-manager-role.yml │ ├── ClusterRole-k6-operator-metrics-reader.yml │ ├── ClusterRole-k6-operator-proxy-role.yml │ ├── ClusterRoleBinding-k6-operator-manager-rolebinding.yml │ ├── ClusterRoleBinding-k6-operator-proxy-rolebinding.yml │ ├── CustomResourceDefinition-privateloadzones.k6.io.yaml │ ├── CustomResourceDefinition-testruns.k6.io.yaml │ ├── Deployment-k6-operator-controller-manager.yml │ ├── Namespace-k6-operator-system.yml │ ├── Role-k6-operator-leader-election-role.yml │ ├── RoleBinding-k6-operator-leader-election-rolebinding.yml │ ├── Service-k6-operator-controller-manager-metrics-service.yml │ ├── ServiceAccount-k6-operator-controller.yml │ └── kustomization.yaml ├── multifile │ ├── manifests │ │ ├── configmap.yaml │ │ └── kustomization.yaml │ ├── test.js │ └── testrun.yaml ├── run-tests.sh ├── test-initcontainer-volumes.yaml ├── test-initcontainer.yaml ├── test.js ├── test.yaml ├── testrun-archive │ ├── manifests │ │ ├── configmap.yaml │ │ └── kustomization.yaml │ ├── test.js │ └── testrun.yaml ├── testrun-cleanup │ ├── manifests │ │ ├── configmap.yaml │ │ ├── kustomization.yaml │ │ └── namespace.yaml │ ├── test.js │ └── testrun.yaml ├── testrun-cloud-output │ ├── manifests │ │ ├── configmap.yaml │ │ ├── kustomization.yaml │ │ ├── namespace.yaml │ │ └── secret.yaml │ ├── test.js │ └── testrun.yaml ├── testrun-simultaneous-cloud-output │ ├── manifests │ │ ├── configmap.yaml │ │ ├── kustomization.yaml │ │ ├── namespace.yaml │ │ └── secret.yaml │ ├── test.js │ ├── testrun1.yaml │ └── testrun2.yaml ├── testrun-simultaneous │ ├── manifests │ │ ├── configmap.yaml │ │ └── kustomization.yaml │ ├── test.js │ ├── testrun1.yaml │ └── testrun2.yaml └── testrun-watch-namespace │ ├── manifests │ ├── configmap.yaml │ ├── invisible-ns.yaml │ ├── kustomization.yaml │ └── some-ns.yaml │ ├── test.js │ ├── testrun-invisible.yaml │ └── testrun.yaml ├── go.mod ├── go.sum ├── hack └── boilerplate.go.txt ├── main.go └── pkg ├── cloud ├── aggregation.go ├── aggregation_test.go ├── cloud_output.go ├── conn │ ├── poller.go │ └── poller_test.go ├── plz.go ├── resources_test.go ├── test_runs.go ├── types.go └── types_test.go ├── plz ├── scheme.go ├── worker.go ├── worker_test.go └── workers.go ├── resources ├── containers │ ├── curl_start.go │ ├── curl_stop.go │ └── s3.go └── jobs │ ├── helpers.go │ ├── helpers_test.go │ ├── initializer.go │ ├── initializer_test.go │ ├── runner.go │ ├── runner_test.go │ ├── starter.go │ ├── starter_test.go │ ├── stopper.go │ └── stopper_test.go ├── segmentation ├── segmentation.go └── suite_test.go ├── testrun ├── k6client.go └── template.go └── types ├── conditions.go ├── conditions_test.go ├── k6cli.go ├── k6cli_test.go ├── k6status.go └── script.go /.github/ISSUE_TEMPLATE/bug.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | description: Use this template for reporting bugs. Please search existing issues first. 
4 | labels: bug 5 | body: 6 | - type: textarea 7 | attributes: 8 | label: Brief summary 9 | validations: 10 | required: true 11 | - type: markdown 12 | attributes: 13 | value: '## Environment' 14 | - type: input 15 | attributes: 16 | label: k6-operator version or image 17 | validations: 18 | required: true 19 | - type: input 20 | attributes: 21 | label: Helm chart version (if applicable) 22 | validations: 23 | required: false 24 | - type: textarea 25 | attributes: 26 | label: TestRun / PrivateLoadZone YAML 27 | placeholder: Paste your YAML ```with 3 backticks to include formatting``` 28 | validations: 29 | required: true 30 | - type: input 31 | attributes: 32 | label: Other environment details (if applicable) 33 | - type: markdown 34 | attributes: 35 | value: '## Detailed issue description' 36 | - type: textarea 37 | attributes: 38 | label: Steps to reproduce the problem 39 | validations: 40 | required: true 41 | - type: textarea 42 | attributes: 43 | label: Expected behaviour 44 | validations: 45 | required: true 46 | - type: textarea 47 | attributes: 48 | label: Actual behaviour 49 | validations: 50 | required: true 51 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: true 2 | contact_links: 3 | - name: Grafana Community Forum - k6-operator 4 | url: https://community.grafana.com/c/grafana-k6/k6-operator/73 5 | about: Please ask and answer questions here. 6 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feat_req.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature Request 3 | description: Use this template for suggesting new features. 4 | labels: enhancement 5 | body: 6 | - type: textarea 7 | attributes: 8 | label: Feature Description 9 | description: A clear and concise description of the problem or missing capability 10 | validations: 11 | required: true 12 | - type: textarea 13 | attributes: 14 | label: Suggested Solution (optional) 15 | description: If you have a solution in mind, please describe it. 16 | - type: textarea 17 | attributes: 18 | label: Already existing or connected issues / PRs (optional) 19 | description: If you have found some issues or pull requests that are related to your new issue, please link them here. 20 | -------------------------------------------------------------------------------- /.github/workflows/golangci-lint.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: "Golang Lint" 3 | permissions: {} 4 | on: 5 | - push 6 | - pull_request 7 | 8 | jobs: 9 | golangci: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4 13 | with: 14 | persist-credentials: false 15 | - uses: actions/setup-go@v5 16 | with: 17 | go-version: '1.23' 18 | cache: false 19 | - name: lint 20 | # this is v6.5.2 21 | uses: golangci/golangci-lint-action@55c2c1448f86e01eaae002a5a3a9624417608d84 22 | with: 23 | version: v1.61 24 | args: --timeout=5m 25 | -------------------------------------------------------------------------------- /.github/workflows/helm-lint.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Helm Lint 3 | permissions: {} 4 | on: 5 | # `ct lint` does not work well with tag references on releases. 
6 | # OTOH, Helm linting on tags is not necessary so long as it 7 | # happens on push to branches. 8 | push: 9 | branches: 10 | - '**' 11 | paths: 12 | - 'charts/**' 13 | pull_request: 14 | branches-ignore: 15 | - 'release-**/bundle-update' 16 | 17 | jobs: 18 | lint: 19 | runs-on: ubuntu-latest 20 | steps: 21 | - name: Checkout 22 | uses: actions/checkout@v4 23 | with: 24 | persist-credentials: false 25 | fetch-depth: 0 26 | 27 | - name: Set up Helm 28 | # this is v4.3.0 29 | uses: azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 30 | with: 31 | version: v3.7.2 32 | 33 | - uses: actions/setup-python@v2 34 | with: 35 | python-version: 3.13.1 36 | 37 | - name: Set up chart-testing 38 | # this is v2.7.0 39 | uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b 40 | 41 | - name: Run chart-testing (lint) 42 | run: | 43 | ct lint --config ./charts/ct.yaml 44 | -------------------------------------------------------------------------------- /.github/workflows/helm-release.yaml: -------------------------------------------------------------------------------- 1 | 2 | name: Helm release 3 | permissions: {} 4 | 5 | on: 6 | workflow_dispatch: {} 7 | push: 8 | branches: 9 | - main 10 | paths: 11 | - 'charts/k6-operator/Chart.yaml' 12 | 13 | jobs: 14 | generate-chart-schema: 15 | runs-on: ubuntu-latest 16 | permissions: 17 | contents: write 18 | pull-requests: write 19 | 20 | steps: 21 | - name: Checkout repository 22 | uses: actions/checkout@v2 23 | with: 24 | persist-credentials: true # this job is opening a PR in the next steps 25 | 26 | - name: Make changes to the file 27 | run: | 28 | make helm-schema 29 | 30 | - name: Git diff 31 | id: git-diff 32 | continue-on-error: true 33 | run: | 34 | if git diff --exit-code; then 35 | echo "changes=false" >> $GITHUB_OUTPUT 36 | else 37 | echo "changes=true" >> $GITHUB_OUTPUT 38 | fi 39 | 40 | - name: "Commit changes and make PR" 41 | if: steps.git-diff.outputs.changes == 'true' 42 | run: | 43 | git checkout -b helm/schema-update 44 | git add charts/k6-operator/values.schema.json 45 | git config user.name "github-actions[bot]" 46 | git config user.email "41898282+github-actions[bot]@users.noreply.github.com" 47 | 48 | git commit -m "auto: generate schema json" 49 | git push -u origin helm/schema-update 50 | gh pr create --title "Helm schema update" -B main -H helm/schema-update --body "Auto-generated by GitHub Workflow" 51 | 52 | # We're failing here so that the maintainer can review the PR and merge it. Then the Helm release should be re-run. 53 | echo "Review the PR with schema update and re-run Helm release."
54 | exit 1 55 | env: 56 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 57 | 58 | call-update-helm-repo: 59 | permissions: 60 | packages: write 61 | id-token: write 62 | contents: write 63 | needs: 64 | - generate-chart-schema 65 | uses: grafana/helm-charts/.github/workflows/update-helm-repo.yaml@main 66 | with: 67 | charts_dir: charts 68 | cr_configfile: charts/cr.yaml 69 | ct_configfile: charts/ct.yaml 70 | helm_tag_prefix: helm 71 | secrets: 72 | vault_repo_secret_name: github-app -------------------------------------------------------------------------------- /.github/workflows/helm-test.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Helm Test 3 | permissions: {} 4 | on: 5 | push: 6 | # run only on branches and not tags 7 | branches: 8 | - '**' 9 | paths: 10 | - 'charts/**' 11 | pull_request: 12 | branches-ignore: 13 | - 'release-**/bundle-update' 14 | 15 | jobs: 16 | test: 17 | runs-on: ubuntu-latest 18 | steps: 19 | - name: Checkout 20 | uses: actions/checkout@v4 21 | with: 22 | fetch-depth: 0 23 | persist-credentials: false 24 | 25 | - name: Set up Helm 26 | # this is v4.3.0 27 | uses: azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 28 | with: 29 | version: v3.7.2 30 | 31 | - uses: actions/setup-python@v2 32 | with: 33 | python-version: 3.13.1 34 | 35 | - name: Set up chart-testing 36 | # this is v2.7.0 37 | uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b 38 | 39 | - name: Run chart-testing (list-changed) 40 | id: list-changed 41 | run: | 42 | changed=$(ct list-changed --config ./charts/ct.yaml) 43 | 44 | if [[ -n "$changed" ]]; then 45 | echo "changed=true" >> $GITHUB_OUTPUT 46 | fi 47 | 48 | - name: Create kind cluster 49 | # this is v1.12.0 50 | uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 51 | if: steps.list-changed.outputs.changed == 'true' 52 | 53 | - name: Run chart-testing (install) 54 | if: steps.list-changed.outputs.changed == 'true' 55 | run: | 56 | ct install --config ./charts/ct.yaml --helm-extra-set-args "--set=namespace.create=false" 57 | -------------------------------------------------------------------------------- /.github/workflows/unit-test.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: "Unit Test" 3 | permissions: {} 4 | on: 5 | - push 6 | - pull_request 7 | jobs: 8 | test: 9 | strategy: 10 | matrix: 11 | go-version: [1.23.x] 12 | k8s_version: [1.24.1, 1.27.1, 1.30.0] 13 | os: [ubuntu-latest] 14 | runs-on: ${{ matrix.os }} 15 | steps: 16 | - name: Install Go 17 | uses: actions/setup-go@v5 18 | with: 19 | go-version: ${{ matrix.go-version }} 20 | - name: Checkout code 21 | uses: actions/checkout@v4 22 | with: 23 | persist-credentials: false 24 | - name: Test 25 | run: | 26 | make test-setup 27 | make test 28 | env: 29 | ENVTEST_K8S_VERSION: ${{ matrix.k8s_version }} 30 | -------------------------------------------------------------------------------- /.github/workflows/yamllint.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Yaml Lint 3 | permissions: {} 4 | on: 5 | - push 6 | - pull_request 7 | 8 | jobs: 9 | lintAllTheThings: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4 13 | with: 14 | persist-credentials: false 15 | - name: yaml-lint 16 | # this is v3.1.1 17 | uses: ibiqlik/action-yamllint@2576378a8e339169678f9939646ee3ee325e845c 18 | with: 19 | file_or_dir: config/**/*.yaml e2e/*.yaml 20 | config_file: 
.yamllint.yaml 21 | -------------------------------------------------------------------------------- /.github/zizmor.yml: -------------------------------------------------------------------------------- 1 | rules: 2 | cache-poisoning: 3 | ignore: 4 | - e2e-test.yaml:30 5 | - push.yaml:56 -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | bin 8 | 9 | # Test binary, build with `go test -c` 10 | *.test 11 | 12 | # Output of the go coverage tool, specifically when used with LiteIDE 13 | *.out 14 | 15 | # Kubernetes Generated files - skip generated files, except for vendored files 16 | !vendor/**/zz_generated.* 17 | 18 | # editor and IDE paraphernalia 19 | .idea 20 | *.swp 21 | *.swo 22 | *~ 23 | 24 | /manager 25 | 26 | kubebuilder-tools-* 27 | 28 | .vscode 29 | -------------------------------------------------------------------------------- /.yamllint.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | extends: default 3 | ignore: | 4 | /config/crd/bases/* 5 | /charts/* 6 | rules: 7 | line-length: disable 8 | comments: 9 | min-spaces-from-content: 1 10 | trailing-spaces: 11 | level: warning 12 | indentation: disable # kustomize can rewrite yaml files and break this rule 13 | truthy: 14 | ignore: | 15 | /.github/workflows 16 | -------------------------------------------------------------------------------- /Dockerfile.controller: -------------------------------------------------------------------------------- 1 | ARG GO_BUILDER_IMG 2 | # Build the manager binary 3 | FROM ${GO_BUILDER_IMG} AS builder 4 | 5 | WORKDIR /workspace 6 | # Copy the Go Modules manifests 7 | COPY go.mod go.mod 8 | COPY go.sum go.sum 9 | # cache deps before building and copying source so that we don't need to re-download as much 10 | # and so that source changes don't invalidate our downloaded layer 11 | RUN go mod download 12 | 13 | # Copy the go source 14 | COPY main.go main.go 15 | COPY api/ api/ 16 | COPY controllers/ controllers/ 17 | COPY pkg/ pkg/ 18 | 19 | 20 | # Build 21 | RUN CGO_ENABLED=0 GOOS=linux GO111MODULE=on go build -a -o manager main.go 22 | 23 | # Use distroless as minimal base image to package the manager binary 24 | # Refer to https://github.com/GoogleContainerTools/distroless for more details 25 | FROM gcr.io/distroless/static:nonroot 26 | WORKDIR / 27 | COPY --from=builder /workspace/manager . 
28 | 29 | # as defined by distroless for nonroot 30 | USER 65532:65532 31 | 32 | ENTRYPOINT ["/manager"] 33 | -------------------------------------------------------------------------------- /Dockerfile.runner: -------------------------------------------------------------------------------- 1 | FROM grafana/k6:latest 2 | 3 | COPY --from=kvij/scuttle:latest scuttle /bin/scuttle 4 | ENTRYPOINT ["scuttle", "k6"] 5 | -------------------------------------------------------------------------------- /Dockerfile.starter: -------------------------------------------------------------------------------- 1 | FROM curlimages/curl:latest 2 | 3 | # as defined by distroless for nonroot 4 | USER 65532:65532 5 | COPY --from=kvij/scuttle:latest /scuttle /bin/scuttle 6 | -------------------------------------------------------------------------------- /PROJECT: -------------------------------------------------------------------------------- 1 | # Code generated by tool. DO NOT EDIT. 2 | # This file is used to track the info used to scaffold your project 3 | # and allow the plugins to work properly. 4 | # More info: https://book.kubebuilder.io/reference/project-config.html 5 | domain: io 6 | layout: 7 | - go.kubebuilder.io/v2 8 | plugins: 9 | go.sdk.operatorframework.io/v2-alpha: {} 10 | projectName: k6-operator 11 | repo: github.com/grafana/k6-operator 12 | resources: 13 | - controller: true 14 | domain: io 15 | group: k6 16 | kind: K6 17 | path: github.com/grafana/k6-operator/api/v1alpha1 18 | version: v1alpha1 19 | - api: 20 | crdVersion: v1beta1 21 | namespaced: true 22 | controller: true 23 | domain: io 24 | group: k6 25 | kind: PrivateLoadZone 26 | path: github.com/grafana/k6-operator/api/v1alpha1 27 | version: v1alpha1 28 | - api: 29 | crdVersion: v1beta1 30 | namespaced: true 31 | controller: true 32 | domain: io 33 | group: k6 34 | kind: TestRun 35 | path: github.com/grafana/k6-operator/api/v1alpha1 36 | version: v1alpha1 37 | version: "3" 38 | -------------------------------------------------------------------------------- /api/v1alpha1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Package v1alpha1 contains API Schema definitions for the k6 v1alpha1 API group 18 | // +kubebuilder:object:generate=true 19 | // +groupName=k6.io 20 | package v1alpha1 21 | 22 | import ( 23 | "k8s.io/apimachinery/pkg/runtime/schema" 24 | "sigs.k8s.io/controller-runtime/pkg/scheme" 25 | ) 26 | 27 | var ( 28 | // GroupVersion is group version used to register these objects 29 | GroupVersion = schema.GroupVersion{Group: "k6.io", Version: "v1alpha1"} 30 | 31 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 32 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 33 | 34 | // AddToScheme adds the types in this group-version to the given scheme.
35 | AddToScheme = SchemeBuilder.AddToScheme 36 | ) 37 | -------------------------------------------------------------------------------- /api/v1alpha1/plzconditions.go: -------------------------------------------------------------------------------- 1 | package v1alpha1 2 | 3 | import ( 4 | "github.com/grafana/k6-operator/pkg/types" 5 | "k8s.io/apimachinery/pkg/api/meta" 6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 7 | ) 8 | 9 | const ( 10 | // PLZRegistered indicates if the PLZ has been registered. 11 | // - if empty / Unknown / False, call registration 12 | // - if True, do nothing 13 | PLZRegistered = "PLZRegistered" 14 | ) 15 | 16 | func (plz *PrivateLoadZone) Initialize() { 17 | t := metav1.Now() 18 | plz.Status.Conditions = []metav1.Condition{ 19 | metav1.Condition{ 20 | Type: PLZRegistered, 21 | Status: metav1.ConditionUnknown, 22 | LastTransitionTime: t, 23 | Reason: "PLZRegisteredUnknown", 24 | Message: "", 25 | }, 26 | } 27 | } 28 | func (plz PrivateLoadZone) IsTrue(conditionType string) bool { 29 | return meta.IsStatusConditionTrue(plz.Status.Conditions, conditionType) 30 | } 31 | 32 | func (plz PrivateLoadZone) IsFalse(conditionType string) bool { 33 | return meta.IsStatusConditionFalse(plz.Status.Conditions, conditionType) 34 | } 35 | 36 | func (plz PrivateLoadZone) IsUnknown(conditionType string) bool { 37 | return !plz.IsFalse(conditionType) && !plz.IsTrue(conditionType) 38 | } 39 | 40 | func (plz PrivateLoadZone) UpdateCondition(conditionType string, conditionStatus metav1.ConditionStatus) { 41 | types.UpdateCondition(&plz.Status.Conditions, conditionType, conditionStatus) 42 | } 43 | 44 | // SetIfNewer changes plzstatus only if changes in proposedStatus are newer. 45 | // If there were any acceptable changes proposed, it returns true. 46 | func (plzStatus *PrivateLoadZoneStatus) SetIfNewer(proposedStatus PrivateLoadZoneStatus) (isNewer bool) { 47 | return types.SetIfNewer(&plzStatus.Conditions, proposedStatus.Conditions, nil) 48 | } 49 | -------------------------------------------------------------------------------- /assets/data-flow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/grafana/k6-operator/4702756ad52174e7ffc3837d1f57179bbd377610/assets/data-flow.png -------------------------------------------------------------------------------- /charts/.helmdocsignore: -------------------------------------------------------------------------------- 1 | examples/ 2 | -------------------------------------------------------------------------------- /charts/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
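# An illustrative example, not part of this chart's actual rules: to ignore all markdown files except the README, two patterns are needed, one per line: first *.md, then !README.md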
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | .vscode/ -------------------------------------------------------------------------------- /charts/.yamllint: -------------------------------------------------------------------------------- 1 | --- 2 | extends: default 3 | 4 | ignore: | 5 | templates/*.yaml 6 | repository-metadata.yaml 7 | 8 | rules: 9 | document-start: disable 10 | braces: 11 | max-spaces-inside: 1 12 | level: error 13 | brackets: 14 | max-spaces-inside: 1 15 | level: error 16 | empty-lines: 17 | max: 1 18 | indentation: 19 | spaces: 2 20 | line-length: 21 | max: 200 22 | -------------------------------------------------------------------------------- /charts/cr.yaml: -------------------------------------------------------------------------------- 1 | git-repo: helm-charts 2 | owner: grafana 3 | skip-existing: true 4 | release-name-template: "helm-{{ .Name }}-{{ .Version }}" 5 | 6 | generate-release-notes: true -------------------------------------------------------------------------------- /charts/ct.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | remote: origin 3 | target-branch: main 4 | chart-dirs: 5 | - charts 6 | chart-repos: 7 | - grafana=https://grafana.github.io/helm-charts 8 | helm-extra-args: --timeout 600s 9 | check-version-increment: false 10 | validate-maintainers: false 11 | validate-yaml: true 12 | validate-schema-chart: true -------------------------------------------------------------------------------- /charts/k6-operator/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | appVersion: "0.0.21" 3 | description: A Helm chart to install the k6-operator 4 | name: k6-operator 5 | version: 3.13.1 6 | kubeVersion: ">=1.16.0-0" 7 | home: https://k6.io 8 | sources: 9 | - https://github.com/grafana/k6-operator 10 | keywords: 11 | - load-testing 12 | - smoke-testing 13 | - stress-testing 14 | - soak-testing 15 | - kubernetes 16 | - distributed-testing 17 | maintainers: 18 | - name: yorugac 19 | email: olha@k6.io 20 | icon: https://raw.githubusercontent.com/grafana/k6-docs/main/static/images/icon.png 21 | -------------------------------------------------------------------------------- /charts/k6-operator/README.md.gotmpl: -------------------------------------------------------------------------------- 1 | {{ template "chart.header" . }} 2 | 3 | {{ template "chart.versionBadge" . }}{{ template "chart.typeBadge" . }}{{ template "chart.appVersionBadge" . }} 4 | 5 | {{ template "chart.description" . }} 6 | 7 | {{ template "chart.homepageLine" . }} 8 | 9 | {{ template "chart.maintainersSection" . }} 10 | 11 | {{ template "chart.sourcesSection" . }} 12 | 13 | {{ template "chart.requirementsSection" . }} 14 | 15 | {{ template "chart.valuesSection" . }} 16 | 17 | {{ template "helm-docs.versionFooter" . 
}} -------------------------------------------------------------------------------- /charts/k6-operator/samples/customAnnotationsAndLabels.yaml: -------------------------------------------------------------------------------- 1 | customAnnotations: 2 | "customized-annotation": "k6-operator" 3 | 4 | customLabels: 5 | "customized-labels": "k6-operator" 6 | 7 | podLabels: 8 | environment: production 9 | owner: development 10 | 11 | podAnnotations: 12 | "customized-annotation": "k6-operator" 13 | 14 | nodeSelector: 15 | disktype: ssd 16 | 17 | affinity: 18 | nodeAffinity: 19 | requiredDuringSchedulingIgnoredDuringExecution: 20 | nodeSelectorTerms: 21 | - matchExpressions: 22 | - key: topology.kubernetes.io/zone 23 | operator: In 24 | values: 25 | - antarctica-east1 26 | - antarctica-west1 27 | preferredDuringSchedulingIgnoredDuringExecution: 28 | - weight: 1 29 | preference: 30 | matchExpressions: 31 | - key: another-node-label-key 32 | operator: In 33 | values: 34 | - another-node-label-value 35 | 36 | tolerations: 37 | - key: "key1" 38 | operator: "Exists" 39 | effect: "NoSchedule" 40 | 41 | authProxy: 42 | resources: 43 | limits: 44 | cpu: 100m 45 | memory: 100Mi 46 | requests: 47 | cpu: 100m 48 | memory: 50Mi 49 | 50 | manager: 51 | image: 52 | registry: ghcr.io 53 | repository: grafana/k6-operator 54 | tag: latest 55 | pullPolicy: IfNotPresent 56 | env: 57 | - name: ENV_EXAMPLE_1 58 | value: "true" 59 | - name: ENV_EXAMPLE_2 60 | value: "1" 61 | - name: ENV_EXAMPLE_3 62 | value: "testing" 63 | readinessProbe: 64 | failureThreshold: 3 65 | httpGet: 66 | path: /readyz 67 | port: 8081 68 | scheme: HTTP 69 | initialDelaySeconds: 20 70 | periodSeconds: 5 71 | successThreshold: 1 72 | timeoutSeconds: 10 73 | livenessProbe: 74 | failureThreshold: 3 75 | httpGet: 76 | path: /healthz 77 | port: 8081 78 | scheme: HTTP 79 | initialDelaySeconds: 20 80 | periodSeconds: 5 81 | successThreshold: 1 82 | timeoutSeconds: 10 83 | -------------------------------------------------------------------------------- /charts/k6-operator/samples/serviceMonitorCustomLabels.yaml: -------------------------------------------------------------------------------- 1 | # Deploy a serviceMonitor on a different namespace, with dedicated jobLabel. 2 | metrics: 3 | serviceMonitor: 4 | enabled: true 5 | namespace: monitoring 6 | labels: 7 | foo: bar 8 | jobLabel: foo 9 | -------------------------------------------------------------------------------- /charts/k6-operator/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | Thank you for installing {{ .Chart.Name }}. 2 | 3 | Your release is named {{ .Release.Name }}. 4 | 5 | To learn more about the release, try: 6 | 7 | $ helm status {{ .Release.Name }} 8 | $ helm get all {{ .Release.Name }} 9 | -------------------------------------------------------------------------------- /charts/k6-operator/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Expand the name of the chart. 3 | */}} 4 | {{- define "k6-operator.name" -}} 5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 6 | {{- end }} 7 | 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 
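For example (illustrative, not prescribed by the chart): with the chart name "k6-operator", a release named "prod" renders the full name "prod-k6-operator", while a release named "my-k6-operator" already contains the chart name and is used as-is.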
13 | */}} 14 | {{- define "k6-operator.fullname" -}} 15 | {{- if .Values.fullnameOverride }} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 17 | {{- else }} 18 | {{- $name := default .Chart.Name .Values.nameOverride }} 19 | {{- if contains $name .Release.Name }} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 21 | {{- else }} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 23 | {{- end }} 24 | {{- end }} 25 | {{- end }} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "k6-operator.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 32 | {{- end }} 33 | 34 | {{/* 35 | Common labels 36 | */}} 37 | {{- define "k6-operator.labels" -}} 38 | helm.sh/chart: {{ include "k6-operator.chart" . }} 39 | {{ include "k6-operator.selectorLabels" . }} 40 | {{- if .Chart.AppVersion }} 41 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 42 | {{- end }} 43 | app.kubernetes.io/managed-by: {{ .Release.Service }} 44 | app.kubernetes.io/part-of: k6-operator 45 | {{- end }} 46 | 47 | {{/* 48 | Selector labels 49 | */}} 50 | {{- define "k6-operator.selectorLabels" -}} 51 | app.kubernetes.io/name: {{ include "k6-operator.name" . }} 52 | app.kubernetes.io/instance: {{ .Release.Name }} 53 | {{- end }} 54 | 55 | {{/* 56 | Create the name of the service account to use 57 | */}} 58 | {{- define "k6-operator.serviceAccountName" -}} 59 | {{- if .Values.manager.serviceAccount.create }} 60 | {{- default (include "k6-operator.fullname" .) .Values.manager.serviceAccount.name }} 61 | {{- else }} 62 | {{- default "default" .Values.manager.serviceAccount.name }} 63 | {{- end }} 64 | {{- end }} 65 | 66 | 67 | {{- define "k6-operator.customLabels" -}} 68 | {{- if .Values.customLabels }} 69 | {{- with .Values.customLabels }} 70 | {{- toYaml . }} 71 | {{- end }} 72 | {{- end }} 73 | {{- end -}} 74 | 75 | {{- define "k6-operator.podLabels" -}} 76 | {{- if .Values.podLabels }} 77 | {{- with .Values.podLabels }} 78 | {{- toYaml . }} 79 | {{- end }} 80 | {{- end }} 81 | {{- end -}} 82 | 83 | {{- define "k6-operator.customAnnotations" -}} 84 | {{- if .Values.customAnnotations }} 85 | {{- with .Values.customAnnotations }} 86 | {{- toYaml . }} 87 | {{- end }} 88 | {{- end }} 89 | {{- end -}} 90 | 91 | {{- define "k6-operator.podAnnotations" -}} 92 | {{- if .Values.podAnnotations }} 93 | {{- with .Values.podAnnotations }} 94 | {{- toYaml . 
}} 95 | {{- end }} 96 | {{- end }} 97 | {{- end -}} 98 | 99 | {{- define "k6-operator.namespace" -}} 100 | {{- if eq .Release.Namespace "default" }} 101 | {{- printf "%v-system" .Release.Name | indent 1 }} 102 | {{- else }} 103 | {{- .Release.Namespace | indent 1 }} 104 | {{- end }} 105 | {{- end -}} 106 | 107 | 108 | {{- define "k6-operator.livenessProbe" -}} 109 | {{- if .Values.authProxy.livenessProbe }} 110 | livenessProbe: 111 | {{- toYaml .Values.authProxy.livenessProbe | nindent 12 }} 112 | {{- end }} 113 | {{- end -}} 114 | 115 | {{- define "k6-operator.readinessProbe" -}} 116 | {{- if .Values.authProxy.readinessProbe }} 117 | readinessProbe: 118 | {{- toYaml .Values.authProxy.readinessProbe | nindent 12 }} 119 | {{- end }} 120 | {{- end -}} 121 | -------------------------------------------------------------------------------- /charts/k6-operator/templates/clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if not .Values.rbac.namespaced }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: {{ include "k6-operator.fullname" . }}-manager-rolebinding 6 | labels: 7 | app.kubernetes.io/component: controller 8 | {{- include "k6-operator.labels" . | nindent 4 }} 9 | {{- include "k6-operator.customLabels" . | default "" | nindent 4 }} 10 | annotations: 11 | {{- include "k6-operator.customAnnotations" . | default "" | nindent 4 }} 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: ClusterRole 15 | name: {{ include "k6-operator.fullname" . }}-manager-role 16 | subjects: 17 | - kind: ServiceAccount 18 | name: {{ include "k6-operator.serviceAccountName" . }} 19 | namespace: {{- include "k6-operator.namespace" . -}} 20 | {{- if .Values.authProxy.enabled }} 21 | --- 22 | apiVersion: rbac.authorization.k8s.io/v1 23 | kind: ClusterRoleBinding 24 | metadata: 25 | name: {{ include "k6-operator.fullname" . }}-proxy-rolebinding 26 | labels: 27 | {{- include "k6-operator.customLabels" . | default "" | nindent 4 }} 28 | annotations: 29 | {{- include "k6-operator.customAnnotations" . | default "" | nindent 4 }} 30 | roleRef: 31 | apiGroup: rbac.authorization.k8s.io 32 | kind: ClusterRole 33 | name: {{ include "k6-operator.fullname" . }}-proxy-role 34 | subjects: 35 | - kind: ServiceAccount 36 | name: {{ include "k6-operator.serviceAccountName" . }} 37 | namespace: {{- include "k6-operator.namespace" . -}} 38 | {{- end }} 39 | {{- end }} 40 | -------------------------------------------------------------------------------- /charts/k6-operator/templates/metrics/serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.metrics.serviceMonitor.enabled }} 2 | # Prometheus Monitor Service (Metrics) 3 | apiVersion: monitoring.coreos.com/v1 4 | kind: ServiceMonitor 5 | metadata: 6 | name: controller-manager-metrics-monitor 7 | namespace: {{ .Values.metrics.serviceMonitor.namespace | default (include "k6-operator.namespace" .) }} 8 | labels: 9 | control-plane: "controller-manager" 10 | app.kubernetes.io/component: monitoring 11 | {{- include "k6-operator.labels" . | nindent 4 }} 12 | {{- with .Values.metrics.serviceMonitor.labels }} 13 | {{- toYaml . | nindent 4 }} 14 | {{- end }} 15 | {{- include "k6-operator.customLabels" . | default "" | nindent 4 }} 16 | annotations: 17 | {{- include "k6-operator.customAnnotations" . 
| default "" | nindent 4 }} 18 | spec: 19 | namespaceSelector: 20 | matchNames: 21 | - {{- include "k6-operator.namespace" . }} 22 | selector: 23 | matchLabels: 24 | control-plane: "controller-manager" 25 | {{- with .Values.metrics.serviceMonitor.selector }} 26 | {{- toYaml . | nindent 6 }} 27 | {{- end }} 28 | endpoints: 29 | - port: https 30 | path: /metrics 31 | {{- with .Values.metrics.serviceMonitor.interval }} 32 | interval: {{ . }} 33 | {{- end }} 34 | {{- with .Values.metrics.serviceMonitor.scrapeTimeout }} 35 | scrapeTimeout: {{ . }} 36 | {{- end }} 37 | honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels | default false }} 38 | {{- with .Values.metrics.serviceMonitor.relabelings }} 39 | relabelings: 40 | {{- toYaml . | nindent 8 }} 41 | {{- end }} 42 | {{- with .Values.metrics.serviceMonitor.metricRelabelings }} 43 | metricRelabelings: 44 | {{- toYaml . | nindent 8 }} 45 | {{- end }} 46 | {{- with .Values.metrics.serviceMonitor.jobLabel }} 47 | jobLabel: {{ . }} 48 | {{- end }} 49 | {{- end }} 50 | -------------------------------------------------------------------------------- /charts/k6-operator/templates/namespace.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.namespace.create }} 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: {{- include "k6-operator.namespace" . }} 6 | labels: 7 | app.kubernetes.io/name: {{ include "k6-operator.fullname" . }} 8 | control-plane: "controller-manager" 9 | {{- with .Values.customLabels }} 10 | {{- toYaml . | nindent 4 }} 11 | {{- end }} 12 | annotations: 13 | {{- include "k6-operator.customAnnotations" . | default "" | nindent 4 }} 14 | {{- end }} 15 | -------------------------------------------------------------------------------- /charts/k6-operator/templates/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: {{ include "k6-operator.fullname" . }}-leader-election-role 5 | namespace: {{- include "k6-operator.namespace" . }} 6 | labels: 7 | app.kubernetes.io/component: controller 8 | {{- include "k6-operator.labels" . | nindent 4 }} 9 | {{- include "k6-operator.customLabels" . | default "" | nindent 4 }} 10 | annotations: 11 | {{- include "k6-operator.customAnnotations" . | default "" | nindent 4 }} 12 | rules: 13 | - apiGroups: 14 | - "" 15 | resources: 16 | - configmaps 17 | verbs: 18 | - get 19 | - list 20 | - watch 21 | - create 22 | - update 23 | - patch 24 | - delete 25 | - apiGroups: 26 | - coordination.k8s.io 27 | resources: 28 | - leases 29 | verbs: 30 | - create 31 | - get 32 | - list 33 | - update 34 | - apiGroups: 35 | - "" 36 | resources: 37 | - events 38 | verbs: 39 | - create 40 | - patch 41 | -------------------------------------------------------------------------------- /charts/k6-operator/templates/roleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: {{ include "k6-operator.fullname" . }}-leader-election-rolebinding 5 | namespace: {{- include "k6-operator.namespace" . }} 6 | labels: 7 | app.kubernetes.io/component: controller 8 | {{- include "k6-operator.labels" . | nindent 4 }} 9 | {{- include "k6-operator.customLabels" . | default "" | nindent 4 }} 10 | annotations: 11 | {{- include "k6-operator.customAnnotations" . 
| default "" | nindent 4 }} 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: Role 15 | name: {{ include "k6-operator.fullname" . }}-leader-election-role 16 | subjects: 17 | - kind: ServiceAccount 18 | name: {{ .Values.manager.serviceAccount.name }} 19 | namespace: {{- include "k6-operator.namespace" . }} 20 | -------------------------------------------------------------------------------- /charts/k6-operator/templates/service.yaml: -------------------------------------------------------------------------------- 1 | {{- if or .Values.authProxy.enabled .Values.service.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ include "k6-operator.fullname" . }}-controller-manager-metrics-service 6 | namespace: {{- include "k6-operator.namespace" . }} 7 | labels: 8 | control-plane: "controller-manager" 9 | app.kubernetes.io/component: controller 10 | {{- include "k6-operator.labels" . | nindent 4 }} 11 | {{- include "k6-operator.customLabels" . | default "" | nindent 4 }} 12 | {{- with .Values.service.annotations }} 13 | {{ toYaml . | nindent 4 }} 14 | {{- end }} 15 | annotations: 16 | {{- include "k6-operator.customAnnotations" . | default "" | nindent 4 }} 17 | {{- with .Values.service.annotations }} 18 | {{ toYaml . | nindent 4 }} 19 | {{- end }} 20 | spec: 21 | ports: 22 | - name: https 23 | port: 8443 24 | targetPort: https 25 | selector: 26 | control-plane: "controller-manager" 27 | {{- end }} 28 | -------------------------------------------------------------------------------- /charts/k6-operator/templates/serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: {{ include "k6-operator.serviceAccountName" . }} 5 | namespace: {{- include "k6-operator.namespace" . }} 6 | labels: 7 | app.kubernetes.io/component: controller 8 | {{- include "k6-operator.labels" . | nindent 4 }} 9 | {{- include "k6-operator.customLabels" . | default "" | nindent 4 }} 10 | annotations: 11 | {{- include "k6-operator.customAnnotations" . | default "" | nindent 4 }} 12 | -------------------------------------------------------------------------------- /config/certmanager/certificate.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # The following manifests contain a self-signed issuer CR and a certificate CR. 
3 | # More documentation can be found at https://docs.cert-manager.io 4 | # WARNING: Targets CertManager 0.11. Check https://docs.cert-manager.io/en/latest/tasks/upgrading/index.html for 5 | # breaking changes 6 | apiVersion: cert-manager.io/v1alpha2 7 | kind: Issuer 8 | metadata: 9 | name: selfsigned-issuer 10 | namespace: system 11 | spec: 12 | selfSigned: {} 13 | --- 14 | apiVersion: cert-manager.io/v1alpha2 15 | kind: Certificate 16 | metadata: 17 | name: serving-cert # this name should match the one that appears in kustomizeconfig.yaml 18 | namespace: system 19 | spec: 20 | # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize 21 | dnsNames: 22 | - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc 23 | - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local 24 | issuerRef: 25 | kind: Issuer 26 | name: selfsigned-issuer 27 | secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize 28 | -------------------------------------------------------------------------------- /config/certmanager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | resources: 3 | - certificate.yaml 4 | 5 | configurations: 6 | - kustomizeconfig.yaml 7 | -------------------------------------------------------------------------------- /config/certmanager/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # This configuration is for teaching kustomize how to update name references and perform var substitution 3 | nameReference: 4 | - kind: Issuer 5 | group: cert-manager.io 6 | fieldSpecs: 7 | - kind: Certificate 8 | group: cert-manager.io 9 | path: spec/issuerRef/name 10 | 11 | varReference: 12 | - kind: Certificate 13 | group: cert-manager.io 14 | path: spec/commonName 15 | - kind: Certificate 16 | group: cert-manager.io 17 | path: spec/dnsNames 18 | -------------------------------------------------------------------------------- /config/crd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # This kustomization.yaml is not intended to be run by itself, 3 | # since it depends on service name and namespace that are out of this kustomize package. 4 | # It should be run by config/default 5 | resources: 6 | - bases/k6.io_privateloadzones.yaml 7 | - bases/k6.io_testruns.yaml 8 | # +kubebuilder:scaffold:crdkustomizeresource 9 | 10 | patchesStrategicMerge: 11 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 12 | # patches here are for enabling the conversion webhook for each CRD 13 | # - patches/webhook_in_testruns.yaml 14 | #- patches/webhook_in_privateloadzones.yaml 15 | # +kubebuilder:scaffold:crdkustomizewebhookpatch 16 | 17 | # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. 18 | # patches here are for enabling the CA injection for each CRD 19 | # - patches/cainjection_in_testruns.yaml 20 | #- patches/cainjection_in_privateloadzones.yaml 21 | # +kubebuilder:scaffold:crdkustomizecainjectionpatch 22 | 23 | # the following config is for teaching kustomize how to do kustomization for CRDs.
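# (Illustrative note: the kustomizeconfig.yaml referenced below is what lets kustomize rewrite the conversion webhook's Service name and namespace inside each CRD, e.g. via the spec/conversion/webhookClientConfig/service/name path.)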
24 | configurations: 25 | - kustomizeconfig.yaml 26 | -------------------------------------------------------------------------------- /config/crd/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This file is for teaching kustomize how to substitute name and namespace reference in CRD 2 | --- 3 | nameReference: 4 | - kind: Service 5 | version: v1 6 | fieldSpecs: 7 | - kind: CustomResourceDefinition 8 | group: apiextensions.k8s.io 9 | path: spec/conversion/webhookClientConfig/service/name 10 | 11 | namespace: 12 | - kind: CustomResourceDefinition 13 | group: apiextensions.k8s.io 14 | path: spec/conversion/webhookClientConfig/service/namespace 15 | create: false 16 | 17 | varReference: 18 | - path: metadata/annotations 19 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_privateloadzones.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | # CRD conversion requires k8s 1.13 or later. 3 | apiVersion: apiextensions.k8s.io/v1beta1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | annotations: 7 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 8 | name: privateloadzones.k6.io 9 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_testruns.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | # CRD conversion requires k8s 1.13 or later. 3 | apiVersion: apiextensions.k8s.io/v1beta1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | annotations: 7 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 8 | name: testruns.k6.io 9 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_privateloadzones.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables conversion webhook for CRD 2 | # CRD conversion requires k8s 1.13 or later. 3 | apiVersion: apiextensions.k8s.io/v1beta1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | name: privateloadzones.k6.io 7 | spec: 8 | conversion: 9 | strategy: Webhook 10 | webhookClientConfig: 11 | # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, 12 | # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) 13 | caBundle: Cg== 14 | service: 15 | namespace: system 16 | name: webhook-service 17 | path: /convert 18 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_testruns.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables conversion webhook for CRD 2 | # CRD conversion requires k8s 1.13 or later. 
3 | apiVersion: apiextensions.k8s.io/v1beta1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | name: testruns.k6.io 7 | spec: 8 | conversion: 9 | strategy: Webhook 10 | webhookClientConfig: 11 | # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, 12 | # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) 13 | caBundle: Cg== 14 | service: 15 | namespace: system 16 | name: webhook-service 17 | path: /convert 18 | -------------------------------------------------------------------------------- /config/default/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | namespace: k6-operator-system 3 | namePrefix: k6-operator- 4 | 5 | # Labels to add to all resources and selectors. 6 | # commonLabels: 7 | # someName: someValue 8 | 9 | bases: 10 | - ../crd 11 | - ../rbac 12 | - ../manager 13 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 14 | # crd/kustomization.yaml 15 | # - ../webhook 16 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. 17 | # - ../certmanager 18 | # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. 19 | # - ../prometheus 20 | 21 | patchesStrategicMerge: 22 | # Protect the /metrics endpoint by putting it behind auth. 23 | # If you want your controller-manager to expose the /metrics 24 | # endpoint w/o any authn/z, please comment the following line. 25 | - manager_auth_proxy_patch.yaml 26 | 27 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 28 | # crd/kustomization.yaml 29 | # - manager_webhook_patch.yaml 30 | 31 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 32 | # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. 33 | # 'CERTMANAGER' needs to be enabled to use ca injection 34 | # - webhookcainjection_patch.yaml 35 | 36 | # the following config is for teaching kustomize how to do var substitution 37 | vars: 38 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 
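# (Illustrative: each commented-out var below captures a field of a live object so that $(VAR_NAME) placeholders elsewhere can be substituted, e.g. $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc in certmanager/certificate.yaml.)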
39 | # - name: CERTIFICATE_NAMESPACE # namespace of the certificate CR 40 | # objref: 41 | # kind: Certificate 42 | # group: cert-manager.io 43 | # version: v1alpha2 44 | # name: serving-cert # this name should match the one in certificate.yaml 45 | # fieldref: 46 | # fieldpath: metadata.namespace 47 | # - name: CERTIFICATE_NAME 48 | # objref: 49 | # kind: Certificate 50 | # group: cert-manager.io 51 | # version: v1alpha2 52 | # name: serving-cert # this name should match the one in certificate.yaml 53 | # - name: SERVICE_NAMESPACE # namespace of the service 54 | # objref: 55 | # kind: Service 56 | # version: v1 57 | # name: webhook-service 58 | # fieldref: 59 | # fieldpath: metadata.namespace 60 | # - name: SERVICE_NAME 61 | # objref: 62 | # kind: Service 63 | # version: v1 64 | # name: webhook-service 65 | 66 | # Uncomment this section if you need cloud output and copy-paste your token 67 | # secretGenerator: 68 | # - name: cloud-token 69 | # literals: 70 | # - token= 71 | # options: 72 | # annotations: 73 | # kubernetes.io/service-account.name: k6-operator-controller 74 | # labels: 75 | # k6cloud: token 76 | -------------------------------------------------------------------------------- /config/default/manager_auth_proxy_patch.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # This patch injects a sidecar container which is an HTTP proxy for the 3 | # controller manager; it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. 4 | apiVersion: apps/v1 5 | kind: Deployment 6 | metadata: 7 | name: controller-manager 8 | namespace: system 9 | spec: 10 | template: 11 | spec: 12 | containers: 13 | - name: manager 14 | args: 15 | - "--metrics-addr=127.0.0.1:8080" 16 | - "--enable-leader-election" 17 | - name: kube-rbac-proxy 18 | image: quay.io/brancz/kube-rbac-proxy:v0.18.2 19 | args: 20 | - "--secure-listen-address=0.0.0.0:8443" 21 | - "--upstream=http://127.0.0.1:8080/" 22 | - "--logtostderr=true" 23 | - "--v=10" 24 | ports: 25 | - containerPort: 8443 26 | name: https 27 | -------------------------------------------------------------------------------- /config/default/manager_webhook_patch.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: controller-manager 6 | namespace: system 7 | spec: 8 | template: 9 | spec: 10 | containers: 11 | - name: manager 12 | ports: 13 | - containerPort: 9443 14 | name: webhook-server 15 | protocol: TCP 16 | volumeMounts: 17 | - mountPath: /tmp/k8s-webhook-server/serving-certs 18 | name: cert 19 | readOnly: true 20 | volumes: 21 | - name: cert 22 | secret: 23 | defaultMode: 420 24 | secretName: webhook-server-cert 25 | -------------------------------------------------------------------------------- /config/default/webhookcainjection_patch.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # This patch adds annotations to the admission webhook configs, and 3 | # the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize.
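# (Illustrative: after substitution the annotation below would resolve to something like cert-manager.io/inject-ca-from: k6-operator-system/k6-operator-serving-cert, assuming the default namespace and namePrefix from config/default/kustomization.yaml.)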
4 | apiVersion: admissionregistration.k8s.io/v1beta1 5 | kind: MutatingWebhookConfiguration 6 | metadata: 7 | name: mutating-webhook-configuration 8 | annotations: 9 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 10 | --- 11 | apiVersion: admissionregistration.k8s.io/v1beta1 12 | kind: ValidatingWebhookConfiguration 13 | metadata: 14 | name: validating-webhook-configuration 15 | annotations: 16 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 17 | -------------------------------------------------------------------------------- /config/manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manager.yaml 3 | images: 4 | - name: controller 5 | newName: ghcr.io/grafana/k6-operator 6 | newTag: latest 7 | apiVersion: kustomize.config.k8s.io/v1beta1 8 | kind: Kustomization 9 | -------------------------------------------------------------------------------- /config/manager/manager.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: k6-operator 7 | control-plane: controller-manager 8 | name: system 9 | --- 10 | apiVersion: apps/v1 11 | kind: Deployment 12 | metadata: 13 | name: controller-manager 14 | namespace: system 15 | labels: 16 | control-plane: controller-manager 17 | spec: 18 | selector: 19 | matchLabels: 20 | control-plane: controller-manager 21 | replicas: 1 22 | template: 23 | metadata: 24 | labels: 25 | control-plane: controller-manager 26 | spec: 27 | serviceAccountName: k6-operator-controller 28 | containers: 29 | - command: 30 | - /manager 31 | args: 32 | - --enable-leader-election 33 | image: controller:latest 34 | name: manager 35 | resources: 36 | limits: 37 | cpu: 100m 38 | memory: 100Mi 39 | requests: 40 | cpu: 100m 41 | memory: 50Mi 42 | terminationGracePeriodSeconds: 10 43 | -------------------------------------------------------------------------------- /config/manifests/bases/k6-operator.clusterserviceversion.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: operators.coreos.com/v1alpha1 2 | kind: ClusterServiceVersion 3 | metadata: 4 | annotations: 5 | alm-examples: '[]' 6 | capabilities: Basic Install 7 | containerImage: ghcr.io/grafana/k6-operator 8 | repository: https://github.com/grafana/k6-operator 9 | support: k6 10 | name: k6-operator.v0.0.0 11 | namespace: placeholder 12 | spec: 13 | apiservicedefinitions: {} 14 | customresourcedefinitions: 15 | owned: 16 | - displayName: K6 17 | kind: K6 18 | name: k6s.k6.io 19 | version: v1alpha1 20 | - description: PrivateLoadZone is the Schema for the privateloadzones API 21 | displayName: Private Load Zone 22 | kind: PrivateLoadZone 23 | name: privateloadzones.k6.io 24 | version: v1alpha1 25 | - description: TestRun is the Schema for the testruns API 26 | displayName: Test Run 27 | kind: TestRun 28 | name: testruns.k6.io 29 | version: v1alpha1 30 | description: k6-operator 31 | displayName: k6-operator 32 | icon: 33 | - base64data: "" 34 | mediatype: "" 35 | install: 36 | spec: 37 | deployments: null 38 | strategy: "" 39 | installModes: 40 | - supported: false 41 | type: OwnNamespace 42 | - supported: false 43 | type: SingleNamespace 44 | - supported: false 45 | type: MultiNamespace 46 | - supported: true 47 | type: AllNamespaces 48 | keywords: 49 | - testing 50 | - performance 51 | - reliability 52 | - k6 53 
| links: 54 | - name: k6 Operator 55 | url: https://github.com/grafana/k6-operator 56 | maintainers: 57 | - email: olha@k6.io 58 | name: Olha Yevtushenko 59 | - email: daniel@k6.io 60 | name: Daniel González Lopes 61 | maturity: alpha 62 | provider: 63 | name: k6.io 64 | url: https://k6.io/ 65 | version: 0.0.0 66 | -------------------------------------------------------------------------------- /config/manifests/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # These resources constitute the fully configured set of manifests 3 | # used to generate the 'manifests/' directory in a bundle. 4 | resources: 5 | - bases/k6-operator.clusterserviceversion.yaml 6 | - ../default 7 | - ../samples 8 | - ../scorecard 9 | # [WEBHOOK] To enable webhooks, uncomment all the sections with [WEBHOOK] prefix. 10 | # Do NOT uncomment sections with prefix [CERTMANAGER], as OLM does not support cert-manager. 11 | # These patches remove the unnecessary "cert" volume and its manager container volumeMount. 12 | # patchesJson6902: 13 | # - target: 14 | # group: apps 15 | # version: v1 16 | # kind: Deployment 17 | # name: controller-manager 18 | # namespace: system 19 | # patch: |- 20 | # # Remove the manager container's "cert" volumeMount, since OLM will create and mount a set of certs. 21 | # # Update the indices in this path if adding or removing containers/volumeMounts in the manager's Deployment. 22 | # - op: remove 23 | # path: /spec/template/spec/containers/1/volumeMounts/0 24 | # # Remove the "cert" volume, since OLM will create and mount a set of certs. 25 | # # Update the indices in this path if adding or removing volumes in the manager's Deployment. 26 | # - op: remove 27 | # path: /spec/template/spec/volumes/0 28 | -------------------------------------------------------------------------------- /config/prometheus/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | resources: 3 | - monitor.yaml 4 | -------------------------------------------------------------------------------- /config/prometheus/monitor.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Prometheus Monitor Service (Metrics) 3 | apiVersion: monitoring.coreos.com/v1 4 | kind: ServiceMonitor 5 | metadata: 6 | labels: 7 | control-plane: controller-manager 8 | name: controller-manager-metrics-monitor 9 | namespace: system 10 | spec: 11 | endpoints: 12 | - path: /metrics 13 | port: https 14 | selector: 15 | matchLabels: 16 | control-plane: controller-manager 17 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_client_clusterrole.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: metrics-reader 6 | rules: 7 | - nonResourceURLs: ["/metrics"] 8 | verbs: ["get"] 9 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: proxy-role 6 | rules: 7 | - apiGroups: ["authentication.k8s.io"] 8 | resources: 9 | - tokenreviews 10 | verbs: ["create"] 11 | - apiGroups: ["authorization.k8s.io"] 12 | resources: 13 | - subjectaccessreviews 14 | verbs: ["create"] 15 | 
-------------------------------------------------------------------------------- /config/rbac/auth_proxy_role_binding.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: proxy-rolebinding 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: proxy-role 10 | subjects: 11 | - kind: ServiceAccount 12 | name: k6-operator-controller 13 | namespace: k6-operator-system 14 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | labels: 6 | control-plane: controller-manager 7 | name: controller-manager-metrics-service 8 | namespace: system 9 | spec: 10 | ports: 11 | - name: https 12 | port: 8443 13 | targetPort: https 14 | selector: 15 | control-plane: controller-manager 16 | -------------------------------------------------------------------------------- /config/rbac/k6_editor_role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # permissions for end users to edit k6s. 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRole 5 | metadata: 6 | name: k6-editor-role 7 | rules: 8 | - apiGroups: 9 | - k6.io 10 | resources: 11 | - k6s 12 | verbs: 13 | - create 14 | - delete 15 | - get 16 | - list 17 | - patch 18 | - update 19 | - watch 20 | - apiGroups: 21 | - k6.io 22 | resources: 23 | - k6s/status 24 | verbs: 25 | - get 26 | -------------------------------------------------------------------------------- /config/rbac/k6_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # permissions for end users to view k6s. 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRole 5 | metadata: 6 | name: k6-viewer-role 7 | rules: 8 | - apiGroups: 9 | - k6.io 10 | resources: 11 | - k6s 12 | verbs: 13 | - get 14 | - list 15 | - watch 16 | - apiGroups: 17 | - k6.io 18 | resources: 19 | - k6s/status 20 | verbs: 21 | - get 22 | -------------------------------------------------------------------------------- /config/rbac/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | resources: 3 | - role.yaml 4 | - role_binding.yaml 5 | - service_account.yaml 6 | - leader_election_role.yaml 7 | - leader_election_role_binding.yaml 8 | # Comment the following 4 lines if you want to disable 9 | # the auth proxy (https://github.com/brancz/kube-rbac-proxy) 10 | # which protects your /metrics endpoint. 11 | - auth_proxy_service.yaml 12 | - auth_proxy_role.yaml 13 | - auth_proxy_role_binding.yaml 14 | - auth_proxy_client_clusterrole.yaml 15 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # permissions to do leader election. 
3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: Role 5 | metadata: 6 | name: leader-election-role 7 | rules: 8 | - apiGroups: 9 | - "" 10 | resources: 11 | - configmaps 12 | verbs: 13 | - get 14 | - list 15 | - watch 16 | - create 17 | - update 18 | - patch 19 | - delete 20 | - apiGroups: 21 | - coordination.k8s.io 22 | resources: 23 | - leases 24 | verbs: 25 | - create 26 | - get 27 | - list 28 | - update 29 | - apiGroups: 30 | - "" 31 | resources: 32 | - events 33 | verbs: 34 | - create 35 | - patch 36 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role_binding.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: RoleBinding 4 | metadata: 5 | name: leader-election-rolebinding 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: leader-election-role 10 | subjects: 11 | - kind: ServiceAccount 12 | name: k6-operator-controller 13 | namespace: k6-operator-system 14 | -------------------------------------------------------------------------------- /config/rbac/privateloadzone_editor_role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # permissions for end users to edit privateloadzones. 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRole 5 | metadata: 6 | name: privateloadzone-editor-role 7 | rules: 8 | - apiGroups: 9 | - k6.io 10 | resources: 11 | - privateloadzones 12 | verbs: 13 | - create 14 | - delete 15 | - get 16 | - list 17 | - patch 18 | - update 19 | - watch 20 | - apiGroups: 21 | - k6.io 22 | resources: 23 | - privateloadzones/status 24 | verbs: 25 | - get 26 | -------------------------------------------------------------------------------- /config/rbac/privateloadzone_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # permissions for end users to view privateloadzones. 
3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRole 5 | metadata: 6 | name: privateloadzone-viewer-role 7 | rules: 8 | - apiGroups: 9 | - k6.io 10 | resources: 11 | - privateloadzones 12 | verbs: 13 | - get 14 | - list 15 | - watch 16 | - apiGroups: 17 | - k6.io 18 | resources: 19 | - privateloadzones/status 20 | verbs: 21 | - get 22 | -------------------------------------------------------------------------------- /config/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: manager-role 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - secrets 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - "" 17 | resources: 18 | - services 19 | verbs: 20 | - create 21 | - delete 22 | - get 23 | - list 24 | - patch 25 | - update 26 | - watch 27 | - apiGroups: 28 | - apps 29 | resources: 30 | - deployments 31 | verbs: 32 | - create 33 | - delete 34 | - get 35 | - list 36 | - patch 37 | - update 38 | - watch 39 | - apiGroups: 40 | - batch 41 | resources: 42 | - jobs 43 | verbs: 44 | - create 45 | - delete 46 | - get 47 | - list 48 | - patch 49 | - update 50 | - watch 51 | - apiGroups: 52 | - coordination.k8s.io 53 | resources: 54 | - leases 55 | verbs: 56 | - create 57 | - get 58 | - list 59 | - update 60 | - apiGroups: 61 | - "" 62 | resources: 63 | - pods 64 | - pods/log 65 | verbs: 66 | - get 67 | - list 68 | - watch 69 | - apiGroups: 70 | - k6.io 71 | resources: 72 | - privateloadzones 73 | - testruns 74 | verbs: 75 | - create 76 | - delete 77 | - get 78 | - list 79 | - patch 80 | - update 81 | - watch 82 | - apiGroups: 83 | - k6.io 84 | resources: 85 | - privateloadzones/finalizers 86 | - privateloadzones/status 87 | - testruns/finalizers 88 | - testruns/status 89 | verbs: 90 | - get 91 | - patch 92 | - update 93 | -------------------------------------------------------------------------------- /config/rbac/role_binding.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: manager-rolebinding 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: manager-role 10 | subjects: 11 | - kind: ServiceAccount 12 | name: k6-operator-controller 13 | namespace: k6-operator-system 14 | -------------------------------------------------------------------------------- /config/rbac/service_account.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: controller 6 | namespace: system 7 | -------------------------------------------------------------------------------- /config/rbac/testrun_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit testruns. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: testrun-editor-role 6 | rules: 7 | - apiGroups: 8 | - k6.io 9 | resources: 10 | - testruns 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - k6.io 21 | resources: 22 | - testruns/status 23 | verbs: 24 | - get 25 | -------------------------------------------------------------------------------- /config/rbac/testrun_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view testruns. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: testrun-viewer-role 6 | rules: 7 | - apiGroups: 8 | - k6.io 9 | resources: 10 | - testruns 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - k6.io 17 | resources: 18 | - testruns/status 19 | verbs: 20 | - get 21 | -------------------------------------------------------------------------------- /config/samples/k6_v1alpha1_configmap.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: k6-test 6 | data: 7 | test.js: | 8 | import http from 'k6/http'; 9 | import { Rate } from 'k6/metrics'; 10 | import { check, sleep } from 'k6'; 11 | 12 | const failRate = new Rate('failed_requests'); 13 | 14 | export let options = { 15 | stages: [ 16 | { target: 200, duration: '30s' }, 17 | { target: 0, duration: '30s' }, 18 | ], 19 | thresholds: { 20 | failed_requests: ['rate<=0'], 21 | http_req_duration: ['p(95)<500'], 22 | }, 23 | }; 24 | 25 | export default function () { 26 | const result = http.get('https://quickpizza.grafana.com'); 27 | check(result, { 28 | 'http response status code is 200': result.status === 200, 29 | }); 30 | failRate.add(result.status !== 200); 31 | sleep(1); 32 | } 33 | -------------------------------------------------------------------------------- /config/samples/k6_v1alpha1_k6.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k6.io/v1alpha1 3 | kind: TestRun 4 | metadata: 5 | name: k6-sample 6 | spec: 7 | parallelism: 4 8 | script: 9 | configMap: 10 | name: k6-test 11 | file: test.js 12 | -------------------------------------------------------------------------------- /config/samples/k6_v1alpha1_k6_with_initContainers.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k6.io/v1alpha1 3 | kind: TestRun 4 | metadata: 5 | name: k6-sample 6 | spec: 7 | parallelism: 4 8 | script: 9 | configMap: 10 | name: k6-test 11 | file: test.js 12 | initializer: 13 | initContainers: 14 | - image: busybox:latest 15 | command: ["sh", "-c", "echo 'is part of initializer pod'"] 16 | runner: 17 | initContainers: 18 | - image: busybox:latest 19 | command: ["sh", "-c", "echo 'is part of all 4 testrun pods'"] 20 | -------------------------------------------------------------------------------- /config/samples/k6_v1alpha1_k6_with_localfile.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k6.io/v1alpha1 3 | kind: TestRun 4 | metadata: 5 | name: k6-sample 6 | spec: 7 | parallelism: 4 8 | script: 9 | localFile: /test/test.js 10 | runner: 11 | image: 12 | -------------------------------------------------------------------------------- /config/samples/k6_v1alpha1_k6_with_output.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k6.io/v1alpha1 3 | kind: TestRun 4 | metadata: 5 | name: k6-sample 6 | spec: 7 | parallelism: 4 8 | script: k6-test 9 | arguments: --out kafka=brokers=kafka-host:9092,topic=test-output,format=json 10 | -------------------------------------------------------------------------------- /config/samples/k6_v1alpha1_k6_with_readOnlyVolumeClaim.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k6.io/v1alpha1 3 | kind: TestRun 4 | metadata: 5 | name: k6-sample 6 | namespace: load-test 7 | spec: 8 | parallelism: 4 9 | script: 10 | volumeClaim: 11 | name: stress-test-volumeClaim 12 | file: test.js 13 | # If ReadOnly is set to true, PVCs are mounted in Pods on a ReadOnly basis. 14 | readOnly: true 15 | -------------------------------------------------------------------------------- /config/samples/k6_v1alpha1_k6_with_securitycontext.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k6.io/v1alpha1 3 | kind: TestRun 4 | metadata: 5 | name: k6-sample 6 | spec: 7 | parallelism: 4 8 | script: 9 | configMap: 10 | name: k6-test 11 | file: test.js 12 | initializer: 13 | containerSecurityContext: 14 | allowPrivilegeEscalation: false 15 | capabilities: 16 | drop: 17 | - ALL 18 | securityContext: 19 | fsGroup: 1 20 | runAsNonRoot: true 21 | runAsUser: 12345 22 | seccompProfile: 23 | type: RuntimeDefault 24 | supplementalGroups: 25 | - 12345 26 | starter: 27 | containerSecurityContext: 28 | allowPrivilegeEscalation: false 29 | capabilities: 30 | drop: 31 | - ALL 32 | securityContext: 33 | fsGroup: 1 34 | runAsNonRoot: true 35 | runAsUser: 12345 36 | seccompProfile: 37 | type: RuntimeDefault 38 | supplementalGroups: 39 | - 12345 40 | runner: 41 | containerSecurityContext: 42 | allowPrivilegeEscalation: false 43 | capabilities: 44 | drop: 45 | - ALL 46 | securityContext: 47 | fsGroup: 1 48 | runAsNonRoot: true 49 | runAsUser: 12345 50 | seccompProfile: 51 | type: RuntimeDefault 52 | supplementalGroups: 53 | - 12345 54 | -------------------------------------------------------------------------------- /config/samples/k6_v1alpha1_k6_with_topologyspreadconstraints.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k6.io/v1alpha1 2 | kind: TestRun 3 | metadata: 4 | name: testrun-sample 5 | spec: 6 | parallelism: 4 7 | script: 8 | configMap: 9 | name: k6-test 10 | file: test.js 11 | runner: 12 | metadata: 13 | labels: 14 | testrun: sample 15 | topologySpreadConstraints: 16 | - maxSkew: 1 17 | topologyKey: kubernetes.io/hostname 18 | whenUnsatisfiable: ScheduleAnyway 19 | labelSelector: 20 | matchExpressions: 21 | - key: testrun 22 | operator: "In" 23 | values: 24 | - sample 25 | -------------------------------------------------------------------------------- /config/samples/k6_v1alpha1_k6_with_volumeClaim.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k6.io/v1alpha1 3 | kind: TestRun 4 | metadata: 5 | name: k6-sample 6 | namespace: load-test 7 | spec: 8 | parallelism: 4 9 | script: 10 | volumeClaim: 11 | name: stress-test-volumeClaim 12 | # test.js should exist inside /test/ folder. 13 | # And, All the js files and directories test.js 14 | # is importing from should be inside the same directory as well. 
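# To mount the PVC read-only instead, see k6_v1alpha1_k6_with_readOnlyVolumeClaim.yaml above.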
15 | file: test.js 16 | -------------------------------------------------------------------------------- /config/samples/k6_v1alpha1_privateloadzone.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k6.io/v1alpha1 2 | kind: PrivateLoadZone 3 | metadata: 4 | name: privateloadzone-sample 5 | namespace: foo 6 | spec: 7 | token: grafana-k6-token 8 | resources: 9 | cpu: 200m 10 | memory: 512Mi 11 | -------------------------------------------------------------------------------- /config/samples/k6_v1alpha1_testrun.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k6.io/v1alpha1 2 | kind: TestRun 3 | metadata: 4 | name: testrun-sample 5 | spec: 6 | parallelism: 4 7 | script: 8 | configMap: 9 | name: k6-test 10 | file: test.js 11 | -------------------------------------------------------------------------------- /config/samples/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ## Append samples you want in your CSV to this file as resources ## 3 | resources: 4 | - k6_v1alpha1_configmap.yaml 5 | - k6_v1alpha1_k6.yaml 6 | - k6_v1alpha1_privateloadzone.yaml 7 | - k6_v1alpha1_testrun.yaml 8 | # +kubebuilder:scaffold:manifestskustomizesamples 9 | -------------------------------------------------------------------------------- /config/scorecard/bases/config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: scorecard.operatorframework.io/v1alpha3 3 | kind: Configuration 4 | metadata: 5 | name: config 6 | stages: 7 | - parallel: true 8 | tests: [] 9 | -------------------------------------------------------------------------------- /config/scorecard/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | resources: 3 | - bases/config.yaml 4 | patchesJson6902: 5 | - path: patches/basic.config.yaml 6 | target: 7 | group: scorecard.operatorframework.io 8 | version: v1alpha3 9 | kind: Configuration 10 | name: config 11 | - path: patches/olm.config.yaml 12 | target: 13 | group: scorecard.operatorframework.io 14 | version: v1alpha3 15 | kind: Configuration 16 | name: config 17 | # +kubebuilder:scaffold:patchesJson6902 18 | -------------------------------------------------------------------------------- /config/scorecard/patches/basic.config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - op: add 3 | path: /stages/0/tests/- 4 | value: 5 | entrypoint: 6 | - scorecard-test 7 | - basic-check-spec 8 | image: quay.io/operator-framework/scorecard-test:v1.1.0 9 | labels: 10 | suite: basic 11 | test: basic-check-spec-test 12 | -------------------------------------------------------------------------------- /config/scorecard/patches/olm.config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - op: add 3 | path: /stages/0/tests/- 4 | value: 5 | entrypoint: 6 | - scorecard-test 7 | - olm-bundle-validation 8 | image: quay.io/operator-framework/scorecard-test:v1.1.0 9 | labels: 10 | suite: olm 11 | test: olm-bundle-validation-test 12 | - op: add 13 | path: /stages/0/tests/- 14 | value: 15 | entrypoint: 16 | - scorecard-test 17 | - olm-crds-have-validation 18 | image: quay.io/operator-framework/scorecard-test:v1.1.0 19 | labels: 20 | suite: olm 21 | test: olm-crds-have-validation-test 22 | - op: add 23 | path: /stages/0/tests/- 24 | value: 25 | 
entrypoint: 26 | - scorecard-test 27 | - olm-crds-have-resources 28 | image: quay.io/operator-framework/scorecard-test:v1.1.0 29 | labels: 30 | suite: olm 31 | test: olm-crds-have-resources-test 32 | - op: add 33 | path: /stages/0/tests/- 34 | value: 35 | entrypoint: 36 | - scorecard-test 37 | - olm-spec-descriptors 38 | image: quay.io/operator-framework/scorecard-test:v1.1.0 39 | labels: 40 | suite: olm 41 | test: olm-spec-descriptors-test 42 | - op: add 43 | path: /stages/0/tests/- 44 | value: 45 | entrypoint: 46 | - scorecard-test 47 | - olm-status-descriptors 48 | image: quay.io/operator-framework/scorecard-test:v1.1.0 49 | labels: 50 | suite: olm 51 | test: olm-status-descriptors-test 52 | -------------------------------------------------------------------------------- /config/webhook/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | resources: 3 | - manifests.yaml 4 | - service.yaml 5 | 6 | configurations: 7 | - kustomizeconfig.yaml 8 | -------------------------------------------------------------------------------- /config/webhook/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # the following config is for teaching kustomize where to look at when substituting vars. 3 | # It requires kustomize v2.1.0 or newer to work properly. 4 | nameReference: 5 | - kind: Service 6 | version: v1 7 | fieldSpecs: 8 | - kind: MutatingWebhookConfiguration 9 | group: admissionregistration.k8s.io 10 | path: webhooks/clientConfig/service/name 11 | - kind: ValidatingWebhookConfiguration 12 | group: admissionregistration.k8s.io 13 | path: webhooks/clientConfig/service/name 14 | 15 | namespace: 16 | - kind: MutatingWebhookConfiguration 17 | group: admissionregistration.k8s.io 18 | path: webhooks/clientConfig/service/namespace 19 | create: true 20 | - kind: ValidatingWebhookConfiguration 21 | group: admissionregistration.k8s.io 22 | path: webhooks/clientConfig/service/namespace 23 | create: true 24 | 25 | varReference: 26 | - path: metadata/annotations 27 | -------------------------------------------------------------------------------- /config/webhook/service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: webhook-service 6 | namespace: system 7 | spec: 8 | ports: 9 | - port: 443 10 | targetPort: 9443 11 | selector: 12 | control-plane: controller-manager 13 | -------------------------------------------------------------------------------- /controllers/k6_finish.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/go-logr/logr" 8 | "github.com/grafana/k6-operator/api/v1alpha1" 9 | "github.com/grafana/k6-operator/pkg/cloud" 10 | batchv1 "k8s.io/api/batch/v1" 11 | "k8s.io/apimachinery/pkg/labels" 12 | "sigs.k8s.io/controller-runtime/pkg/client" 13 | ) 14 | 15 | // FinishJobs checks if the runners pods have finished execution. 
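// It lists the runner jobs by their labels ("app", "k6_cr", "runner") and treats a job
// as finished once it has no active pods. For cloud test runs, failed jobs are reported
// to GCk6 with an abort event. It returns true only when the number of finished jobs
// reaches the configured parallelism.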
16 | func FinishJobs(ctx context.Context, log logr.Logger, k6 *v1alpha1.TestRun, r *TestRunReconciler) (allFinished bool) { 17 | if len(k6.GetStatus().TestRunID) > 0 { 18 | log = log.WithValues("testRunId", k6.GetStatus().TestRunID) 19 | } 20 | 21 | log.Info("Checking if all runner pods are finished") 22 | 23 | selector := labels.SelectorFromSet(map[string]string{ 24 | "app": "k6", 25 | "k6_cr": k6.NamespacedName().Name, 26 | "runner": "true", 27 | }) 28 | 29 | opts := &client.ListOptions{LabelSelector: selector, Namespace: k6.NamespacedName().Namespace} 30 | jl := &batchv1.JobList{} 31 | var err error 32 | 33 | if err = r.List(ctx, jl, opts); err != nil { 34 | log.Error(err, "Could not list jobs") 35 | return 36 | } 37 | 38 | // TODO: We should distinguish between Succeeded/Failed/Unknown 39 | var ( 40 | finished, failed int32 41 | ) 42 | for _, job := range jl.Items { 43 | if job.Status.Active != 0 { 44 | continue 45 | } 46 | finished++ 47 | 48 | if job.Status.Failed > 0 { 49 | failed++ 50 | } 51 | } 52 | 53 | msg := fmt.Sprintf("%d/%d jobs complete, %d failed", finished, k6.GetSpec().Parallelism, failed) 54 | log.Info(msg) 55 | 56 | if v1alpha1.IsTrue(k6, v1alpha1.CloudTestRun) && failed > 0 { 57 | events := cloud.ErrorEvent(cloud.K6OperatorRunnerError). 58 | WithDetail(msg). 59 | WithAbort() 60 | cloud.SendTestRunEvents(r.k6CloudClient, k6.TestRunID(), log, events) 61 | } 62 | 63 | if finished < k6.GetSpec().Parallelism { 64 | return 65 | } 66 | 67 | allFinished = true 68 | return 69 | } 70 | -------------------------------------------------------------------------------- /controllers/k6_start.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "net/http" 8 | "time" 9 | 10 | "github.com/go-logr/logr" 11 | "github.com/grafana/k6-operator/api/v1alpha1" 12 | "github.com/grafana/k6-operator/pkg/cloud" 13 | "github.com/grafana/k6-operator/pkg/resources/jobs" 14 | v1 "k8s.io/api/core/v1" 15 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 16 | ctrl "sigs.k8s.io/controller-runtime" 17 | ) 18 | 19 | func isServiceReady(log logr.Logger, service *v1.Service) bool { 20 | resp, err := http.Get(fmt.Sprintf("http://%v:6565/v1/status", service.Spec.ClusterIP)) 21 | 22 | if err != nil { 23 | log.Error(err, fmt.Sprintf("failed to get status from %v", service.ObjectMeta.Name)) 24 | return false 25 | } 26 | defer resp.Body.Close() // close the response body to avoid leaking connections 27 | return resp.StatusCode < 400 28 | } 29 | 30 | // StartJobs in the Ready phase using a curl container 31 | func StartJobs(ctx context.Context, log logr.Logger, k6 *v1alpha1.TestRun, r *TestRunReconciler) (res ctrl.Result, err error) { 32 | // It may take some time to get Services up, so check in frequently 33 | res = ctrl.Result{RequeueAfter: time.Second} 34 | 35 | if len(k6.GetStatus().TestRunID) > 0 { 36 | log = log.WithValues("testRunId", k6.GetStatus().TestRunID) 37 | } 38 | 39 | log.Info("Waiting for pods to get ready") 40 | 41 | opts := k6.ListOptions() 42 | 43 | pl := &v1.PodList{} 44 | if err = r.List(ctx, pl, opts); err != nil { 45 | log.Error(err, "Could not list pods") 46 | return res, nil 47 | } 48 | 49 | var count int 50 | for _, pod := range pl.Items { 51 | if pod.Status.Phase != "Running" { 52 | continue 53 | } 54 | count++ 55 | } 56 | 57 | log.Info(fmt.Sprintf("%d/%d runner pods ready", count, k6.GetSpec().Parallelism)) 58 | 59 | if count != int(k6.GetSpec().Parallelism) { 60 | if t, ok := v1alpha1.LastUpdate(k6, v1alpha1.TestRunRunning); !ok { 61 | // this should never
happen 62 | return res, errors.New("Cannot find condition TestRunRunning") 63 | } else { 64 | // let's try this approach 65 | if time.Since(t).Minutes() > 5 { 66 | msg := fmt.Sprintf(errMessageTooLong, "runner pods", "runner jobs and pods") 67 | log.Info(msg) 68 | 69 | if v1alpha1.IsTrue(k6, v1alpha1.CloudTestRun) { 70 | events := cloud.ErrorEvent(cloud.K6OperatorStartError). 71 | WithDetail(msg). 72 | WithAbort() 73 | cloud.SendTestRunEvents(r.k6CloudClient, k6.TestRunID(), log, events) 74 | } 75 | } 76 | } 77 | 78 | return res, nil 79 | } 80 | 81 | // services 82 | 83 | log.Info("Waiting for services to get ready") 84 | 85 | hostnames, err := r.hostnames(ctx, log, true, opts) 86 | log.Info(fmt.Sprintf("err: %v, hostnames: %v", err, hostnames)) 87 | if err != nil { 88 | return ctrl.Result{}, err 89 | } 90 | 91 | log.Info(fmt.Sprintf("%d/%d services ready", len(hostnames), k6.GetSpec().Parallelism)) 92 | 93 | // setup 94 | 95 | if v1alpha1.IsTrue(k6, v1alpha1.CloudPLZTestRun) { 96 | if err := runSetup(ctx, hostnames, log); err != nil { 97 | return ctrl.Result{}, err 98 | } 99 | } 100 | 101 | // starter 102 | 103 | starter := jobs.NewStarterJob(k6, hostnames) 104 | 105 | if err = ctrl.SetControllerReference(k6, starter, r.Scheme); err != nil { 106 | log.Error(err, "Failed to set controller reference for the start job") 107 | } 108 | 109 | // TODO: add a check for existence of starter job 110 | 111 | if err = r.Create(ctx, starter); err != nil { 112 | log.Error(err, "Failed to launch k6 test starter") 113 | return res, nil 114 | } 115 | 116 | log.Info("Created starter job") 117 | 118 | log.Info("Changing stage of TestRun status to started") 119 | k6.GetStatus().Stage = "started" 120 | v1alpha1.UpdateCondition(k6, v1alpha1.TestRunRunning, metav1.ConditionTrue) 121 | 122 | if updateHappened, err := r.UpdateStatus(ctx, k6, log); err != nil { 123 | return ctrl.Result{}, err 124 | } else if updateHappened { 125 | return ctrl.Result{Requeue: true}, nil 126 | } 127 | return ctrl.Result{}, nil 128 | } 129 | -------------------------------------------------------------------------------- /controllers/k6_stop.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/go-logr/logr" 8 | "github.com/grafana/k6-operator/api/v1alpha1" 9 | "github.com/grafana/k6-operator/pkg/resources/jobs" 10 | v1 "k8s.io/api/core/v1" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | "k8s.io/apimachinery/pkg/labels" 13 | ctrl "sigs.k8s.io/controller-runtime" 14 | "sigs.k8s.io/controller-runtime/pkg/client" 15 | ) 16 | 17 | // StopJobs in the Ready phase using a curl container 18 | // It assumes that Services of the runners are already up and 19 | // test is being executed. 
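// It collects the ClusterIPs of the runner Services, creates a single stop job
// that calls every runner's REST API, and moves the TestRun stage to "stopped".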
20 | func StopJobs(ctx context.Context, log logr.Logger, k6 *v1alpha1.TestRun, r *TestRunReconciler) (res ctrl.Result, err error) { 21 | if len(k6.GetStatus().TestRunID) > 0 { 22 | log = log.WithValues("testRunId", k6.GetStatus().TestRunID) 23 | } 24 | 25 | selector := labels.SelectorFromSet(map[string]string{ 26 | "app": "k6", 27 | "k6_cr": k6.NamespacedName().Name, 28 | "runner": "true", 29 | }) 30 | 31 | opts := &client.ListOptions{LabelSelector: selector, Namespace: k6.NamespacedName().Namespace} 32 | 33 | var hostnames []string 34 | sl := &v1.ServiceList{} 35 | 36 | if err = r.List(ctx, sl, opts); err != nil { 37 | log.Error(err, "Could not list services") 38 | return res, nil 39 | } 40 | 41 | for _, service := range sl.Items { 42 | hostnames = append(hostnames, service.Spec.ClusterIP) 43 | } 44 | 45 | stopJob := jobs.NewStopJob(k6, hostnames) 46 | 47 | if err = ctrl.SetControllerReference(k6, stopJob, r.Scheme); err != nil { 48 | log.Error(err, "Failed to set controller reference for the stop job") 49 | } 50 | 51 | // TODO: add a check for existence of stop job 52 | 53 | if err = r.Create(ctx, stopJob); err != nil { 54 | log.Error(err, "Failed to launch k6 test stop job") 55 | return res, nil 56 | } 57 | 58 | log.Info("Created stop job") 59 | 60 | log.Info("Changing stage of TestRun status to stopped") 61 | k6.GetStatus().Stage = "stopped" 62 | v1alpha1.UpdateCondition(k6, v1alpha1.TestRunRunning, metav1.ConditionFalse) 63 | v1alpha1.UpdateCondition(k6, v1alpha1.CloudTestRunAborted, metav1.ConditionTrue) 64 | 65 | if updateHappened, err := r.UpdateStatus(ctx, k6, log); err != nil { 66 | return ctrl.Result{}, err 67 | } else if updateHappened { 68 | return ctrl.Result{RequeueAfter: time.Second}, nil 69 | } 70 | return ctrl.Result{}, nil 71 | } 72 | -------------------------------------------------------------------------------- /controllers/k6_stopped_jobs.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "io" 8 | "net/http" 9 | 10 | "github.com/go-logr/logr" 11 | "github.com/grafana/k6-operator/api/v1alpha1" 12 | k6api "go.k6.io/k6/api/v1" 13 | batchv1 "k8s.io/api/batch/v1" 14 | v1 "k8s.io/api/core/v1" 15 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 16 | "k8s.io/apimachinery/pkg/labels" 17 | "sigs.k8s.io/controller-runtime/pkg/client" 18 | ) 19 | 20 | func isJobRunning(log logr.Logger, service *v1.Service) bool { 21 | resp, err := http.Get(fmt.Sprintf("http://%v:6565/v1/status", service.Spec.ClusterIP)) 22 | if err != nil { 23 | return false 24 | } 25 | defer resp.Body.Close() // close the body on all return paths 26 | 27 | // Response has been received so assume the job is running. 28 | if resp.StatusCode >= 400 { 29 | log.Error(nil, fmt.Sprintf("status from runner job %v is %d", service.ObjectMeta.Name, resp.StatusCode)) 30 | return true 31 | } 32 | 33 | 34 | 35 | data, err := io.ReadAll(resp.Body) 36 | if err != nil { 37 | log.Error(err, fmt.Sprintf("Error on reading status of the runner job %v", service.ObjectMeta.Name)) 38 | return true 39 | } 40 | 41 | var status k6api.StatusJSONAPI 42 | if err := json.Unmarshal(data, &status); err != nil { 43 | log.Error(err, fmt.Sprintf("Error on parsing status of the runner job %v", service.ObjectMeta.Name)) 44 | return true 45 | } 46 | 47 | return status.Status().Running 48 | } 49 | 50 | // StoppedJobs checks if the runner pods have stopped execution.
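// It queries each runner's status endpoint (port 6565) via its Service and
// returns true only when no runner reports a test still running.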
51 | func StoppedJobs(ctx context.Context, log logr.Logger, k6 *v1alpha1.TestRun, r *TestRunReconciler) (allStopped bool) { 52 | if len(k6.GetStatus().TestRunID) > 0 { 53 | log = log.WithValues("testRunId", k6.GetStatus().TestRunID) 54 | } 55 | 56 | log.Info("Waiting for pods to stop the test run") 57 | 58 | selector := labels.SelectorFromSet(map[string]string{ 59 | "app": "k6", 60 | "k6_cr": k6.NamespacedName().Name, 61 | "runner": "true", 62 | }) 63 | 64 | opts := &client.ListOptions{LabelSelector: selector, Namespace: k6.NamespacedName().Namespace} 65 | 66 | sl := &v1.ServiceList{} 67 | 68 | if err := r.List(ctx, sl, opts); err != nil { 69 | log.Error(err, "Could not list services") 70 | return 71 | } 72 | 73 | var runningJobs int32 74 | for _, service := range sl.Items { 75 | 76 | if isJobRunning(log, &service) { 77 | runningJobs++ 78 | } 79 | } 80 | 81 | log.Info(fmt.Sprintf("%d/%d runners stopped execution", k6.GetSpec().Parallelism-runningJobs, k6.GetSpec().Parallelism)) 82 | 83 | if runningJobs > 0 { 84 | return 85 | } 86 | 87 | allStopped = true 88 | return 89 | } 90 | 91 | // KillJobs retrieves all runner jobs and attempts to delete them 92 | // with propagation policy so that corresponding pods are deleted as well. 93 | // On failure, error is returned. 94 | // On success, error is nil and allDeleted shows if all retrieved jobs were deleted. 95 | func KillJobs(ctx context.Context, log logr.Logger, k6 *v1alpha1.TestRun, r *TestRunReconciler) (allDeleted bool, err error) { 96 | if len(k6.GetStatus().TestRunID) > 0 { 97 | log = log.WithValues("testRunId", k6.GetStatus().TestRunID) 98 | } 99 | 100 | log.Info("Killing all runner jobs.") 101 | 102 | selector := labels.SelectorFromSet(map[string]string{ 103 | "app": "k6", 104 | "k6_cr": k6.NamespacedName().Name, 105 | "runner": "true", 106 | }) 107 | 108 | opts := &client.ListOptions{LabelSelector: selector, Namespace: k6.NamespacedName().Namespace} 109 | jl := &batchv1.JobList{} 110 | 111 | if err = r.List(ctx, jl, opts); err != nil { 112 | log.Error(err, "Could not list jobs") 113 | return 114 | } 115 | 116 | var deleteCount int 117 | 118 | propagationPolicy := client.PropagationPolicy(metav1.DeletionPropagation(metav1.DeletePropagationBackground)) 119 | for _, job := range jl.Items { 120 | if err = r.Delete(ctx, &job, propagationPolicy); err != nil { 121 | log.Error(err, fmt.Sprintf("Failed to delete runner job %s", job.Name)) 122 | // do we need to retry here? 123 | } 124 | deleteCount++ 125 | } 126 | 127 | return deleteCount == len(jl.Items), nil 128 | } 129 | -------------------------------------------------------------------------------- /controllers/suite_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | 6 | http://www.apache.org/licenses/LICENSE-2.0 7 | 8 | Unless required by applicable law or agreed to in writing, software 9 | distributed under the License is distributed on an "AS IS" BASIS, 10 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | See the License for the specific language governing permissions and 12 | limitations under the License. 13 | */ 14 | 15 | package controllers 16 | 17 | import ( 18 | "path/filepath" 19 | "testing" 20 | 21 | . "github.com/onsi/ginkgo" 22 | . 
"github.com/onsi/gomega" 23 | "k8s.io/client-go/kubernetes/scheme" 24 | "k8s.io/client-go/rest" 25 | "sigs.k8s.io/controller-runtime/pkg/client" 26 | "sigs.k8s.io/controller-runtime/pkg/envtest" 27 | logf "sigs.k8s.io/controller-runtime/pkg/log" 28 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 29 | 30 | k6v1alpha1 "github.com/grafana/k6-operator/api/v1alpha1" 31 | // +kubebuilder:scaffold:imports 32 | ) 33 | 34 | // These tests use Ginkgo (BDD-style Go testing framework). Refer to 35 | // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 36 | 37 | var cfg *rest.Config 38 | var k8sClient client.Client 39 | var testEnv *envtest.Environment 40 | 41 | func TestAPIs(t *testing.T) { 42 | RegisterFailHandler(Fail) 43 | 44 | RunSpecs(t, "Controller Suite") 45 | } 46 | 47 | var _ = BeforeSuite(func(done Done) { 48 | logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter))) 49 | 50 | By("bootstrapping test environment") 51 | testEnv = &envtest.Environment{ 52 | CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, 53 | } 54 | 55 | var err error 56 | cfg, err = testEnv.Start() 57 | Expect(err).ToNot(HaveOccurred()) 58 | Expect(cfg).ToNot(BeNil()) 59 | 60 | err = k6v1alpha1.AddToScheme(scheme.Scheme) 61 | Expect(err).NotTo(HaveOccurred()) 62 | 63 | err = k6v1alpha1.AddToScheme(scheme.Scheme) 64 | Expect(err).NotTo(HaveOccurred()) 65 | 66 | // +kubebuilder:scaffold:scheme 67 | 68 | k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) 69 | Expect(err).ToNot(HaveOccurred()) 70 | Expect(k8sClient).ToNot(BeNil()) 71 | 72 | close(done) 73 | }, 60) 74 | 75 | var _ = AfterSuite(func() { 76 | By("tearing down the test environment") 77 | err := testEnv.Stop() 78 | Expect(err).ToNot(HaveOccurred()) 79 | }) 80 | -------------------------------------------------------------------------------- /docs/env-vars.md: -------------------------------------------------------------------------------- 1 | # Environment variables 2 | 3 | There are some tricky scenarios that can come up when trying to pass environment variables to k6-operator tests, esp. when archives are involved. Let's describe them. 4 | 5 | Assuming I have a `test.js` with the following options: 6 | 7 | ```js 8 | const VUS = __ENV.TEST_VUS || 10000; 9 | 10 | export const options = { 11 | vus: VUS, 12 | duration: '300s', 13 | setupTimeout: '600s' 14 | }; 15 | ``` 16 | 17 | Firstly, it is a recommended practice to set the default value of environment variable definition as here: 18 | ```js 19 | const VUS = __ENV.TEST_VUS || 10000; 20 | ``` 21 | 22 | This way, the script won't break even if there are changes in the setup. 23 | 24 | Within k6-operator context, there are several ways the `TEST_VUS` can be configured for `test.js`. Let's look into some of them. 25 | 26 | ## ConfigMap with `test.js` 27 | 28 | ConfigMap `env-test` contains a `test.js` file. TestRun definition: 29 | 30 | ```yaml 31 | apiVersion: k6.io/v1alpha1 32 | kind: TestRun 33 | metadata: 34 | name: k6-sample 35 | spec: 36 | parallelism: 2 37 | script: 38 | configMap: 39 | name: "env-test" 40 | file: "test.js" 41 | runner: 42 | env: 43 | - name: TEST_VUS 44 | value: "42" 45 | ``` 46 | 47 | In this case, there will be 42 VUs in total, equally split between 2 runners. This can be confirmed with [k6 execution API](https://grafana.com/docs/k6/latest/javascript-api/k6-execution/). 
48 | 49 | ## ConfigMap has `archive.tar` with system env vars 50 | 51 | `archive.tar` was created with the following command: 52 | 53 | ```bash 54 | TEST_VUS=4 k6 archive --include-system-env-vars test.js 55 | ``` 56 | 57 | ConfigMap `env-test` contains `archive.tar`. TestRun definition: 58 | 59 | ```yaml 60 | apiVersion: k6.io/v1alpha1 61 | kind: TestRun 62 | metadata: 63 | name: k6-sample 64 | spec: 65 | parallelism: 2 66 | script: 67 | configMap: 68 | name: "env-test" 69 | file: "archive.tar" 70 | runner: 71 | env: 72 | - name: TEST_VUS 73 | value: "42" 74 | ``` 75 | 76 | In this case, there will be 4 VUs in total, split between 2 runners, and at the same time the `TEST_VUS` env variable inside the script will be equal to 42. 77 | 78 | ## ConfigMap has `archive.tar` without system env vars 79 | 80 | `archive.tar` was created with the following command: 81 | 82 | ```bash 83 | TEST_VUS=4 k6 archive test.js 84 | ``` 85 | 86 | ConfigMap `env-test` and TestRun `k6-sample` are as in the previous case. In this case, there will be 10000 VUs in total, split between 2 runners, and `TEST_VUS` inside the script will again be 42. 87 | 88 | ## Conclusion 89 | 90 | This behaviour is not fully defined by k6-operator: the operator only passes the env vars to the pods. But if one uses `k6 archive` to create a script, the VUs are fixed at that step. This difference between configuration for `k6 archive` and `k6 run` is described in the docs [here](https://grafana.com/docs/k6/latest/misc/archive/#contents-of-an-archive-file) and [here](https://grafana.com/docs/k6/latest/using-k6/environment-variables/). 91 | -------------------------------------------------------------------------------- /docs/plz.md: -------------------------------------------------------------------------------- 1 | # Private Load Zone under the hood 2 | 3 | The Private Load Zone (PLZ) feature requires k6-operator to communicate regularly with the Grafana Cloud k6 (GCk6) API. This document aims to describe the data flow of that communication. 4 | 5 | ## PLZ lifecycle 6 | 7 | A PLZ resource is explicitly created and explicitly destroyed by the user, e.g. with standard `kubectl` tooling. Once the PLZ resource is created, k6-operator registers it with GCk6 and starts a polling loop. GCk6 is polled every 10 seconds for new test runs to be executed on the PLZ: 8 | 9 | ```mermaid 10 | sequenceDiagram 11 | participant GCk6 API 12 | participant Kubernetes 13 | participant k6-operator 14 | actor Alice (SRE) 15 | Alice (SRE)->>Kubernetes: kubectl apply -f plz.yaml 16 | k6-operator->>GCk6 API: Register PLZ 17 | loop Polling for PLZ test runs: 10sec 18 | 19 | k6-operator->>GCk6 API: Get test runs for PLZ 20 | activate GCk6 API 21 | GCk6 API-->>k6-operator: List of test run IDs 22 | deactivate GCk6 API 23 | 24 | end 25 | 26 | Alice (SRE)->>Kubernetes: kubectl delete -f plz.yaml 27 | k6-operator->>GCk6 API: Deregister PLZ 28 | ``` 29 | 30 | In other words, there are three HTTP REST calls to GCk6 in the above workflow: to register, to deregister, and to poll test runs. More on the GCk6 REST API can be found [here](https://grafana.com/docs/grafana-cloud/k6/reference/cloud-rest-api/#read-test-runs). 31 | 32 | ## Lifecycle of a PLZ test run 33 | 34 | When a user starts any GCk6 test run, k6 first creates an [archive](https://grafana.com/docs/k6/latest/misc/archive/#k6-cloud-execution) of the test and sends it to GCk6.
GCk6 first executes internal validation of the archive and, in the case of a PLZ test run, stores the archive in AWS S3 behind a [presigned URL](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-presigned-url.html) with the expiration time set to 300 seconds. GCk6 then notifies k6-operator about this test run during the next polling check, as described [above](#plz-lifecycle). 35 | 36 | Once k6-operator learns of a new PLZ test run, it requests additional info about this test run from GCk6. The test run data returned by GCk6 contains the presigned URL to the S3 bucket and some additional information, like the public Docker image containing k6 (`grafana/k6`) and the number of runners needed for this test run. Using this information, k6-operator then creates a `TestRun` custom resource (CR). 37 | 38 | ```mermaid 39 | sequenceDiagram 40 | actor Bob (QA) 41 | participant k6 42 | participant GCk6 API 43 | participant AWS S3 44 | participant k6-operator 45 | participant Kubernetes 46 | 47 | Bob (QA)->>k6: k6 cloud plz-test-X.js 48 | k6->>GCk6 API: k6 archive for X 49 | activate GCk6 API 50 | GCk6 API->>AWS S3: Store k6 archive for X 51 | deactivate GCk6 API 52 | Note over k6-operator: Oh, I have a PLZ test run X! 53 | 54 | k6-operator->>GCk6 API: Get data for test run X 55 | activate GCk6 API 56 | GCk6 API-->>k6-operator: Data for test run X 57 | deactivate GCk6 API 58 | k6-operator->>Kubernetes: Create TestRun CR 59 | rect rgb(191, 223, 255) 60 | note right of k6-operator: Running PLZ test run 61 | %% create participant runners 62 | runners->>AWS S3: Download k6 archive for X 63 | k6-operator->>runners: Start the test 64 | loop Test Run execution 65 | runners->>GCk6 API: cloud output 66 | k6-operator->>GCk6 API: Get test run state 67 | activate GCk6 API 68 | GCk6 API-->>k6-operator: Running OK! 69 | deactivate GCk6 API 70 | end 71 | end 72 | ``` 73 | 74 | As the PLZ `TestRun` has the presigned URL configured as the path to the k6 script, each runner Pod downloads the k6 archive from this URL in an init container. The PLZ `TestRun` is also configured as a [cloud output test run](https://grafana.com/docs/k6/latest/results-output/real-time/cloud/), so runners stream metrics to GCk6 for aggregation, storage, and visualization. 75 | 76 | Otherwise, a PLZ `TestRun` is processed by k6-operator like any other `TestRun`, but with two additional HTTP REST calls to GCk6: 77 | - a call that checks whether the test run is being processed by GCk6 without error and whether there is a user abort 78 | - an _optional_ call that sends error events to GCk6 in case the test cannot be executed (e.g. something is off with the infrastructure) 79 | -------------------------------------------------------------------------------- /docs/releases.md: -------------------------------------------------------------------------------- 1 | ## Release workflow 2 | 3 | The current release process is rather heavy on manual intervention: 4 | 5 | 1. _manual_ Create a GitHub release. 6 | 2. "Release" workflow is triggered: 7 | - build of new Docker images from `main` 8 | - PR to update the bundle 9 | 3. _manual_ Review and merge the PR with the bundle update. 10 | 4. _manual_ Commit and push the following changes: 11 | - Update the Makefile with the latest version. 12 | - Update `docs/versioning.md`. 13 | - Update CRDs in the Helm chart if needed. 14 | - Update k6-operator's version in `values.yaml` and bump `Chart.yaml`. 15 | - Run `make helm-docs` to update the auto-generated documentation for the chart. 16 | - Run `make helm-schema` to update the schema file.
17 | - Commit the changes: 18 | ```bash 19 | git add charts/k6-operator/Chart.yaml charts/k6-operator/README.md charts/k6-operator/values.yaml charts/k6-operator/values.schema.json docs/versioning.md Makefile 20 | git commit -m 'release: update for v0.0.x' 21 | ``` 22 | 5. "Helm release" workflow is triggered, publishing to Helm Grafana repo. 23 | -------------------------------------------------------------------------------- /docs/versioning.md: -------------------------------------------------------------------------------- 1 | ## k6 versioning 2 | 3 | Since around v0.0.7 k6-operator has been releasing a default runner image together with each pre-release and release. It is built from the `grafana/k6:latest` so the version of k6 in the default runner's image highly depends on the time of the build itself. For now let's keep this info in a table for the historical record and for ease of reference. 4 | 5 | | k6-operator version | runner tag | k6 version in runner image | 6 | |:-------------------:|:----------:|:--------------------------:| 7 | | v0.0.7rc | [runner-v0.0.7rc](ghcr.io/grafana/operator:runner-v0.0.7rc) | v0.33.0 | 8 | | v0.0.7rc2 | [runner-v0.0.7rc2](ghcr.io/grafana/operator:runner-v0.0.7rc2) | v0.33.0 | 9 | | v0.0.7rc3 | [runner-v0.0.7rc3](ghcr.io/grafana/operator:runner-v0.0.7rc3) | v0.34.1 | 10 | | v0.0.7rc4 | [runner-v0.0.7rc4](ghcr.io/grafana/operator:runner-v0.0.7rc4) | v0.36.0 | 11 | | v0.0.7 | [runner-v0.0.7](ghcr.io/grafana/operator:runner-v0.0.7) | v0.38.2 | 12 | | v0.0.8rc1 | [runner-v0.0.8rc1](ghcr.io/grafana/operator:runner-v0.0.8rc1) | v0.38.3 | 13 | | v0.0.8rc2 | [runner-v0.0.8rc2](ghcr.io/grafana/operator:runner-v0.0.8rc2) | N/A | 14 | | v0.0.8rc3 | [runner-v0.0.8rc3](ghcr.io/grafana/operator:runner-v0.0.8rc3) | v0.40.0 | 15 | | v0.0.8 | [runner-v0.0.8](ghcr.io/grafana/operator:runner-v0.0.8) | v0.41.0 | 16 | | v0.0.9rc1 | [runner-v0.0.9rc1](ghcr.io/grafana/operator:runner-v0.0.9rc1) | v0.41.0 | 17 | | v0.0.9rc2 | [runner-v0.0.9rc2](ghcr.io/grafana/operator:runner-v0.0.9rc2) | v0.42.0 | 18 | | v0.0.9rc3 | [runner-v0.0.9rc3](ghcr.io/grafana/operator:runner-v0.0.9rc3) | v0.42.0 | 19 | | v0.0.9 | [runner-v0.0.9](ghcr.io/grafana/operator:runner-v0.0.9) | v0.44.0 | 20 | | v0.0.10rc1 | [runner-v0.0.10rc1](ghcr.io/grafana/operator:runner-v0.0.10rc1) | v0.44.1 | 21 | | v0.0.10rc2 | [runner-v0.0.10rc2](ghcr.io/grafana/operator:runner-v0.0.10rc2) | v0.45.0 | 22 | | v0.0.10rc3 | [runner-v0.0.10rc3](ghcr.io/grafana/k6-operator:runner-v0.0.10rc3) | v0.45.0 | 23 | | v0.0.10 | [runner-v0.0.10](ghcr.io/grafana/k6-operator:runner-v0.0.10) | v0.46.0 | 24 | | v0.0.11rc1 | [runner-v0.0.11rc1](ghcr.io/grafana/k6-operator:runner-v0.0.11rc1) | v0.46.0 | 25 | | v0.0.11rc2 | [runner-v0.0.11rc2](ghcr.io/grafana/k6-operator:runner-v0.0.11rc2) | v0.46.0 | 26 | | v0.0.11rc3 | [runner-v0.0.11rc3](ghcr.io/grafana/k6-operator:runner-v0.0.11rc3) | v0.46.0 | 27 | | v0.0.11 | [runner-v0.0.11](ghcr.io/grafana/k6-operator:runner-v0.0.11) | v0.47.0 | 28 | | v0.0.12rc1 | [runner-v0.0.12rc1](ghcr.io/grafana/k6-operator:runner-v0.0.12rc1) | v0.47.0 | 29 | | v0.0.12 | [runner-v0.0.12](ghcr.io/grafana/k6-operator:runner-v0.0.12) | v0.48.0 | 30 | | v0.0.13rc1 | [runner-v0.0.13rc1](ghcr.io/grafana/k6-operator:runner-v0.0.13rc1) | v0.48.0 | 31 | | v0.0.13 | [runner-v0.0.13](ghcr.io/grafana/k6-operator:runner-v0.0.13) | v0.49.0 | 32 | | v0.0.14 | [runner-v0.0.14](ghcr.io/grafana/k6-operator:runner-v0.0.14) | v0.50.0 | 33 | | v0.0.15 | [runner-v0.0.15](ghcr.io/grafana/k6-operator:runner-v0.0.15) | v0.52.0 | 
34 | | v0.0.16 | [runner-v0.0.16](ghcr.io/grafana/k6-operator:runner-v0.0.16) | v0.52.0 | 35 | | v0.0.17 | [runner-v0.0.17](ghcr.io/grafana/k6-operator:runner-v0.0.17) | v0.54.0 | 36 | | v0.0.18 | [runner-v0.0.18](ghcr.io/grafana/k6-operator:runner-v0.0.18) | v0.55.0 | 37 | | v0.0.19 | [runner-v0.0.19](ghcr.io/grafana/k6-operator:runner-v0.0.19) | v0.56.0 | 38 | | v0.0.20 | [runner-v0.0.20](ghcr.io/grafana/k6-operator:runner-v0.0.20) | v1.0.0-rc1 | 39 | | v0.0.21 | [runner-v0.0.21](ghcr.io/grafana/k6-operator:runner-v0.0.21) | v1.0.0 | 40 | 41 | ### What was used before? 42 | 43 | Previously, k6-operator relied on the `loadimpact/k6:latest` image by default. That changed with the addition of Istio support and subsequent CI additions. Since then, k6-operator uses `ghcr.io/grafana/operator:latest-runner` as the default image. -------------------------------------------------------------------------------- /e2e/README.md: -------------------------------------------------------------------------------- 1 | # E2E tests for k6-operator 2 | 3 | This is a basic suite of E2E tests for k6-operator, covering some of the main use cases. It can be executed all at once (sequentially) or by picking a single test: 4 | 5 | ```sh 6 | # execute all tests one-by-one 7 | ./run-tests.sh 8 | 9 | # execute just one test by the name of its folder 10 | ./run-tests.sh -p basic-testrun-1 11 | 12 | # NOTE: the `ipv6` folder is currently an exception and cannot be started in this way 13 | ``` 14 | 15 | It is assumed that there is a Kubernetes cluster to execute the tests in and that `kubectl` has access to it. 16 | 17 | ## Under the hood 18 | 19 | `run-tests.sh` does not build any images, but it can be customized with a custom image name and tag for the k6-operator image. At the same time, the script uses the current k6-operator folder to get manifests. So, for example, in order to test a certain branch, one has to switch to that branch locally first. 20 | 21 | Each test is executed with the [xk6-environment](https://github.com/grafana/xk6-environment) extension, bootstrapping a virtual cluster for full isolation per test. After a test is finished, everything connected to it is removed, leaving the cluster in its initial state. While the test executes, it is not recommended to interact with the cluster unless it's for monitoring or debugging purposes. 22 | 23 | ### GCk6 tests 24 | 25 | In order to execute Grafana Cloud k6 tests (cloud output or, in the future, PLZ), one is expected to create environment variables containing the tokens for authentication: 26 | 27 | ```sh 28 | # personal GCk6 token 29 | # Encode your token with base64: 30 | echo -n '' | base64 31 | 32 | # The terminal output can contain an additional newline, so remove it, then export the token like this: 33 | export CLOUD_TOKEN=... # in base64! 34 | ``` 35 | 36 | A similar process will be needed for an organization token (required for PLZ): 37 | ```sh 38 | echo -n '' | base64 39 | 40 | export CLOUD_ORG_TOKEN=... # in base64! 41 | ``` 42 | 43 | ## How to add a test 44 | 45 | First, the existing tests can be used as a basis for many additional experiments. Otherwise, the skeleton for a test looks like this: 46 | 47 | ```sh 48 | new-test 49 | ├── manifests 50 | │   ├── configmap.yaml # contains k6 script for the TestRun 51 | │   └── kustomization.yaml # a gathering point for all required manifests 52 | ├── test.js # the test with xk6-environment setup 53 | └── testrun.yaml # TestRun to test 54 | ``` 55 | 56 | The `kustomization.yaml` file must include the `latest` folder, as shown below.
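For reference, the resulting `kustomization.yaml` typically mirrors the existing tests, e.g. for the skeleton above:

```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ../../latest/
  - configmap.yaml
```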
To create it quickly, this shortcut can be used: 57 | ```sh 58 | cd $folder/manifests 59 | kustomize create --autodetect --recursive --resources ../../latest/ 60 | ``` -------------------------------------------------------------------------------- /e2e/basic-testrun-1/manifests/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: test 5 | namespace: default 6 | data: 7 | test.js: | 8 | import http from 'k6/http'; 9 | import { check } from 'k6'; 10 | 11 | export let options = { 12 | stages: [ 13 | { target: 200, duration: '30s' }, 14 | { target: 0, duration: '30s' }, 15 | ], 16 | }; 17 | 18 | export default function () { 19 | const result = http.get('https://quickpizza.grafana.com'); 20 | check(result, { 21 | 'http response status code is 200': result.status === 200, 22 | }); 23 | } 24 | -------------------------------------------------------------------------------- /e2e/basic-testrun-1/manifests/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - ../../latest/ 5 | - configmap.yaml 6 | -------------------------------------------------------------------------------- /e2e/basic-testrun-1/test.js: -------------------------------------------------------------------------------- 1 | import { Environment } from 'k6/x/environment'; 2 | import { sleep, fail } from 'k6'; 3 | 4 | export const options = { 5 | setupTimeout: '60s', 6 | }; 7 | 8 | const PARENT = "./" 9 | 10 | const env = new Environment({ 11 | name: "basic-testrun-1", 12 | implementation: "vcluster", 13 | initFolder: PARENT + "manifests", // initial folder with everything that wil be loaded at init 14 | }) 15 | 16 | export function setup() { 17 | console.log("init returns", env.init()); 18 | // it is best to have a bit of delay between creating a CRD and 19 | // a corresponding CR, so as to avoid the "no matches" error 20 | sleep(0.5); 21 | } 22 | 23 | export default function () { 24 | let err = env.apply(PARENT + "testrun.yaml"); 25 | console.log("apply testrun returns", err) 26 | 27 | err = env.wait({ 28 | kind: "TestRun", 29 | name: "k6-sample", //tr.name(), 30 | namespace: "default", //tr.namespace(), 31 | status_key: "stage", 32 | status_value: "finished", 33 | }, { 34 | timeout: "5m", 35 | interval: "1m", 36 | }); 37 | 38 | if (err != null) { 39 | fail("wait returns" + err); 40 | } 41 | } 42 | 43 | export function teardown() { 44 | console.log("delete returns", env.delete()); 45 | } -------------------------------------------------------------------------------- /e2e/basic-testrun-1/testrun.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k6.io/v1alpha1 3 | kind: TestRun 4 | metadata: 5 | name: k6-sample 6 | spec: 7 | parallelism: 1 8 | script: 9 | configMap: 10 | name: "test" 11 | file: "test.js" -------------------------------------------------------------------------------- /e2e/basic-testrun-4/manifests/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: test 5 | namespace: fancy-testing 6 | data: 7 | test.js: | 8 | // source: https://github.com/grafana/quickpizza/blob/main/k6/foundations/04.metrics.js 9 | import http from "k6/http"; 10 | import { check, sleep } from "k6"; 11 | import { Trend, Counter } from 
"k6/metrics"; 12 | 13 | const BASE_URL = __ENV.BASE_URL || 'http://localhost:3333'; 14 | 15 | export const options = { 16 | stages: [ 17 | { duration: '5s', target: 5 }, 18 | { duration: '10s', target: 5 }, 19 | { duration: '5s', target: 0 }, 20 | ], 21 | }; 22 | 23 | const pizzas = new Counter('quickpizza_number_of_pizzas'); 24 | const ingredients = new Trend('quickpizza_ingredients'); 25 | 26 | export function setup() { 27 | let res = http.get(BASE_URL) 28 | if (res.status !== 200) { 29 | console.log(`Got unexpected status code ${res.status} when trying to setup. Exiting.`) 30 | } 31 | } 32 | 33 | export default function () { 34 | let restrictions = { 35 | maxCaloriesPerSlice: 500, 36 | mustBeVegetarian: false, 37 | excludedIngredients: ["pepperoni"], 38 | excludedTools: ["knife"], 39 | maxNumberOfToppings: 6, 40 | minNumberOfToppings: 2 41 | } 42 | let res = http.post(`${BASE_URL}/api/pizza`, JSON.stringify(restrictions), { 43 | headers: { 44 | 'Content-Type': 'application/json', 45 | 'X-User-ID': 23423, 46 | }, 47 | }); 48 | check(res, { "status is 200": (res) => res.status === 200 }); 49 | console.log(`${res.json().pizza.name} (${res.json().pizza.ingredients.length} ingredients)`); 50 | pizzas.add(1); 51 | ingredients.add(res.json().pizza.ingredients.length); 52 | sleep(1); 53 | } 54 | 55 | export function teardown(){ 56 | // TODO: Send notification to Slack 57 | console.log("That's all folks!") 58 | } -------------------------------------------------------------------------------- /e2e/basic-testrun-4/manifests/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - ../../latest/ 5 | - configmap.yaml 6 | - namespace.yaml 7 | -------------------------------------------------------------------------------- /e2e/basic-testrun-4/manifests/namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: fancy-testing -------------------------------------------------------------------------------- /e2e/basic-testrun-4/test.js: -------------------------------------------------------------------------------- 1 | import { Environment } from 'k6/x/environment'; 2 | import { sleep, fail } from 'k6'; 3 | 4 | export const options = { 5 | setupTimeout: '60s', 6 | }; 7 | 8 | const PARENT = "./" 9 | 10 | const env = new Environment({ 11 | name: "basic-testrun-4", 12 | implementation: "vcluster", 13 | initFolder: PARENT + "manifests", // initial folder with everything that wil be loaded at init 14 | }) 15 | 16 | export function setup() { 17 | console.log("init returns", env.init()); 18 | // it is best to have a bit of delay between creating a CRD and 19 | // a corresponding CR, so as to avoid the "no matches" error 20 | sleep(0.5); 21 | } 22 | 23 | export default function () { 24 | let err = env.apply(PARENT + "testrun.yaml"); 25 | console.log("apply testrun returns", err); 26 | 27 | err = env.wait({ 28 | kind: "TestRun", 29 | name: "k6-sample", //tr.name(), 30 | namespace: "fancy-testing", //tr.namespace(), 31 | status_key: "stage", 32 | status_value: "started", 33 | }, { 34 | timeout: "1m", 35 | interval: "10s", 36 | }); 37 | 38 | if (err != null) { 39 | fail("wait for started returns" + err); 40 | } 41 | 42 | let allPods = env.getN("pods", { 43 | "namespace": "fancy-testing", //tr.namespace() 44 | "app": "k6", 45 | "k6_cr": "k6-sample", //tr.name() 46 | }); 47 | 48 | 
let runnerPods = env.getN("pods", { 49 | "namespace": "fancy-testing", //tr.namespace() 50 | "app": "k6", 51 | "k6_cr": "k6-sample", //tr.name() 52 | "runner": "true", 53 | }); 54 | 55 | // there should be N runner pods + initializer + starter 56 | if (runnerPods != 4 || allPods != runnerPods + 2) { 57 | fail("wrong number of pods:" + runnerPods + "/" + allPods); 58 | } 59 | 60 | err = env.wait({ 61 | kind: "TestRun", 62 | name: "k6-sample", //tr.name(), 63 | namespace: "fancy-testing", //tr.namespace(), 64 | status_key: "stage", 65 | status_value: "finished", 66 | }, { 67 | timeout: "5m", 68 | interval: "10s", 69 | }); 70 | 71 | // TODO: add check for status of the pods 72 | 73 | if (err != null) { 74 | fail("wait for finished returns" + err); 75 | } 76 | } 77 | 78 | export function teardown() { 79 | console.log("delete returns", env.delete()); 80 | } -------------------------------------------------------------------------------- /e2e/basic-testrun-4/testrun.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k6.io/v1alpha1 3 | kind: TestRun 4 | metadata: 5 | name: k6-sample 6 | namespace: fancy-testing 7 | spec: 8 | parallelism: 4 9 | script: 10 | configMap: 11 | name: "test" 12 | file: "test.js" 13 | arguments: --no-teardown --tag office=hours 14 | runner: 15 | env: 16 | - name: BASE_URL 17 | value: https://pizza.grafana.fun 18 | -------------------------------------------------------------------------------- /e2e/error-stage/manifests/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: test 5 | namespace: default 6 | data: 7 | test.js: | 8 | import http from 'k6/http'; 9 | import { check } from 'k6'; 10 | 11 | export let options = { 12 | vus: 1, 13 | duration: '50s' 14 | }; 15 | 16 | export default function () { 17 | const result = http.get('https://quickpizza.grafana.com'); 18 | check(result, { 19 | 'http response status code is 200': result.status === 200, 20 | }); 21 | } 22 | -------------------------------------------------------------------------------- /e2e/error-stage/manifests/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - ../../latest/ 5 | - configmap.yaml 6 | -------------------------------------------------------------------------------- /e2e/error-stage/test.js: -------------------------------------------------------------------------------- 1 | import { Environment } from 'k6/x/environment'; 2 | import { sleep, fail } from 'k6'; 3 | 4 | export const options = { 5 | setupTimeout: '60s', 6 | }; 7 | 8 | const PARENT = "./" 9 | 10 | const env = new Environment({ 11 | name: "error-stage", 12 | implementation: "vcluster", 13 | initFolder: PARENT + "manifests", // initial folder with everything that will be loaded at init 14 | }) 15 | 16 | export function setup() { 17 | console.log("init returns", env.init()); 18 | // it is best to have a bit of delay between creating a CRD and 19 | // a corresponding CR, so as to avoid the "no matches" error 20 | sleep(0.5); 21 | } 22 | 23 | // TestRun should enter error stage on misconfigured parallelism 24 | export default function () { 25 | let err = env.apply(PARENT + "testrun.yaml"); 26 | console.log("apply testrun returns", err) 27 | 28 | err = env.wait({ 29 | kind: "TestRun", 30 | name: "k6-sample", //tr.name(), 31 | namespace:
"default", //tr.namespace(), 32 | status_key: "stage", 33 | status_value: "error", 34 | }, { 35 | timeout: "5m", 36 | interval: "1m", 37 | }); 38 | 39 | if (err != null) { 40 | fail("wait returns" + err); 41 | } 42 | } 43 | 44 | export function teardown() { 45 | console.log("delete returns", env.delete()); 46 | } -------------------------------------------------------------------------------- /e2e/error-stage/testrun.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k6.io/v1alpha1 3 | kind: TestRun 4 | metadata: 5 | name: k6-sample 6 | spec: 7 | parallelism: 5 8 | script: 9 | configMap: 10 | name: "test" 11 | file: "test.js" -------------------------------------------------------------------------------- /e2e/init-container-volume/manifests/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - ../../latest/ 5 | -------------------------------------------------------------------------------- /e2e/init-container-volume/test.js: -------------------------------------------------------------------------------- 1 | import { Environment } from 'k6/x/environment'; 2 | import { sleep, fail } from 'k6'; 3 | 4 | export const options = { 5 | setupTimeout: '60s', 6 | }; 7 | 8 | const PARENT = "./" 9 | 10 | const env = new Environment({ 11 | name: "init-container-volume", 12 | implementation: "vcluster", 13 | initFolder: PARENT + "manifests", // initial folder with everything that wil be loaded at init 14 | }) 15 | 16 | export function setup() { 17 | console.log("init returns", env.init()); 18 | // it is best to have a bit of delay between creating a CRD and 19 | // a corresponding CR, so as to avoid the "no matches" error 20 | sleep(0.5); 21 | } 22 | 23 | // This test checks at once init container and volumes for runners, 24 | // as well as localFile option. 
25 | export default function () { 26 | let err = env.apply(PARENT + "testrun.yaml"); 27 | console.log("apply testrun returns", err) 28 | 29 | // ideally, we should check pod spec here, but this test 30 | // will never finish successfully without init volume working 31 | // as expected, as there won't be any script to execute 32 | 33 | err = env.wait({ 34 | kind: "TestRun", 35 | name: "k6-init-container", //tr.name(), 36 | namespace: "default", //tr.namespace(), 37 | status_key: "stage", 38 | status_value: "finished", 39 | }, { 40 | timeout: "5m", 41 | interval: "1m", 42 | }); 43 | 44 | if (err != null) { 45 | fail("wait returns" + err); 46 | } 47 | 48 | let allPods = env.getN("pods", { 49 | "namespace": "default", //tr.namespace() 50 | "app": "k6", 51 | "k6_cr": "k6-init-container", //tr.name() 52 | }); 53 | 54 | // there should be N runner pods + initializer + starter 55 | if (allPods != 2 + 2) { 56 | fail("wrong number of pods:" + allPods + " instead of " + 4); 57 | } 58 | } 59 | 60 | export function teardown() { 61 | console.log("delete returns", env.delete()); 62 | } -------------------------------------------------------------------------------- /e2e/init-container-volume/testrun.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k6.io/v1alpha1 3 | kind: TestRun 4 | metadata: 5 | name: k6-init-container 6 | spec: 7 | parallelism: 2 8 | script: 9 | localFile: /test/bar.js 10 | runner: 11 | initContainers: 12 | - image: busybox:1.28 13 | command: [ 14 | 'sh', 15 | '-c', 16 | 'echo "import http from \"k6/http\"; export const options = { iterations: 100, vus: 10 }; export default function () { const response = http.get(\"https://quickpizza.grafana.com\"); }" > /test/bar.js && cat /test/bar.js' 17 | ] 18 | workingDir: "/test" 19 | volumeMounts: 20 | - mountPath: /test 21 | name: k6-provision-location 22 | volumes: 23 | - emptyDir: {} 24 | name: k6-provision-location 25 | volumeMounts: 26 | - mountPath: /test 27 | name: k6-provision-location -------------------------------------------------------------------------------- /e2e/ipv6/kind-ipv6.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | networking: 4 | ipFamily: ipv6 5 | apiServerAddress: 127.0.0.1 6 | -------------------------------------------------------------------------------- /e2e/latest/ClusterRole-k6-operator-manager-role.yml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: k6-operator-manager-role 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - secrets 10 | verbs: 11 | - get 12 | - list 13 | - watch 14 | - apiGroups: 15 | - "" 16 | resources: 17 | - services 18 | verbs: 19 | - create 20 | - delete 21 | - get 22 | - list 23 | - patch 24 | - update 25 | - watch 26 | - apiGroups: 27 | - apps 28 | resources: 29 | - deployments 30 | verbs: 31 | - create 32 | - delete 33 | - get 34 | - list 35 | - patch 36 | - update 37 | - watch 38 | - apiGroups: 39 | - batch 40 | resources: 41 | - jobs 42 | verbs: 43 | - create 44 | - delete 45 | - get 46 | - list 47 | - patch 48 | - update 49 | - watch 50 | - apiGroups: 51 | - coordination.k8s.io 52 | resources: 53 | - leases 54 | verbs: 55 | - create 56 | - get 57 | - list 58 | - update 59 | - apiGroups: 60 | - "" 61 | resources: 62 | - pods 63 | - pods/log 64 | verbs: 65 | - get 66 | - list 67 | - watch 68 | -
apiGroups: 69 | - k6.io 70 | resources: 71 | - privateloadzones 72 | - testruns 73 | verbs: 74 | - create 75 | - delete 76 | - get 77 | - list 78 | - patch 79 | - update 80 | - watch 81 | - apiGroups: 82 | - k6.io 83 | resources: 84 | - privateloadzones/finalizers 85 | - privateloadzones/status 86 | - testruns/finalizers 87 | - testruns/status 88 | verbs: 89 | - get 90 | - patch 91 | - update 92 | -------------------------------------------------------------------------------- /e2e/latest/ClusterRole-k6-operator-metrics-reader.yml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: k6-operator-metrics-reader 5 | rules: 6 | - nonResourceURLs: 7 | - /metrics 8 | verbs: 9 | - get 10 | -------------------------------------------------------------------------------- /e2e/latest/ClusterRole-k6-operator-proxy-role.yml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: k6-operator-proxy-role 5 | rules: 6 | - apiGroups: 7 | - authentication.k8s.io 8 | resources: 9 | - tokenreviews 10 | verbs: 11 | - create 12 | - apiGroups: 13 | - authorization.k8s.io 14 | resources: 15 | - subjectaccessreviews 16 | verbs: 17 | - create 18 | -------------------------------------------------------------------------------- /e2e/latest/ClusterRoleBinding-k6-operator-manager-rolebinding.yml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: k6-operator-manager-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: k6-operator-manager-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: k6-operator-controller 12 | namespace: k6-operator-system 13 | -------------------------------------------------------------------------------- /e2e/latest/ClusterRoleBinding-k6-operator-proxy-rolebinding.yml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: k6-operator-proxy-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: k6-operator-proxy-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: k6-operator-controller 12 | namespace: k6-operator-system 13 | -------------------------------------------------------------------------------- /e2e/latest/Deployment-k6-operator-controller-manager.yml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | name: k6-operator-controller-manager 7 | namespace: k6-operator-system 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | control-plane: controller-manager 13 | template: 14 | metadata: 15 | labels: 16 | control-plane: controller-manager 17 | spec: 18 | containers: 19 | - args: 20 | - --metrics-addr=127.0.0.1:8080 21 | - --enable-leader-election 22 | command: 23 | - /manager 24 | image: ghcr.io/grafana/k6-operator:controller-v0.0.19 25 | name: manager 26 | resources: 27 | limits: 28 | cpu: 100m 29 | memory: 100Mi 30 | requests: 31 | cpu: 100m 32 | memory: 50Mi 33 | - args: 34 | - --secure-listen-address=0.0.0.0:8443 35 | - --upstream=http://127.0.0.1:8080/ 
36 | - --logtostderr=true 37 | - --v=10 38 | image: quay.io/brancz/kube-rbac-proxy:v0.18.2 39 | name: kube-rbac-proxy 40 | ports: 41 | - containerPort: 8443 42 | name: https 43 | serviceAccountName: k6-operator-controller 44 | terminationGracePeriodSeconds: 10 45 | -------------------------------------------------------------------------------- /e2e/latest/Namespace-k6-operator-system.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: k6-operator 6 | control-plane: controller-manager 7 | name: k6-operator-system 8 | -------------------------------------------------------------------------------- /e2e/latest/Role-k6-operator-leader-election-role.yml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: k6-operator-leader-election-role 5 | namespace: k6-operator-system 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - create 16 | - update 17 | - patch 18 | - delete 19 | - apiGroups: 20 | - coordination.k8s.io 21 | resources: 22 | - leases 23 | verbs: 24 | - create 25 | - get 26 | - list 27 | - update 28 | - apiGroups: 29 | - "" 30 | resources: 31 | - events 32 | verbs: 33 | - create 34 | - patch 35 | -------------------------------------------------------------------------------- /e2e/latest/RoleBinding-k6-operator-leader-election-rolebinding.yml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: k6-operator-leader-election-rolebinding 5 | namespace: k6-operator-system 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: k6-operator-leader-election-role 10 | subjects: 11 | - kind: ServiceAccount 12 | name: k6-operator-controller 13 | namespace: k6-operator-system 14 | -------------------------------------------------------------------------------- /e2e/latest/Service-k6-operator-controller-manager-metrics-service.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | name: k6-operator-controller-manager-metrics-service 7 | namespace: k6-operator-system 8 | spec: 9 | ports: 10 | - name: https 11 | port: 8443 12 | targetPort: https 13 | selector: 14 | control-plane: controller-manager 15 | -------------------------------------------------------------------------------- /e2e/latest/ServiceAccount-k6-operator-controller.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: k6-operator-controller 5 | namespace: k6-operator-system 6 | -------------------------------------------------------------------------------- /e2e/latest/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - ClusterRole-k6-operator-manager-role.yml 5 | - ClusterRole-k6-operator-metrics-reader.yml 6 | - ClusterRole-k6-operator-proxy-role.yml 7 | - ClusterRoleBinding-k6-operator-manager-rolebinding.yml 8 | - ClusterRoleBinding-k6-operator-proxy-rolebinding.yml 9 | - 
CustomResourceDefinition-privateloadzones.k6.io.yaml 10 | - CustomResourceDefinition-testruns.k6.io.yaml 11 | - Deployment-k6-operator-controller-manager.yml 12 | - Namespace-k6-operator-system.yml 13 | - Role-k6-operator-leader-election-role.yml 14 | - RoleBinding-k6-operator-leader-election-rolebinding.yml 15 | - Service-k6-operator-controller-manager-metrics-service.yml 16 | - ServiceAccount-k6-operator-controller.yml 17 | -------------------------------------------------------------------------------- /e2e/multifile/manifests/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: multifile 5 | data: 6 | test.js: | 7 | import http from 'k6/http'; 8 | import { fun } from './utils.js'; 9 | 10 | export const options = { 11 | vus: 10, 12 | duration: '30s', 13 | }; 14 | 15 | export default function () { 16 | http.get('https://quickpizza.grafana.com'); 17 | fun(); 18 | } 19 | utils.js: |- 20 | import { sleep } from 'k6'; 21 | 22 | export function fun() { 23 | sleep(1) 24 | } 25 | -------------------------------------------------------------------------------- /e2e/multifile/manifests/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - ../../latest/ 5 | - configmap.yaml 6 | -------------------------------------------------------------------------------- /e2e/multifile/test.js: -------------------------------------------------------------------------------- 1 | import { Environment } from 'k6/x/environment'; 2 | import { sleep, fail } from 'k6'; 3 | 4 | export const options = { 5 | setupTimeout: '60s', 6 | }; 7 | 8 | const PARENT = "./" 9 | 10 | const env = new Environment({ 11 | name: "multifile", 12 | implementation: "vcluster", 13 | initFolder: PARENT + "manifests", // initial folder with everything that will be loaded at init 14 | }) 15 | 16 | export function setup() { 17 | console.log("init returns", env.init()); 18 | // it is best to have a bit of delay between creating a CRD and 19 | // a corresponding CR, so as to avoid the "no matches" error 20 | sleep(0.5); 21 | } 22 | 23 | export default function () { 24 | let err = env.apply(PARENT + "testrun.yaml"); 25 | console.log("apply testrun returns", err) 26 | 27 | err = env.wait({ 28 | kind: "TestRun", 29 | name: "multifile-test", //tr.name(), 30 | namespace: "default", //tr.namespace(), 31 | status_key: "stage", 32 | status_value: "finished", 33 | }, { 34 | timeout: "2m", 35 | interval: "1s", 36 | }); 37 | if (err != null) { 38 | fail("wait returns" + err); 39 | } 40 | 41 | let allPods = env.getN("pods", { 42 | "namespace": "default", // tr.namespace() 43 | "app": "k6", 44 | "k6_cr": "multifile-test", //tr.name() 45 | }); 46 | 47 | // there should be N runner pods + initializer + starter 48 | if (allPods != 3 + 2) { 49 | fail("wrong number of pods:" + allPods + " instead of " + 5); 50 | } 51 | } 52 | 53 | export function teardown() { 54 | console.log("delete returns", env.delete()); 55 | } -------------------------------------------------------------------------------- /e2e/multifile/testrun.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k6.io/v1alpha1 3 | kind: TestRun 4 | metadata: 5 | name: multifile-test 6 | spec: 7 | parallelism: 3 8 | script: 9 | configMap: 10 | name: "multifile" 11 | file: "test.js" 12 |
-------------------------------------------------------------------------------- /e2e/run-tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | show_help() { 4 | echo "Usage: $(basename $0) [-h] [-t GHCR_IMAGE_TAG] [-i IMAGE] [-p TEST_NAME]" 5 | echo "Options:" 6 | echo " -h Display this help message" 7 | echo " -t Existing tag of ghcr.io/grafana/k6-operator image" 8 | echo " -i Arbitrary Docker image for k6-operator" 9 | echo " -p Pick one test (folder) to run separately" 10 | exit 0 11 | } 12 | 13 | exec_test() { 14 | echo "Executing test $TEST_NAME" 15 | cd $TEST_NAME/manifests 16 | for f in *.yaml; do envsubst '$CLOUD_TOKEN' < $f > out && mv out $f; done 17 | cd .. # back to $TEST_NAME 18 | if ! ../k6 run test.js ; then 19 | echo "Test $TEST_NAME failed" 20 | cd .. # back to root 21 | exit 1 22 | fi 23 | cd .. # back to root 24 | } 25 | 26 | # use GHCR latest image by default, unless -i is specified 27 | GHCR_IMAGE_TAG=latest 28 | IMAGE= 29 | TEST_NAME= 30 | 31 | while getopts ':ht:i:p:' option; do 32 | case "$option" in 33 | h) show_help 34 | exit 35 | ;; 36 | t) GHCR_IMAGE_TAG=$OPTARG 37 | ;; 38 | i) IMAGE=$OPTARG 39 | ;; 40 | p) TEST_NAME=$OPTARG 41 | ;; 42 | :) printf "missing argument for -%s\n" "$OPTARG" >&2 43 | show_help 44 | exit 1 45 | ;; 46 | \?) printf "illegal option: -%s\n" "$OPTARG" >&2 47 | show_help 48 | exit 1 49 | ;; 50 | esac 51 | done 52 | # shift $((OPTIND - 1)) 53 | 54 | if [ -z "${IMAGE}" ]; then 55 | IMAGE=ghcr.io/grafana/k6-operator:$GHCR_IMAGE_TAG 56 | fi 57 | 58 | echo "Using k6-operator image:" $IMAGE 59 | 60 | # Recreate kustomization. 61 | 62 | echo "Regenerate ./latest from the bundle" 63 | 64 | rm latest/* 65 | 66 | # use an existing bundle.yaml if it fits the image or generate a new one 67 | if [ "$IMAGE" = "ghcr.io/grafana/k6-operator:latest" ]; then 68 | cp ../bundle.yaml ./latest/bundle-to-test.yaml 69 | cd latest 70 | else 71 | cd ../config/default 72 | kustomize edit set image $IMAGE && kustomize build . > ../../e2e/latest/bundle-to-test.yaml 73 | cd ../../e2e/latest 74 | fi 75 | 76 | # We're in e2e/latest here and there is a bundle-to-test.yaml 77 | # Split the bundle and create a kustomize 78 | 79 | docker run --user="1001" --rm -v "${PWD}":/workdir mikefarah/yq --no-doc -s '.kind + "-" + .metadata.name' bundle-to-test.yaml 80 | # since CRDs are being extracted as k6.io, without yaml in the end, rename them: 81 | for f in $(find . -type f -name '*.k6.io'); do mv $f ${f}.yaml; done 82 | 83 | rm bundle-to-test.yaml 84 | kustomize create --autodetect --recursive . 85 | 86 | # since CRDs are being extracted as k6.io, without yaml in the end, rename them: 87 | for f in $(find . -type f -name '*.k6.io'); do mv $f ${f}.yaml; done 88 | 89 | # go back to e2e/ 90 | cd .. 91 | 92 | # TODO: add a proper build with xk6-environment (use new functionality?) 93 | # Blocked by: https://github.com/grafana/xk6-environment/issues/16 94 | # Right now, using the pre-built k6 binary uploaded to a branch in xk6-environment 95 | 96 | if [ ! -f ./k6 ]; then 97 | wget https://github.com/grafana/xk6-environment/raw/refs/heads/fix/temp-k6-binary/bin/k6 98 | chmod +x ./k6 99 | fi 100 | 101 | # Run the tests. 102 | 103 | if [ ! 
-z "${TEST_NAME}" ]; then 104 | exec_test 105 | exit 0 106 | fi 107 | 108 | tests=( 109 | "basic-testrun-1" 110 | "basic-testrun-4" 111 | "testrun-cleanup" 112 | "testrun-archive" 113 | "init-container-volume" 114 | "multifile" 115 | "error-stage" 116 | "testrun-simultaneous" 117 | "testrun-watch-namespace" 118 | "testrun-cloud-output" 119 | "testrun-simultaneous-cloud-output" 120 | # "kyverno" 121 | # "custom-domain" 122 | # "browser-1" 123 | # cloud abort 124 | # plz 125 | # ipv6 126 | ) 127 | 128 | for folder in "${tests[@]}"; do 129 | TEST_NAME=$folder 130 | exec_test 131 | done 132 | -------------------------------------------------------------------------------- /e2e/test-initcontainer-volumes.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k6.io/v1alpha1 3 | kind: TestRun 4 | metadata: 5 | name: k6-sample-init-container-volume 6 | spec: 7 | parallelism: 1 8 | script: 9 | localFile: /test/test.js 10 | runner: 11 | volumes: 12 | - emptyDir: {} 13 | name: k6-provision-location 14 | volumeMounts: 15 | - mountPath: /test 16 | name: k6-provision-location 17 | initContainers: 18 | - image: busybox:1.28 19 | command: 20 | [ 21 | "sh", 22 | "-c", 23 | "cd /test; wget https://raw.githubusercontent.com/grafana/k6-operator/main/e2e/test.js;", 24 | ] 25 | volumeMounts: 26 | - mountPath: /test 27 | name: k6-provision-location 28 | -------------------------------------------------------------------------------- /e2e/test-initcontainer.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k6.io/v1alpha1 3 | kind: TestRun 4 | metadata: 5 | name: k6-sample-init-container 6 | spec: 7 | parallelism: 1 8 | script: 9 | configMap: 10 | name: "crocodile-stress-test" 11 | file: "test.js" 12 | runner: 13 | initContainers: 14 | - image: busybox:1.28 15 | command: ['sh', '-c', 'cat /test/test.js'] 16 | - image: busybox:1.28 17 | command: ['sh', '-c', 'echo "Waiting for task...."; sleep 10; echo "Task is done!!!"'] 18 | -------------------------------------------------------------------------------- /e2e/test.js: -------------------------------------------------------------------------------- 1 | import http from 'k6/http'; 2 | import { check } from 'k6'; 3 | 4 | export let options = { 5 | stages: [ 6 | { target: 200, duration: '30s' }, 7 | { target: 0, duration: '30s' }, 8 | ], 9 | }; 10 | 11 | export default function () { 12 | const result = http.get('https://quickpizza.grafana.com'); 13 | check(result, { 14 | 'http response status code is 200': result.status === 200, 15 | }); 16 | } 17 | -------------------------------------------------------------------------------- /e2e/test.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k6.io/v1alpha1 3 | kind: TestRun 4 | metadata: 5 | name: k6-sample 6 | spec: 7 | parallelism: 1 8 | script: 9 | configMap: 10 | name: "crocodile-stress-test" 11 | file: "test.js" 12 | -------------------------------------------------------------------------------- /e2e/testrun-archive/manifests/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - ../../latest 6 | - configmap.yaml 7 | -------------------------------------------------------------------------------- /e2e/testrun-archive/test.js: 
-------------------------------------------------------------------------------- 1 | import { Environment } from 'k6/x/environment'; 2 | import { sleep, fail } from 'k6'; 3 | 4 | export const options = { 5 | setupTimeout: '60s', 6 | }; 7 | 8 | const PARENT = "./" 9 | 10 | const env = new Environment({ 11 | name: "testrun-archive", 12 | implementation: "vcluster", 13 | initFolder: PARENT + "manifests", // initial folder with everything that will be loaded at init 14 | }) 15 | 16 | export function setup() { 17 | console.log("init returns", env.init()); 18 | // it is best to have a bit of delay between creating a CRD and 19 | // a corresponding CR, so as to avoid the "no matches" error 20 | sleep(0.5); 21 | } 22 | 23 | // A simple test to check that an archive in a ConfigMap works. 24 | export default function () { 25 | 26 | let err = env.apply(PARENT + "testrun.yaml"); 27 | console.log("apply testrun returns", err) 28 | 29 | err = env.wait({ 30 | kind: "TestRun", 31 | name: "k6-sample", //tr.name(), 32 | namespace: "default", //tr.namespace(), 33 | status_key: "stage", 34 | status_value: "finished", 35 | }, { 36 | timeout: "5m", 37 | interval: "1m", 38 | }); 39 | 40 | if (err != null) { 41 | fail("wait returns" + err); 42 | } 43 | } 44 | 45 | export function teardown() { 46 | console.log("delete returns", env.delete()); 47 | } -------------------------------------------------------------------------------- /e2e/testrun-archive/testrun.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k6.io/v1alpha1 3 | kind: TestRun 4 | metadata: 5 | name: k6-sample 6 | spec: 7 | parallelism: 1 8 | script: 9 | configMap: 10 | name: "test" 11 | file: "archive.tar" -------------------------------------------------------------------------------- /e2e/testrun-cleanup/manifests/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: test 5 | namespace: fancy-testing 6 | data: 7 | test.js: | 8 | import http from 'k6/http'; 9 | import { check } from 'k6'; 10 | 11 | export let options = { 12 | stages: [ 13 | { target: 200, duration: '30s' }, 14 | { target: 0, duration: '30s' }, 15 | ], 16 | }; 17 | 18 | export default function () { 19 | const result = http.get('https://quickpizza.grafana.com'); 20 | check(result, { 21 | 'http response status code is 200': result.status === 200, 22 | }); 23 | } 24 | -------------------------------------------------------------------------------- /e2e/testrun-cleanup/manifests/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - ../../latest/ 5 | - configmap.yaml 6 | - namespace.yaml 7 | -------------------------------------------------------------------------------- /e2e/testrun-cleanup/manifests/namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: fancy-testing -------------------------------------------------------------------------------- /e2e/testrun-cleanup/test.js: -------------------------------------------------------------------------------- 1 | import { Environment } from 'k6/x/environment'; 2 | import { sleep, fail } from 'k6'; 3 | 4 | export const options = { 5 | setupTimeout: '60s', 6 | }; 7 | 8 | const PARENT = "./" 9 | 10 | const env = new Environment({ 11 | name:
"testrun-cleanup", 12 | implementation: "vcluster", 13 | initFolder: PARENT + "manifests", // initial folder with everything that wil be loaded at init 14 | }) 15 | 16 | export function setup() { 17 | console.log("init returns", env.init()); 18 | // it is best to have a bit of delay between creating a CRD and 19 | // a corresponding CR, so as to avoid the "no matches" error 20 | sleep(0.5); 21 | } 22 | 23 | export default function () { 24 | let err = env.apply(PARENT + "testrun.yaml"); 25 | console.log("apply testrun returns", err) 26 | 27 | // let k6-operator read & bootstrap the TestRun 28 | sleep(10); 29 | 30 | let allPods = env.getN("pods", { 31 | "namespace": "fancy-testing", 32 | "app": "k6", 33 | "k6_cr": "k6-sample", //tr.name() 34 | }); 35 | 36 | // there should be at least initializer pod by now 37 | if (allPods < 1) { 38 | fail("wrong number of pods: " + allPods); 39 | } 40 | 41 | err = env.wait({ 42 | kind: "TestRun", 43 | name: "k6-sample", //tr.name(), 44 | namespace: "fancy-testing", //tr.namespace(), 45 | status_key: "stage", 46 | status_value: "finished", 47 | }, { 48 | timeout: "2m", 49 | interval: "1s", 50 | }); 51 | 52 | // Either wait() will "catch" TestRun at finished stage or 53 | // TestRun will be deleted some time between wait checks. Both 54 | // of those are valid in this case. 55 | if (err != null && err != "context deadline exceeded") { 56 | fail("unexpected error from wait(): " + err); 57 | } 58 | 59 | // there should be no pods at this point 60 | 61 | allPods = env.getN("pods", { 62 | "namespace": "fancy-testing", // tr.namespace() 63 | "app": "k6", 64 | "k6_cr": "k6-sample", //tr.name() 65 | }); 66 | 67 | if (allPods != 0) { 68 | fail("pods were not deleted, there are " + allPods + " pods"); 69 | } 70 | } 71 | 72 | export function teardown() { 73 | console.log("delete returns", env.delete()); 74 | } -------------------------------------------------------------------------------- /e2e/testrun-cleanup/testrun.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k6.io/v1alpha1 3 | kind: TestRun 4 | metadata: 5 | name: k6-sample 6 | namespace: fancy-testing 7 | spec: 8 | parallelism: 1 9 | script: 10 | configMap: 11 | name: "test" 12 | file: "test.js" 13 | cleanup: "post" -------------------------------------------------------------------------------- /e2e/testrun-cloud-output/manifests/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: test 5 | namespace: k6-tests 6 | data: 7 | test.js: | 8 | // base source: https://github.com/grafana/quickpizza/blob/main/k6/foundations/04.metrics.js 9 | import http from "k6/http"; 10 | import { check, sleep } from "k6"; 11 | import { Trend, Counter } from "k6/metrics"; 12 | 13 | const BASE_URL = __ENV.BASE_URL || 'http://localhost:3333'; 14 | 15 | export const options = { 16 | stages: [ 17 | { duration: '5s', target: 5 }, 18 | { duration: '10s', target: 5 }, 19 | { duration: '5s', target: 0 }, 20 | ], 21 | cloud: { 22 | projectID: 3756871, 23 | name: 'k6-operator-e2e-cloud-output' 24 | } 25 | }; 26 | 27 | const pizzas = new Counter('quickpizza_number_of_pizzas'); 28 | const ingredients = new Trend('quickpizza_ingredients'); 29 | 30 | export function setup() { 31 | // TODO: Send notification to Slack 32 | console.log("Starting folks!") 33 | } 34 | 35 | export default function () { 36 | let restrictions = { 37 | maxCaloriesPerSlice: 500, 38 | mustBeVegetarian: 
false, 39 | excludedIngredients: ["pepperoni"], 40 | excludedTools: ["knife"], 41 | maxNumberOfToppings: 6, 42 | minNumberOfToppings: 2 43 | } 44 | let res = http.post(`${BASE_URL}/api/pizza`, JSON.stringify(restrictions), { 45 | headers: { 46 | 'Content-Type': 'application/json', 47 | 'X-User-ID': 23423, 48 | }, 49 | }); 50 | check(res, { "status is 200": (res) => res.status === 200 }); 51 | console.log(`${res.json().pizza.name} (${res.json().pizza.ingredients.length} ingredients)`); 52 | pizzas.add(1); 53 | ingredients.add(res.json().pizza.ingredients.length); 54 | sleep(1); 55 | } 56 | 57 | export function teardown(){ 58 | // TODO: Send notification to Slack 59 | console.log("That's all folks!") 60 | } -------------------------------------------------------------------------------- /e2e/testrun-cloud-output/manifests/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - ../../latest/ 5 | - configmap.yaml 6 | - namespace.yaml 7 | - secret.yaml -------------------------------------------------------------------------------- /e2e/testrun-cloud-output/manifests/namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: k6-tests -------------------------------------------------------------------------------- /e2e/testrun-cloud-output/manifests/secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: cloud-token 5 | namespace: k6-operator-system 6 | labels: 7 | k6cloud: token 8 | data: 9 | token: $CLOUD_TOKEN -------------------------------------------------------------------------------- /e2e/testrun-cloud-output/test.js: -------------------------------------------------------------------------------- 1 | import { Environment } from 'k6/x/environment'; 2 | import { sleep, fail } from 'k6'; 3 | 4 | export const options = { 5 | setupTimeout: '60s', 6 | }; 7 | 8 | const PARENT = "./" 9 | 10 | const env = new Environment({ 11 | name: "testrun-cloud-output", 12 | implementation: "vcluster", 13 | initFolder: PARENT + "manifests", // initial folder with everything that will be loaded at init 14 | }) 15 | 16 | export function setup() { 17 | console.log("init returns", env.init()); 18 | // it is best to have a bit of delay between creating a CRD and 19 | // a corresponding CR, so as to avoid the "no matches" error 20 | sleep(0.5); 21 | } 22 | 23 | export default function () { 24 | let err = env.apply(PARENT + "testrun.yaml"); 25 | console.log("apply testrun returns", err); 26 | 27 | err = env.wait({ 28 | kind: "TestRun", 29 | name: "k6-sample", //tr.name(), 30 | namespace: "k6-tests", //tr.namespace(), 31 | status_key: "stage", 32 | status_value: "started", 33 | }, { 34 | timeout: "1m", 35 | interval: "10s", 36 | }); 37 | 38 | if (err != null) { 39 | fail("wait for started returns" + err); 40 | } 41 | 42 | let allPods = env.getN("pods", { 43 | "namespace": "k6-tests", //tr.namespace() 44 | "app": "k6", 45 | "k6_cr": "k6-sample", //tr.name() 46 | }); 47 | 48 | let runnerPods = env.getN("pods", { 49 | "namespace": "k6-tests", //tr.namespace() 50 | "app": "k6", 51 | "k6_cr": "k6-sample", //tr.name() 52 | "runner": "true", 53 | }); 54 | 55 | // there should be N runner pods + initializer + starter 56 | if (runnerPods != 4 || allPods != runnerPods + 2) { 57 | fail("wrong number of
pods:" + runnerPods + "/" + allPods); 58 | } 59 | 60 | err = env.wait({ 61 | kind: "TestRun", 62 | name: "k6-sample", //tr.name(), 63 | namespace: "k6-tests", //tr.namespace(), 64 | status_key: "stage", 65 | status_value: "finished", 66 | }, { 67 | timeout: "5m", 68 | interval: "10s", 69 | }); 70 | 71 | // TODO: add check for status of the pods 72 | 73 | if (err != null) { 74 | fail("wait for finished returns" + err); 75 | } 76 | } 77 | 78 | export function teardown() { 79 | console.log("delete returns", env.delete()); 80 | } -------------------------------------------------------------------------------- /e2e/testrun-cloud-output/testrun.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k6.io/v1alpha1 3 | kind: TestRun 4 | metadata: 5 | name: k6-sample 6 | namespace: k6-tests 7 | spec: 8 | parallelism: 4 9 | script: 10 | configMap: 11 | name: "test" 12 | file: "test.js" 13 | arguments: --tag office=hours --out cloud 14 | runner: 15 | env: 16 | - name: BASE_URL 17 | value: https://pizza.grafana.fun 18 | -------------------------------------------------------------------------------- /e2e/testrun-simultaneous-cloud-output/manifests/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: stress-test 5 | namespace: k6-tests 6 | data: 7 | test.js: | 8 | import http from 'k6/http'; 9 | import { check } from 'k6'; 10 | 11 | export let options = { 12 | stages: [ 13 | { target: 200, duration: '30s' }, 14 | { target: 0, duration: '30s' }, 15 | ], 16 | cloud: { 17 | projectID: 3756871, 18 | name: 'k6-operator-e2e-simultaneous-cloud-output' 19 | } 20 | }; 21 | 22 | export default function () { 23 | const result = http.get('https://quickpizza.grafana.com'); 24 | check(result, { 25 | 'http response status code is 200': result.status === 200, 26 | }); 27 | } 28 | -------------------------------------------------------------------------------- /e2e/testrun-simultaneous-cloud-output/manifests/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - ../../latest/ 5 | - configmap.yaml 6 | - namespace.yaml 7 | - secret.yaml -------------------------------------------------------------------------------- /e2e/testrun-simultaneous-cloud-output/manifests/namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: k6-tests -------------------------------------------------------------------------------- /e2e/testrun-simultaneous-cloud-output/manifests/secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: cloud-token 5 | namespace: k6-operator-system 6 | labels: 7 | k6cloud: token 8 | data: 9 | token: $CLOUD_TOKEN -------------------------------------------------------------------------------- /e2e/testrun-simultaneous-cloud-output/test.js: -------------------------------------------------------------------------------- 1 | import { Environment } from 'k6/x/environment'; 2 | import { sleep, fail } from 'k6'; 3 | 4 | import { randomIntBetween } from 'https://jslib.k6.io/k6-utils/1.2.0/index.js'; 5 | 6 | export const options = { 7 | setupTimeout: '60s', 8 | }; 9 | 10 | const PARENT = "./" 11 | 12 | const env = new 
Environment({ 13 | name: "testrun-simultaneous-cloud-output", 14 | implementation: "vcluster", 15 | initFolder: PARENT + "manifests", // initial folder with everything that will be loaded at init 16 | }) 17 | 18 | export function setup() { 19 | console.log("init returns", env.init()); 20 | // it is best to have a bit of delay between creating a CRD and 21 | // a corresponding CR, so as to avoid the "no matches" error 22 | sleep(0.5); 23 | } 24 | 25 | // A test to check that simultaneous execution of 2 tests is successful. 26 | export default function () { 27 | let err = env.apply(PARENT + "testrun1.yaml"); 28 | console.log("apply testrun1 returns", err) 29 | 30 | err = env.apply(PARENT + "testrun2.yaml"); 31 | console.log("apply testrun2 returns", err) 32 | 33 | const r = randomIntBetween(1, 2); 34 | 35 | // randomize order of the check as it shouldn't matter 36 | if (r > 1) { 37 | wait_for_second(env); 38 | wait_for_first(env); 39 | } else { 40 | wait_for_first(env); 41 | wait_for_second(env); 42 | } 43 | } 44 | 45 | export function teardown() { 46 | console.log("delete returns", env.delete()); 47 | } 48 | 49 | function wait_for_first(env) { 50 | let err = env.wait({ 51 | kind: "TestRun", 52 | name: "t-2-runners", //tr1.name(), 53 | namespace: "k6-tests", //tr1.namespace(), 54 | status_key: "stage", 55 | status_value: "finished", 56 | }, { 57 | timeout: "5m", 58 | interval: "1m", 59 | }); 60 | 61 | if (err != null) { 62 | fail("wait for t-2-runners returns" + err); 63 | } 64 | 65 | let allPods = env.getN("pods", { 66 | "namespace": "k6-tests", //tr.namespace() 67 | "app": "k6", 68 | "k6_cr": "t-2-runners", //tr.name() 69 | }); 70 | 71 | // there should be N runner pods + initializer + starter 72 | if (allPods != 2 + 2) { 73 | fail("wrong number of pods:" + allPods + " instead of " + 4); 74 | } 75 | } 76 | 77 | function wait_for_second(env) { 78 | let err = env.wait({ 79 | kind: "TestRun", 80 | name: "t-3-runners", //tr2.name(), 81 | namespace: "k6-tests", //tr2.namespace(), 82 | status_key: "stage", 83 | status_value: "finished", 84 | }, { 85 | timeout: "5m", 86 | interval: "1m", 87 | }); 88 | 89 | if (err != null) { 90 | fail("wait for t-3-runners returns" + err); 91 | } 92 | 93 | let allPods = env.getN("pods", { 94 | "namespace": "k6-tests", //tr.namespace() 95 | "app": "k6", 96 | "k6_cr": "t-3-runners", //tr.name() 97 | }); 98 | 99 | // there should be N runner pods + initializer + starter 100 | if (allPods != 3 + 2) { 101 | fail("wrong number of pods:" + allPods + " instead of " + 5); 102 | } 103 | } -------------------------------------------------------------------------------- /e2e/testrun-simultaneous-cloud-output/testrun1.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k6.io/v1alpha1 3 | kind: TestRun 4 | metadata: 5 | name: t-2-runners 6 | namespace: k6-tests 7 | spec: 8 | parallelism: 2 9 | script: 10 | configMap: 11 | name: "stress-test" 12 | file: "test.js" 13 | arguments: --out cloud 14 | -------------------------------------------------------------------------------- /e2e/testrun-simultaneous-cloud-output/testrun2.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k6.io/v1alpha1 3 | kind: TestRun 4 | metadata: 5 | name: t-3-runners 6 | namespace: k6-tests 7 | spec: 8 | parallelism: 3 9 | script: 10 | configMap: 11 | name: "stress-test" 12 | file: "test.js" 13 | arguments: --out cloud
-------------------------------------------------------------------------------- /e2e/testrun-simultaneous/manifests/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: stress-test 5 | namespace: default 6 | data: 7 | test.js: | 8 | import http from 'k6/http'; 9 | import { check } from 'k6'; 10 | 11 | export let options = { 12 | stages: [ 13 | { target: 200, duration: '30s' }, 14 | { target: 0, duration: '30s' }, 15 | ], 16 | }; 17 | 18 | export default function () { 19 | const result = http.get('https://quickpizza.grafana.com'); 20 | check(result, { 21 | 'http response status code is 200': result.status === 200, 22 | }); 23 | } 24 | -------------------------------------------------------------------------------- /e2e/testrun-simultaneous/manifests/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - ../../latest/ 5 | - configmap.yaml 6 | -------------------------------------------------------------------------------- /e2e/testrun-simultaneous/test.js: -------------------------------------------------------------------------------- 1 | import { Environment } from 'k6/x/environment'; 2 | import { sleep, fail } from 'k6'; 3 | 4 | import { randomIntBetween } from 'https://jslib.k6.io/k6-utils/1.2.0/index.js'; 5 | 6 | export const options = { 7 | setupTimeout: '60s', 8 | }; 9 | 10 | const PARENT = "./" 11 | 12 | const env = new Environment({ 13 | name: "testrun-simultaneous", 14 | implementation: "vcluster", 15 | initFolder: PARENT + "manifests", // initial folder with everything that will be loaded at init 16 | }) 17 | 18 | export function setup() { 19 | console.log("init returns", env.init()); 20 | // it is best to have a bit of delay between creating a CRD and 21 | // a corresponding CR, so as to avoid the "no matches" error 22 | sleep(0.5); 23 | } 24 | 25 | // A test to check that simultaneous execution of 2 tests is successful.
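// Both TestRuns below share the same script ConfigMap; their completion is
// checked in randomized order, since the order should not matter.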
26 | export default function () { 27 | let err = env.apply(PARENT + "testrun1.yaml"); 28 | console.log("apply testrun1 returns", err) 29 | 30 | err = env.apply(PARENT + "testrun2.yaml"); 31 | console.log("apply testrun2 returns", err) 32 | 33 | const r = randomIntBetween(1, 2); 34 | 35 | // randomize order of the check as it shouldn't matter 36 | if (r > 1) { 37 | wait_for_second(env); 38 | wait_for_first(env); 39 | } else { 40 | wait_for_first(env); 41 | wait_for_second(env); 42 | } 43 | } 44 | 45 | export function teardown() { 46 | console.log("delete returns", env.delete()); 47 | } 48 | 49 | function wait_for_first(env) { 50 | let err = env.wait({ 51 | kind: "TestRun", 52 | name: "t-2-runners", //tr1.name(), 53 | namespace: "default", //tr1.namespace(), 54 | status_key: "stage", 55 | status_value: "finished", 56 | }, { 57 | timeout: "5m", 58 | interval: "1m", 59 | }); 60 | 61 | if (err != null) { 62 | fail("wait for t-2-runners returns" + err); 63 | } 64 | 65 | let allPods = env.getN("pods", { 66 | "namespace": "default", //tr.namespace() 67 | "app": "k6", 68 | "k6_cr": "t-2-runners", //tr.name() 69 | }); 70 | 71 | // there should be N runner pods + initializer + starter 72 | if (allPods != 2 + 2) { 73 | fail("wrong number of pods:" + allPods + " instead of " + 4); 74 | } 75 | } 76 | 77 | function wait_for_second(env) { 78 | let err = env.wait({ 79 | kind: "TestRun", 80 | name: "t-3-runners", //tr2.name(), 81 | namespace: "default", //tr2.namespace(), 82 | status_key: "stage", 83 | status_value: "finished", 84 | }, { 85 | timeout: "5m", 86 | interval: "1m", 87 | }); 88 | 89 | if (err != null) { 90 | fail("wait for t-3-runners returns" + err); 91 | } 92 | 93 | let allPods = env.getN("pods", { 94 | "namespace": "default", //tr.namespace() 95 | "app": "k6", 96 | "k6_cr": "t-3-runners", //tr.name() 97 | }); 98 | 99 | // there should be N runner pods + initializer + starter 100 | if (allPods != 3 + 2) { 101 | fail("wrong number of pods:" + allPods + " instead of " + 5); 102 | } 103 | } -------------------------------------------------------------------------------- /e2e/testrun-simultaneous/testrun1.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k6.io/v1alpha1 3 | kind: TestRun 4 | metadata: 5 | name: t-2-runners 6 | spec: 7 | parallelism: 2 8 | script: 9 | configMap: 10 | name: "stress-test" 11 | file: "test.js" 12 | -------------------------------------------------------------------------------- /e2e/testrun-simultaneous/testrun2.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k6.io/v1alpha1 3 | kind: TestRun 4 | metadata: 5 | name: t-3-runners 6 | spec: 7 | parallelism: 3 8 | script: 9 | configMap: 10 | name: "stress-test" 11 | file: "test.js" 12 | -------------------------------------------------------------------------------- /e2e/testrun-watch-namespace/manifests/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: stress-test 5 | namespace: some-ns 6 | data: 7 | test.js: | 8 | import http from 'k6/http'; 9 | import { check } from 'k6'; 10 | 11 | export let options = { 12 | stages: [ 13 | { target: 200, duration: '30s' }, 14 | { target: 0, duration: '30s' }, 15 | ], 16 | }; 17 | 18 | export default function () { 19 | const result = http.get('https://quickpizza.grafana.com'); 20 | check(result, { 21 | 'http response status code is 200': result.status ===
200, 22 | }); 23 | } 24 | -------------------------------------------------------------------------------- /e2e/testrun-watch-namespace/manifests/invisible-ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: invisible 5 | -------------------------------------------------------------------------------- /e2e/testrun-watch-namespace/manifests/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - ../../latest 6 | - configmap.yaml 7 | - some-ns.yaml 8 | - invisible-ns.yaml 9 | 10 | # The Deployment does not have any env vars by default, so to add an env var, 11 | # we need to create a list. Reference: 12 | # https://github.com/kubernetes-sigs/kustomize/issues/1439#issuecomment-520614831 13 | patches: 14 | - patch: |- 15 | - op: add 16 | path: "/spec/template/spec/containers/0/env" 17 | value: 18 | - name: WATCH_NAMESPACE 19 | value: "some-ns" 20 | target: 21 | kind: Deployment 22 | namespace: k6-operator-system 23 | name: k6-operator-controller-manager 24 | -------------------------------------------------------------------------------- /e2e/testrun-watch-namespace/manifests/some-ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: some-ns 5 | -------------------------------------------------------------------------------- /e2e/testrun-watch-namespace/test.js: -------------------------------------------------------------------------------- 1 | import { Environment } from 'k6/x/environment'; 2 | import { sleep, fail } from 'k6'; 3 | 4 | export const options = { 5 | setupTimeout: '60s', 6 | }; 7 | 8 | const PARENT = "./" 9 | 10 | const env = new Environment({ 11 | name: "testrun-watch-namespace", 12 | implementation: "vcluster", 13 | initFolder: PARENT + "manifests", // initial folder with everything that will be loaded at init 14 | }) 15 | 16 | export function setup() { 17 | console.log("init returns", env.init()); 18 | // it is best to have a bit of delay between creating a CRD and 19 | // a corresponding CR, so as to avoid the "no matches" error 20 | sleep(0.5); 21 | } 22 | 23 | // A test to check behaviour of WATCH_NAMESPACE 24 | export default function () { 25 | let err = env.apply(PARENT + "testrun.yaml"); 26 | console.log("apply testrun returns", err) 27 | 28 | err = env.apply(PARENT + "testrun-invisible.yaml"); 29 | console.log("apply testrun-invisible returns", err) 30 | 31 | err = env.wait({ 32 | kind: "TestRun", 33 | name: "k6-sample", //tr1.name(), 34 | namespace: "some-ns", //tr1.namespace(), 35 | status_key: "stage", 36 | status_value: "finished", 37 | }, { 38 | timeout: "5m", 39 | interval: "1m", 40 | }); 41 | 42 | if (err != null) { 43 | fail("wait for k6-sample in some-ns returns" + err); 44 | } 45 | 46 | err = env.wait({ 47 | kind: "TestRun", 48 | name: "k6-sample", //tr2.name(), 49 | namespace: "invisible", //tr2.namespace(), 50 | status_key: "stage", 51 | status_value: "", // stage here should never be populated by k6-operator 52 | }, { 53 | timeout: "5m", 54 | interval: "1m", 55 | }); 56 | 57 | // Uncomment this, once this issue is done: 58 | // https://github.com/grafana/xk6-environment/issues/17 59 | // if (err !== null) { 60 | // fail("wait for k6-sample in default returns" + err); 61 | // } 62 | } 63 | 64 | export function teardown() { 65
| console.log("delete returns", env.delete()); 66 | } 67 | -------------------------------------------------------------------------------- /e2e/testrun-watch-namespace/testrun-invisible.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k6.io/v1alpha1 3 | kind: TestRun 4 | metadata: 5 | name: k6-sample 6 | namespace: invisible 7 | spec: 8 | parallelism: 1 9 | script: 10 | configMap: 11 | name: "stress-test" 12 | file: "test.js" -------------------------------------------------------------------------------- /e2e/testrun-watch-namespace/testrun.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k6.io/v1alpha1 3 | kind: TestRun 4 | metadata: 5 | name: k6-sample 6 | namespace: some-ns 7 | spec: 8 | parallelism: 1 9 | script: 10 | configMap: 11 | name: "stress-test" 12 | file: "test.js" -------------------------------------------------------------------------------- /hack/boilerplate.go.txt: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ -------------------------------------------------------------------------------- /pkg/cloud/aggregation.go: -------------------------------------------------------------------------------- 1 | package cloud 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | "go.k6.io/k6/cloudapi" 8 | corev1 "k8s.io/api/core/v1" 9 | ) 10 | 11 | var aggregationVarNames = []string{ 12 | // cloud output v2 13 | "K6_CLOUD_API_VERSION", 14 | "K6_CLOUD_AGGREGATION_PERIOD", 15 | "K6_CLOUD_AGGREGATION_WAIT_PERIOD", 16 | "K6_CLOUD_METRIC_PUSH_INTERVAL", 17 | "K6_CLOUD_METRIC_PUSH_CONCURRENCY", 18 | } 19 | 20 | func EncodeAggregationConfig(testRun *cloudapi.Config) string { 21 | return fmt.Sprintf("%d|%s|%s|%s|%d", 22 | 2, // version of protocol 23 | testRun.AggregationPeriod.String(), 24 | testRun.AggregationWaitPeriod.String(), 25 | testRun.MetricPushInterval.String(), 26 | testRun.MetricPushConcurrency.Int64) 27 | } 28 | 29 | func DecodeAggregationConfig(encoded string) ([]corev1.EnvVar, error) { 30 | values := strings.Split(encoded, "|") 31 | 32 | if len(values) != len(aggregationVarNames) { 33 | return nil, fmt.Errorf( 34 | "Aggregation vars got corrupted: there are %d values instead of %d. 
Encoded value: `%s`.", 35 | len(values), 36 | len(aggregationVarNames), 37 | encoded) 38 | } 39 | 40 | vars := make([]corev1.EnvVar, len(values)) 41 | for i := range values { 42 | vars[i] = corev1.EnvVar{ 43 | Name: aggregationVarNames[i], 44 | Value: values[i], 45 | } 46 | } 47 | 48 | return vars, nil 49 | } 50 | -------------------------------------------------------------------------------- /pkg/cloud/aggregation_test.go: -------------------------------------------------------------------------------- 1 | package cloud 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/stretchr/testify/assert" 8 | "go.k6.io/k6/cloudapi" 9 | "go.k6.io/k6/lib/types" 10 | "gopkg.in/guregu/null.v3" 11 | corev1 "k8s.io/api/core/v1" 12 | ) 13 | 14 | func Test_EncodeAggregationConfig(t *testing.T) { 15 | expected := "2|5s|3s|10s|10" 16 | 17 | testRunResponse := &cloudapi.CreateTestRunResponse{ 18 | ReferenceID: "test-run-id", 19 | ConfigOverride: &cloudapi.Config{ 20 | AggregationPeriod: types.NullDurationFrom(time.Second * 5), 21 | AggregationWaitPeriod: types.NullDurationFrom(time.Second * 3), 22 | MetricPushInterval: types.NullDurationFrom(time.Second * 10), 23 | MetricPushConcurrency: null.IntFrom(10), 24 | }, 25 | } 26 | 27 | encodedAggregation := EncodeAggregationConfig(testRunResponse.ConfigOverride) 28 | assert.Equal(t, expected, encodedAggregation) 29 | } 30 | 31 | func Test_DecodeAggregationConfig(t *testing.T) { 32 | var ( 33 | encoded = "2|5s|3s|10s|10" 34 | 35 | expected = []corev1.EnvVar{ 36 | { 37 | Name: "K6_CLOUD_API_VERSION", 38 | Value: "2", 39 | }, 40 | { 41 | Name: "K6_CLOUD_AGGREGATION_PERIOD", 42 | Value: "5s", 43 | }, 44 | { 45 | Name: "K6_CLOUD_AGGREGATION_WAIT_PERIOD", 46 | Value: "3s", 47 | }, 48 | { 49 | Name: "K6_CLOUD_METRIC_PUSH_INTERVAL", 50 | Value: "10s", 51 | }, 52 | { 53 | Name: "K6_CLOUD_METRIC_PUSH_CONCURRENCY", 54 | Value: "10", 55 | }, 56 | } 57 | ) 58 | 59 | envVars, err := DecodeAggregationConfig(encoded) 60 | assert.Equal(t, nil, err) 61 | 62 | for i, expectedEnvVar := range expected { 63 | assert.Equal(t, expectedEnvVar, envVars[i]) 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /pkg/cloud/cloud_output.go: -------------------------------------------------------------------------------- 1 | package cloud 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "time" 7 | 8 | "github.com/go-logr/logr" 9 | "github.com/sirupsen/logrus" 10 | "go.k6.io/k6/cloudapi" 11 | "go.k6.io/k6/lib/consts" 12 | null "gopkg.in/guregu/null.v3" 13 | ) 14 | 15 | // TODO: refactor this! 
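// The package-level client below is cached across CreateTestRun calls, which
// makes this package stateful; hence the TODO above.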
16 | var client *cloudapi.Client 17 | 18 | type TestRun struct { 19 | Name string `json:"name"` 20 | ProjectID int64 `json:"project_id,omitempty"` 21 | VUsMax int64 `json:"vus"` 22 | Thresholds map[string][]string `json:"thresholds"` 23 | Duration int64 `json:"duration"` 24 | ProcessThresholds bool `json:"process_thresholds"` 25 | Instances int32 `json:"instances"` 26 | } 27 | 28 | func NewClient(log logr.Logger, token, host string) *cloudapi.Client { 29 | logger := &logrus.Logger{ 30 | Out: os.Stdout, 31 | Formatter: new(logrus.TextFormatter), 32 | Hooks: make(logrus.LevelHooks), 33 | Level: logrus.InfoLevel, 34 | } 35 | 36 | cloudConfig := cloudapi.NewConfig() 37 | 38 | if len(host) == 0 { 39 | host = cloudConfig.Host.String 40 | } 41 | 42 | return cloudapi.NewClient(logger, token, host, consts.Version, time.Minute) 43 | } 44 | 45 | func CreateTestRun(opts InspectOutput, instances int32, host, token string, log logr.Logger) (*cloudapi.CreateTestRunResponse, error) { 46 | if client == nil { 47 | client = NewClient(log, token, host) 48 | } 49 | 50 | cloudConfig := cloudapi.NewConfig() 51 | 52 | if opts.ProjectID() > 0 { 53 | cloudConfig.ProjectID = null.NewInt(opts.ProjectID(), true) 54 | } 55 | 56 | thresholds := make(map[string][]string, len(opts.Thresholds)) 57 | for name, t := range opts.Thresholds { 58 | for _, threshold := range t.Thresholds { 59 | thresholds[name] = append(thresholds[name], threshold.Source) 60 | } 61 | } 62 | 63 | if len(host) == 0 { 64 | host = cloudConfig.Host.String 65 | } 66 | 67 | tr := TestRun{ 68 | Name: opts.TestName(), 69 | ProjectID: cloudConfig.ProjectID.Int64, 70 | VUsMax: int64(opts.MaxVUs), 71 | Thresholds: thresholds, 72 | Duration: int64(opts.TotalDuration.TimeDuration().Seconds()), 73 | ProcessThresholds: true, 74 | Instances: instances, 75 | } 76 | return createTestRun(client, host, &tr) 77 | } 78 | 79 | // We cannot use the cloudapi.TestRun struct and the cloudapi.Client.CreateTestRun call because they're not aware of the 80 | // process_thresholds argument; so let's use a custom struct and function instead 81 | func createTestRun(client *cloudapi.Client, host string, testRun *TestRun) (*cloudapi.CreateTestRunResponse, error) { 82 | url := host + "/v1/tests" 83 | req, err := client.NewRequest("POST", url, testRun) 84 | if err != nil { 85 | return nil, err 86 | } 87 | 88 | ctrr := cloudapi.CreateTestRunResponse{} 89 | err = client.Do(req, &ctrr) 90 | if err != nil { 91 | return nil, err 92 | } 93 | 94 | if ctrr.ReferenceID == "" { 95 | return nil, fmt.Errorf("failed to get a reference ID") 96 | } 97 | 98 | return &ctrr, nil 99 | } 100 | 101 | func FinishTestRun(c *cloudapi.Client, refID string) error { 102 | if c != nil { 103 | return c.TestFinished(refID, cloudapi.ThresholdResult( 104 | map[string]map[string]bool{}, 105 | ), false, cloudapi.RunStatusFinished) 106 | } 107 | 108 | return client.TestFinished(refID, cloudapi.ThresholdResult( 109 | map[string]map[string]bool{}, 110 | ), false, cloudapi.RunStatusFinished) 111 | } 112 | -------------------------------------------------------------------------------- /pkg/cloud/conn/poller.go: -------------------------------------------------------------------------------- 1 | package conn 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | type Poller struct { 8 | interval time.Duration 9 | running bool 10 | done chan bool 11 | 12 | OnInterval func() 13 | OnDone func() 14 | } 15 | 16 | func NewPoller(interval time.Duration) *Poller
{ 17 | return &Poller{ 18 | interval: interval, 19 | running: false, 20 | done: make(chan bool), 21 | 22 | // by default, poller does nothing 23 | OnInterval: func() {}, 24 | OnDone: func() {}, 25 | } 26 | } 27 | 28 | func (poller Poller) IsPolling() bool { 29 | return poller.running 30 | } 31 | 32 | func (poller *Poller) Start() { 33 | ticker := time.NewTicker(poller.interval) 34 | 35 | go func() { 36 | for { 37 | select { 38 | case <-poller.done: 39 | ticker.Stop() 40 | poller.OnDone() 41 | return 42 | 43 | case <-ticker.C: 44 | poller.OnInterval() 45 | } 46 | } 47 | }() 48 | 49 | poller.running = true 50 | } 51 | 52 | func (poller *Poller) Stop() { 53 | if poller.IsPolling() { 54 | poller.done <- true 55 | 56 | poller.running = false 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /pkg/cloud/conn/poller_test.go: -------------------------------------------------------------------------------- 1 | package conn 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | ) 7 | 8 | func Test_PollerStops(t *testing.T) { 9 | var x int 10 | 11 | poller := NewPoller(time.Millisecond * 100) 12 | poller.OnInterval = func() { 13 | // sleep for 200ms before changing the var so that each polling loop 14 | // takes twice as long as the ticker itself 15 | time.Sleep(time.Millisecond * 200) 16 | x++ 17 | } 18 | 19 | pollerStopped := make(chan struct{}) 20 | 21 | poller.Start() 22 | 23 | go func() { 24 | // If we stop after 300ms, then there should be ~ 2 25 | // polling loops done in total. 26 | time.Sleep(time.Millisecond * 300) 27 | poller.Stop() 28 | pollerStopped <- struct{}{} 29 | }() 30 | 31 | select { 32 | case <-pollerStopped: 33 | if poller.IsPolling() { 34 | t.Error("Poller is running even though pollerStopped was marked: something may be off with the test.") 35 | } 36 | 37 | if x == 0 { 38 | t.Error("Poller stopped correctly, but the var's value hasn't been increased") 39 | } 40 | 41 | case <-time.After(time.Second * 10): 42 | t.Errorf("Poller failed to stop after 10 seconds. Poller status: %v", poller.IsPolling()) 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /pkg/cloud/plz.go: -------------------------------------------------------------------------------- 1 | package cloud 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "strings" 7 | 8 | "go.k6.io/k6/cloudapi" 9 | ) 10 | 11 | const ( 12 | defaultApiUrl = "https://api.k6.io" 13 | defaultIngestUrl = "https://ingest.k6.io" 14 | ) 15 | 16 | func RegisterPLZ(client *cloudapi.Client, data PLZRegistrationData) error { 17 | url := fmt.Sprintf("%s/cloud-resources/v1/load-zones", strings.TrimSuffix(client.BaseURL(), "/v1")) 18 | 19 | data.LZConfig = LZConfig{ 20 | RunnerImage: data.RunnerImage, 21 | } 22 | 23 | req, err := client.NewRequest("POST", url, data) 24 | if err != nil { 25 | return err 26 | } 27 | 28 | var resp struct { 29 | Error struct { 30 | Message string `json:"message"` 31 | } `json:"error"` 32 | } 33 | if err = client.Do(req, &resp); err != nil { 34 | return fmt.Errorf("Received error `%s`. Message from server `%s`", err.Error(), resp.Error.Message) 35 | } 36 | 37 | return nil 38 | } 39 | 40 | func DeRegisterPLZ(client *cloudapi.Client, name string) error { 41 | url := fmt.Sprintf("%s/cloud-resources/v1/load-zones/%s", strings.TrimSuffix(client.BaseURL(), "/v1"), name) 42 | 43 | req, err := client.NewRequest("DELETE", url, nil) 44 | if err != nil { 45 | return err 46 | } 47 | 48 | return client.Do(req, nil) 49 | } 50 | 51 | // temporary hack!
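// ApiURL derives the REST API base URL from the K6_CLOUD_HOST value:
// a host containing "staging" maps to the staging API, anything else to production.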
52 | func ApiURL(k6CloudHostEnvVar string) string { 53 | url := defaultApiUrl 54 | if strings.Contains(k6CloudHostEnvVar, "staging") { 55 | url = "https://api.staging.k6.io" 56 | } 57 | return url 58 | } 59 | 60 | func K6CloudHost() string { 61 | host, ok := os.LookupEnv("K6_CLOUD_HOST") 62 | if !ok { 63 | return defaultIngestUrl 64 | } 65 | 66 | return host 67 | } 68 | -------------------------------------------------------------------------------- /pkg/cloud/resources_test.go: -------------------------------------------------------------------------------- 1 | package cloud 2 | 3 | import ( 4 | "testing" 5 | 6 | "k8s.io/apimachinery/pkg/api/resource" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | // 1 CPU in Kubernetes = 1 AWS vCPU = 1 GCP Core = 1 Azure vCore 12 | // Docs: https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource/#cpu-units 13 | 14 | func TestConversion(t *testing.T) { 15 | testCases := []struct { 16 | k8sResource string 17 | expected float64 18 | }{ 19 | // CPU 20 | { 21 | "512m", 22 | 0.512, 23 | }, 24 | { 25 | "1000m", 26 | 1, 27 | }, 28 | { 29 | "1", 30 | 1, 31 | }, 32 | { 33 | "100", 34 | 100, 35 | }, 36 | // Memory 37 | { 38 | "104857600", 39 | 104857600, 40 | }, 41 | { 42 | "100M", 43 | 100000000, 44 | }, 45 | { 46 | "100Mi", 47 | 104857600, 48 | }, 49 | { 50 | "150Mi", 51 | 157286400, 52 | }, 53 | { 54 | "1050Mi", 55 | 1101004800, 56 | }, 57 | { 58 | "4000M", 59 | 4000000000, 60 | }, 61 | { 62 | "4000Mi", 63 | 4194304000, 64 | }, 65 | { 66 | "4Gi", 67 | 4294967296, 68 | }, 69 | { 70 | "10000Mi", 71 | 10485760000, 72 | }, 73 | { 74 | "16G", 75 | 16000000000, 76 | }, 77 | { 78 | "32G", 79 | 32000000000, 80 | }, 81 | { 82 | "64G", 83 | 64000000000, 84 | }, 85 | } 86 | 87 | for _, testCase := range testCases { 88 | q := resource.MustParse(testCase.k8sResource) 89 | got := q.AsApproximateFloat64() 90 | assert.Equal(t, testCase.expected, got, "testCase", testCase) 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /pkg/cloud/types_test.go: -------------------------------------------------------------------------------- 1 | package cloud 2 | 3 | import ( 4 | "encoding/json" 5 | "testing" 6 | ) 7 | 8 | func TestInspectOutput_TestNameAndProjectID(t *testing.T) { 9 | t.Parallel() 10 | 11 | tests := []struct { 12 | name string 13 | fields []byte 14 | expectedProjectID int64 15 | expectedName string 16 | }{ 17 | { 18 | name: "empty", 19 | fields: []byte(`{}`), 20 | expectedProjectID: 0, 21 | }, 22 | { 23 | name: "only legacy way of defining the options", 24 | fields: []byte(`{"ext":{"loadimpact":{"name":"test","projectID":123}}}`), 25 | expectedProjectID: 123, 26 | expectedName: "test", 27 | }, 28 | { 29 | name: "only new way of defining the options", 30 | fields: []byte(`{"cloud":{"name":"lorem","projectID":321}}`), 31 | expectedProjectID: 321, 32 | expectedName: "lorem", 33 | }, 34 | { 35 | name: "both ways, priority to the new way", 36 | fields: []byte(`{"cloud":{"name":"ipsum","projectID":987},"ext":{"loadimpact":{"name":"test","projectID":123}}}`), 37 | expectedProjectID: 987, 38 | expectedName: "ipsum", 39 | }, 40 | } 41 | for _, tt := range tests { 42 | tt := tt 43 | 44 | t.Run(tt.name, func(t *testing.T) { 45 | t.Parallel() 46 | 47 | var io *InspectOutput 48 | if err := json.Unmarshal(tt.fields, &io); err != nil { 49 | t.Errorf("error unmarshalling json: %v", err) 50 | } 51 | 52 | if got := io.ProjectID(); got != tt.expectedProjectID { 53 | t.Errorf("InspectOutput.ProjectID() = %v,
want %v", got, tt.expectedProjectID) 54 | } 55 | 56 | if got := io.TestName(); got != tt.expectedName { 57 | t.Errorf("InspectOutput.TestName() = %v, want %v", got, tt.expectedName) 58 | } 59 | }) 60 | } 61 | } 62 | 63 | func TestInspectOutput_SetTestName(t *testing.T) { 64 | t.Parallel() 65 | 66 | io := &InspectOutput{} 67 | if got := io.TestName(); got != "" { 68 | t.Errorf("InspectOutput.TestName() = %v, want empty name", got) 69 | } 70 | 71 | io.SetTestName("test-lore-ipsum") 72 | if got := io.TestName(); got != "test-lore-ipsum" { 73 | t.Errorf("InspectOutput.TestName() = %v, want test-lore-ipsum", got) 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /pkg/plz/scheme.go: -------------------------------------------------------------------------------- 1 | package plz 2 | 3 | import ( 4 | "k8s.io/apimachinery/pkg/runtime" 5 | ) 6 | 7 | var ( 8 | // according to the docs, scheme should be thread-safe at this point: 9 | // https://pkg.go.dev/k8s.io/apimachinery/pkg/runtime#Scheme 10 | // so let's try this 11 | scheme *runtime.Scheme 12 | ) 13 | 14 | func SetScheme(s *runtime.Scheme) { 15 | scheme = s 16 | } 17 | -------------------------------------------------------------------------------- /pkg/plz/workers.go: -------------------------------------------------------------------------------- 1 | package plz 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | ) 7 | 8 | // PLZWorkers is a gathering point for multiple PLZ workers. 9 | // Note: while PLZWorkers is thread-safe, a lone PLZWorker is not. 10 | type PLZWorkers struct { 11 | m sync.Map 12 | } 13 | 14 | func (w *PLZWorkers) AddWorker(name string, worker *PLZWorker) error { 15 | _, loaded := w.m.LoadOrStore(name, worker) 16 | if loaded { 17 | return fmt.Errorf("PLZ worker has already been added") 18 | } 19 | return nil 20 | } 21 | 22 | func (w *PLZWorkers) GetWorker(name string) (worker *PLZWorker, err error) { 23 | ptr, ok := w.m.Load(name) 24 | if !ok { 25 | return nil, fmt.Errorf("PLZ worker doesn't exist anymore") 26 | } 27 | 28 | if worker, ok = ptr.(*PLZWorker); !ok { 29 | return nil, fmt.Errorf("Cannot load PLZ worker: this might be a bug!") 30 | } 31 | 32 | return worker, nil 33 | } 34 | 35 | func (w *PLZWorkers) DeleteWorker(name string) { 36 | w.m.Delete(name) 37 | } 38 | -------------------------------------------------------------------------------- /pkg/resources/containers/curl_start.go: -------------------------------------------------------------------------------- 1 | package containers 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "net" 7 | "strings" 8 | 9 | "github.com/grafana/k6-operator/pkg/types" 10 | corev1 "k8s.io/api/core/v1" 11 | 12 | resource "k8s.io/apimachinery/pkg/api/resource" 13 | ) 14 | 15 | // NewStartContainer is used to get a template for a new k6 starting curl container. 
16 | func NewStartContainer(hostnames []string, image string, imagePullPolicy corev1.PullPolicy, command []string, env []corev1.EnvVar, securityContext corev1.SecurityContext) corev1.Container { 17 | req, _ := json.Marshal( 18 | types.StatusAPIRequest{ 19 | Data: types.StatusAPIRequestData{ 20 | Attributes: types.StatusAPIRequestDataAttributes{ 21 | Paused: false, 22 | }, 23 | ID: "default", 24 | Type: "status", 25 | }, 26 | }) 27 | 28 | var parts []string 29 | for _, hostname := range hostnames { 30 | parts = append(parts, fmt.Sprintf("curl --retry 3 -X PATCH -H 'Content-Type: application/json' http://%s/v1/status -d '%s'", net.JoinHostPort(hostname, "6565"), req)) 31 | } 32 | 33 | return corev1.Container{ 34 | Name: "k6-curl", 35 | Image: image, 36 | ImagePullPolicy: imagePullPolicy, 37 | Env: env, 38 | Resources: corev1.ResourceRequirements{ 39 | Requests: corev1.ResourceList{ 40 | corev1.ResourceCPU: *resource.NewMilliQuantity(50, resource.DecimalSI), 41 | corev1.ResourceMemory: *resource.NewQuantity(2097152, resource.BinarySI), 42 | }, 43 | Limits: corev1.ResourceList{ 44 | corev1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI), 45 | corev1.ResourceMemory: *resource.NewQuantity(209715200, resource.BinarySI), 46 | }, 47 | }, 48 | Command: append( 49 | command, 50 | strings.Join(parts, ";"), 51 | ), 52 | SecurityContext: &securityContext, 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /pkg/resources/containers/curl_stop.go: -------------------------------------------------------------------------------- 1 | package containers 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "net" 7 | "strings" 8 | 9 | "github.com/grafana/k6-operator/pkg/types" 10 | corev1 "k8s.io/api/core/v1" 11 | 12 | resource "k8s.io/apimachinery/pkg/api/resource" 13 | ) 14 | 15 | // NewStopContainer is used to get a template for a new k6 stop curl container. 
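// NewStopContainer mirrors NewStartContainer, except that the PATCH payload sets
// `"stopped": true` so that every runner aborts the test.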
16 | func NewStopContainer(hostnames []string, image string, imagePullPolicy corev1.PullPolicy, command []string, env []corev1.EnvVar, securityContext corev1.SecurityContext) corev1.Container { 17 | req, _ := json.Marshal( 18 | types.StatusAPIRequest{ 19 | Data: types.StatusAPIRequestData{ 20 | Attributes: types.StatusAPIRequestDataAttributes{ 21 | Stopped: true, 22 | }, 23 | ID: "default", 24 | Type: "status", 25 | }, 26 | }) 27 | 28 | var parts []string 29 | for _, hostname := range hostnames { 30 | parts = append(parts, fmt.Sprintf("curl --retry 3 -X PATCH -H 'Content-Type: application/json' http://%s/v1/status -d '%s'", net.JoinHostPort(hostname, "6565"), req)) 31 | } 32 | 33 | return corev1.Container{ 34 | Name: "k6-curl", 35 | Image: image, 36 | ImagePullPolicy: imagePullPolicy, 37 | Env: env, 38 | Resources: corev1.ResourceRequirements{ 39 | Requests: corev1.ResourceList{ 40 | corev1.ResourceCPU: *resource.NewMilliQuantity(50, resource.DecimalSI), 41 | corev1.ResourceMemory: *resource.NewQuantity(2097152, resource.BinarySI), 42 | }, 43 | Limits: corev1.ResourceList{ 44 | corev1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI), 45 | corev1.ResourceMemory: *resource.NewQuantity(209715200, resource.BinarySI), 46 | }, 47 | }, 48 | Command: append( 49 | command, 50 | strings.Join(parts, ";"), 51 | ), 52 | SecurityContext: &securityContext, 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /pkg/resources/containers/s3.go: -------------------------------------------------------------------------------- 1 | package containers 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/grafana/k6-operator/api/v1alpha1" 7 | corev1 "k8s.io/api/core/v1" 8 | ) 9 | 10 | // NewS3InitContainer is used to download a script archive from S3. 11 | func NewS3InitContainer(uri, image string, volumeMount corev1.VolumeMount) v1alpha1.InitContainer { 12 | return v1alpha1.InitContainer{ 13 | Name: "archive-download", 14 | Image: image, 15 | Command: []string{"sh", "-c", fmt.Sprintf("curl -X GET -L '%s' > /test/archive.tar ; ls -l /test", uri)}, 16 | VolumeMounts: []corev1.VolumeMount{volumeMount}, 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /pkg/resources/jobs/helpers.go: -------------------------------------------------------------------------------- 1 | package jobs 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | 7 | "github.com/grafana/k6-operator/pkg/types" 8 | 9 | "github.com/grafana/k6-operator/api/v1alpha1" 10 | corev1 "k8s.io/api/core/v1" 11 | ) 12 | 13 | func newLabels(name string) map[string]string { 14 | return map[string]string{ 15 | "app": "k6", 16 | "k6_cr": name, 17 | } 18 | } 19 | 20 | func newIstioCommand(istioEnabled string, inheritedCommands []string) ([]string, bool) { 21 | istio := false 22 | if istioEnabled != "" { 23 | istio, _ = strconv.ParseBool(istioEnabled) 24 | } 25 | var command []string 26 | 27 | if istio { 28 | command = append(command, "scuttle") 29 | } 30 | 31 | command = append(command, inheritedCommands...) 
32 | 33 | return command, istio 34 | } 35 | 36 | func newIstioEnvVar(istio v1alpha1.K6Scuttle, istioEnabled bool) []corev1.EnvVar { 37 | env := []corev1.EnvVar{} 38 | 39 | if istioEnabled { 40 | var istioQuitApi string 41 | var envoyAdminApi string 42 | var waitForEnvoyTimeout string 43 | 44 | if istio.EnvoyAdminApi != "" { 45 | envoyAdminApi = istio.EnvoyAdminApi 46 | } else { 47 | envoyAdminApi = "http://127.0.0.1:15000" 48 | } 49 | env = append(env, corev1.EnvVar{ 50 | Name: "ENVOY_ADMIN_API", 51 | Value: envoyAdminApi, 52 | }) 53 | 54 | if istio.IstioQuitApi != "" { 55 | istioQuitApi = istio.IstioQuitApi 56 | } else { 57 | istioQuitApi = "http://127.0.0.1:15020" 58 | } 59 | env = append(env, corev1.EnvVar{ 60 | Name: "ISTIO_QUIT_API", 61 | Value: istioQuitApi, 62 | }) 63 | 64 | if istio.WaitForEnvoyTimeout != "" { 65 | waitForEnvoyTimeout = istio.WaitForEnvoyTimeout 66 | } else { 67 | waitForEnvoyTimeout = "15" 68 | } 69 | env = append(env, corev1.EnvVar{ 70 | Name: "WAIT_FOR_ENVOY_TIMEOUT", 71 | Value: waitForEnvoyTimeout, 72 | }) 73 | 74 | if istio.NeverKillIstio { 75 | env = append(env, corev1.EnvVar{ 76 | Name: "NEVER_KILL_ISTIO", 77 | Value: strconv.FormatBool(istio.NeverKillIstio), 78 | }) 79 | } 80 | 81 | if istio.NeverKillIstioOnFailure { 82 | env = append(env, corev1.EnvVar{ 83 | Name: "NEVER_KILL_ISTIO_ON_FAILURE", 84 | Value: strconv.FormatBool(istio.NeverKillIstioOnFailure), 85 | }) 86 | } 87 | 88 | if istio.DisableLogging { 89 | env = append(env, corev1.EnvVar{ 90 | Name: "SCUTTLE_LOGGING", 91 | Value: strconv.FormatBool(false), 92 | }) 93 | } 94 | 95 | if istio.StartWithoutEnvoy { 96 | env = append(env, corev1.EnvVar{ 97 | Name: "START_WITHOUT_ENVOY", 98 | Value: strconv.FormatBool(istio.StartWithoutEnvoy), 99 | }) 100 | } 101 | 102 | if istio.GenericQuitEndpoint != "" { 103 | env = append(env, corev1.EnvVar{ 104 | Name: "GENERIC_QUIT_ENDPOINT", 105 | Value: istio.GenericQuitEndpoint, 106 | }) 107 | } 108 | 109 | if istio.QuitWithoutEnvoyTimeout != "" { 110 | env = append(env, corev1.EnvVar{ 111 | Name: "QUIT_WITHOUT_ENVOY_TIMEOUT", 112 | Value: istio.QuitWithoutEnvoyTimeout, 113 | }) 114 | } 115 | } 116 | return env 117 | } 118 | 119 | // TODO: Envoy variables are not passed to init containers 120 | func getInitContainers(pod *v1alpha1.Pod, script *types.Script) []corev1.Container { 121 | var initContainers []corev1.Container 122 | 123 | for i, k6InitContainer := range pod.InitContainers { 124 | 125 | name := fmt.Sprintf("k6-init-%d", i) 126 | if k6InitContainer.Name != "" { 127 | name = k6InitContainer.Name 128 | } 129 | 130 | volumeMounts := append(script.VolumeMount(), k6InitContainer.VolumeMounts...) 
131 | 132 | initContainer := corev1.Container{ 133 | Name: name, 134 | Image: k6InitContainer.Image, 135 | Command: k6InitContainer.Command, 136 | Args: k6InitContainer.Args, 137 | WorkingDir: k6InitContainer.WorkingDir, 138 | EnvFrom: k6InitContainer.EnvFrom, 139 | Env: k6InitContainer.Env, 140 | VolumeMounts: volumeMounts, 141 | ImagePullPolicy: pod.ImagePullPolicy, 142 | SecurityContext: &pod.ContainerSecurityContext, 143 | } 144 | initContainers = append(initContainers, initContainer) 145 | } 146 | 147 | return initContainers 148 | } 149 | -------------------------------------------------------------------------------- /pkg/resources/jobs/helpers_test.go: -------------------------------------------------------------------------------- 1 | package jobs 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | "github.com/go-test/deep" 8 | "github.com/grafana/k6-operator/api/v1alpha1" 9 | corev1 "k8s.io/api/core/v1" 10 | ) 11 | 12 | func TestNewLabels(t *testing.T) { 13 | 14 | expectedOutcome := map[string]string{ 15 | "app": "k6", 16 | "k6_cr": "test", 17 | } 18 | labels := newLabels("test") 19 | if !reflect.DeepEqual(labels, expectedOutcome) { 20 | t.Errorf("new labels were incorrect, got: %v, want: %v.", labels, expectedOutcome) 21 | } 22 | } 23 | 24 | func TestNewIstioCommandIfTrue(t *testing.T) { 25 | expectedOutcome := []string{"scuttle", "k6", "run"} 26 | command, _ := newIstioCommand("true", []string{"k6", "run"}) 27 | 28 | if diff := deep.Equal(expectedOutcome, command); diff != nil { 29 | t.Errorf("newIstioCommand returned unexpected data, diff: %s", diff) 30 | } 31 | } 32 | 33 | func TestNewIstioCommandIfFalse(t *testing.T) { 34 | expectedOutcome := []string{"k6", "run"} 35 | command, _ := newIstioCommand("false", []string{"k6", "run"}) 36 | 37 | if diff := deep.Equal(expectedOutcome, command); diff != nil { 38 | t.Errorf("newIstioCommand returned unexpected data, diff: %s", diff) 39 | } 40 | } 41 | 42 | func TestNewIstioDefaultEnvVar(t *testing.T) { 43 | expectedOutcome := []corev1.EnvVar{ 44 | { 45 | Name: "ENVOY_ADMIN_API", 46 | Value: "http://127.0.0.1:15000", 47 | }, 48 | { 49 | Name: "ISTIO_QUIT_API", 50 | Value: "http://127.0.0.1:15020", 51 | }, 52 | { 53 | Name: "WAIT_FOR_ENVOY_TIMEOUT", 54 | Value: "15", 55 | }, 56 | } 57 | 58 | envVars := newIstioEnvVar(v1alpha1.K6Scuttle{ 59 | EnvoyAdminApi: "", 60 | IstioQuitApi: "", 61 | WaitForEnvoyTimeout: "", 62 | }, true) 63 | 64 | if !reflect.DeepEqual(envVars, expectedOutcome) { 65 | t.Errorf("new envVars were incorrect, got: %v, want: %v.", envVars, expectedOutcome) 66 | } 67 | } 68 | 69 | func TestNewIstioEnvVarVaryingTheDefault(t *testing.T) { 70 | 71 | expectedOutcome := []corev1.EnvVar{ 72 | { 73 | Name: "ENVOY_ADMIN_API", 74 | Value: "http://localhost:15020", 75 | }, 76 | { 77 | Name: "ISTIO_QUIT_API", 78 | Value: "http://127.17.0.1:15020", 79 | }, 80 | { 81 | Name: "WAIT_FOR_ENVOY_TIMEOUT", 82 | Value: "50", 83 | }, 84 | } 85 | 86 | envVars := newIstioEnvVar(v1alpha1.K6Scuttle{ 87 | EnvoyAdminApi: "http://localhost:15020", 88 | IstioQuitApi: "http://127.17.0.1:15020", 89 | WaitForEnvoyTimeout: "50", 90 | }, true) 91 | 92 | if !reflect.DeepEqual(envVars, expectedOutcome) { 93 | t.Errorf("new envVars were incorrect, got: %v, want: %v.", envVars, expectedOutcome) 94 | } 95 | } 96 | 97 | func TestNewIstioEnvVarTrueValues(t *testing.T) { 98 | expectedOutcome := []corev1.EnvVar{ 99 | { 100 | Name: "ENVOY_ADMIN_API", 101 | Value: "http://127.0.0.1:15000", 102 | }, 103 | { 104 | Name: "ISTIO_QUIT_API", 105 | Value: 
"http://127.0.0.1:15020", 106 | }, 107 | { 108 | Name: "WAIT_FOR_ENVOY_TIMEOUT", 109 | Value: "15", 110 | }, 111 | { 112 | Name: "SCUTTLE_LOGGING", 113 | Value: "false", 114 | }, 115 | } 116 | 117 | envVars := newIstioEnvVar(v1alpha1.K6Scuttle{ 118 | EnvoyAdminApi: "", 119 | IstioQuitApi: "", 120 | WaitForEnvoyTimeout: "", 121 | DisableLogging: true, 122 | }, true) 123 | 124 | if !reflect.DeepEqual(envVars, expectedOutcome) { 125 | t.Errorf("new envVars were incorrect, got: %v, want: %v.", envVars, expectedOutcome) 126 | } 127 | } 128 | 129 | func TestNewIstioEnvVarFalseValues(t *testing.T) { 130 | expectedOutcome := []corev1.EnvVar{ 131 | { 132 | Name: "ENVOY_ADMIN_API", 133 | Value: "http://127.0.0.1:15000", 134 | }, 135 | { 136 | Name: "ISTIO_QUIT_API", 137 | Value: "http://127.0.0.1:15020", 138 | }, 139 | { 140 | Name: "WAIT_FOR_ENVOY_TIMEOUT", 141 | Value: "15", 142 | }, 143 | } 144 | 145 | envVars := newIstioEnvVar(v1alpha1.K6Scuttle{ 146 | EnvoyAdminApi: "", 147 | IstioQuitApi: "", 148 | WaitForEnvoyTimeout: "", 149 | DisableLogging: false, 150 | }, true) 151 | 152 | if !reflect.DeepEqual(envVars, expectedOutcome) { 153 | t.Errorf("new envVars were incorrect, got: %v, want: %v.", envVars, expectedOutcome) 154 | } 155 | } 156 | -------------------------------------------------------------------------------- /pkg/resources/jobs/initializer_test.go: -------------------------------------------------------------------------------- 1 | package jobs 2 | 3 | import ( 4 | "testing" 5 | 6 | deep "github.com/go-test/deep" 7 | "github.com/grafana/k6-operator/api/v1alpha1" 8 | "github.com/grafana/k6-operator/pkg/types" 9 | batchv1 "k8s.io/api/batch/v1" 10 | corev1 "k8s.io/api/core/v1" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | ) 13 | 14 | func TestNewInitializerJob(t *testing.T) { 15 | script := &types.Script{ 16 | Name: "test", 17 | Filename: "test.js", 18 | Type: "ConfigMap", 19 | } 20 | 21 | automountServiceAccountToken := true 22 | zero := int32(0) 23 | 24 | expectedOutcome := &batchv1.Job{ 25 | ObjectMeta: metav1.ObjectMeta{ 26 | Name: "test-initializer", 27 | Namespace: "test", 28 | Labels: map[string]string{ 29 | "app": "k6", 30 | "k6_cr": "test", 31 | "label1": "awesome", 32 | }, 33 | Annotations: map[string]string{ 34 | "awesomeAnnotation": "dope", 35 | }, 36 | }, 37 | Spec: batchv1.JobSpec{ 38 | BackoffLimit: &zero, 39 | Template: corev1.PodTemplateSpec{ 40 | ObjectMeta: metav1.ObjectMeta{ 41 | Labels: map[string]string{ 42 | "app": "k6", 43 | "k6_cr": "test", 44 | "label1": "awesome", 45 | }, 46 | Annotations: map[string]string{ 47 | "awesomeAnnotation": "dope", 48 | }, 49 | }, 50 | Spec: corev1.PodSpec{ 51 | AutomountServiceAccountToken: &automountServiceAccountToken, 52 | ServiceAccountName: "default", 53 | Affinity: nil, 54 | NodeSelector: nil, 55 | Tolerations: nil, 56 | TopologySpreadConstraints: nil, 57 | RestartPolicy: corev1.RestartPolicyNever, 58 | SecurityContext: &corev1.PodSecurityContext{}, 59 | Containers: []corev1.Container{ 60 | { 61 | Image: "ghcr.io/grafana/k6-operator:latest-runner", 62 | ImagePullPolicy: "", 63 | Name: "k6", 64 | Command: []string{ 65 | "sh", "-c", 66 | "mkdir -p $(dirname /tmp/test.js.archived.tar) && k6 archive /test/test.js -O /tmp/test.js.archived.tar --out cloud 2> /tmp/k6logs && k6 inspect --execution-requirements /tmp/test.js.archived.tar 2> /tmp/k6logs ; ! 
cat /tmp/k6logs | grep 'level=error'", 67 | }, 68 | Env: []corev1.EnvVar{}, 69 | EnvFrom: []corev1.EnvFromSource{ 70 | { 71 | ConfigMapRef: &corev1.ConfigMapEnvSource{ 72 | LocalObjectReference: corev1.LocalObjectReference{ 73 | Name: "env", 74 | }, 75 | }, 76 | }, 77 | }, 78 | Resources: corev1.ResourceRequirements{}, 79 | VolumeMounts: script.VolumeMount(), 80 | Ports: []corev1.ContainerPort{{ContainerPort: 6565}}, 81 | SecurityContext: &corev1.SecurityContext{}, 82 | }, 83 | }, 84 | Volumes: script.Volume(), 85 | }, 86 | }, 87 | }, 88 | } 89 | 90 | k6 := &v1alpha1.TestRun{ 91 | ObjectMeta: metav1.ObjectMeta{ 92 | Name: "test", 93 | Namespace: "test", 94 | }, 95 | Spec: v1alpha1.TestRunSpec{ 96 | Script: v1alpha1.K6Script{ 97 | ConfigMap: v1alpha1.K6Configmap{ 98 | Name: "test", 99 | File: "test.js", 100 | }, 101 | }, 102 | Arguments: "--out cloud", 103 | Initializer: &v1alpha1.Pod{ 104 | Metadata: v1alpha1.PodMetadata{ 105 | Labels: map[string]string{ 106 | "label1": "awesome", 107 | }, 108 | Annotations: map[string]string{ 109 | "awesomeAnnotation": "dope", 110 | }, 111 | }, 112 | EnvFrom: []corev1.EnvFromSource{ 113 | { 114 | ConfigMapRef: &corev1.ConfigMapEnvSource{ 115 | LocalObjectReference: corev1.LocalObjectReference{ 116 | Name: "env", 117 | }, 118 | }, 119 | }, 120 | }, 121 | }, 122 | }, 123 | } 124 | 125 | job, err := NewInitializerJob(k6, "--out cloud") 126 | if err != nil { 127 | t.Errorf("NewInitializerJob errored, got: %v", err) 128 | } 129 | 130 | if diff := deep.Equal(job, expectedOutcome); diff != nil { 131 | t.Error(diff) 132 | } 133 | } 134 | -------------------------------------------------------------------------------- /pkg/resources/jobs/starter.go: -------------------------------------------------------------------------------- 1 | package jobs 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | 7 | "github.com/grafana/k6-operator/api/v1alpha1" 8 | "github.com/grafana/k6-operator/pkg/resources/containers" 9 | batchv1 "k8s.io/api/batch/v1" 10 | corev1 "k8s.io/api/core/v1" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | ) 13 | 14 | // NewStarterJob builds a template used for creating a starter job 15 | func NewStarterJob(k6 *v1alpha1.TestRun, hostname []string) *batchv1.Job { 16 | 17 | starterAnnotations := make(map[string]string) 18 | if k6.GetSpec().Starter.Metadata.Annotations != nil { 19 | starterAnnotations = k6.GetSpec().Starter.Metadata.Annotations 20 | } 21 | 22 | starterImage := "ghcr.io/grafana/k6-operator:latest-starter" 23 | if k6.GetSpec().Starter.Image != "" { 24 | starterImage = k6.GetSpec().Starter.Image 25 | } 26 | 27 | starterLabels := newLabels(k6.NamespacedName().Name) 28 | if k6.GetSpec().Starter.Metadata.Labels != nil { 29 | for k, v := range k6.GetSpec().Starter.Metadata.Labels { // Order not specified 30 | if _, ok := starterLabels[k]; !ok { 31 | starterLabels[k] = v 32 | } 33 | } 34 | } 35 | serviceAccountName := "default" 36 | if k6.GetSpec().Starter.ServiceAccountName != "" { 37 | serviceAccountName = k6.GetSpec().Starter.ServiceAccountName 38 | } 39 | automountServiceAccountToken := true 40 | if k6.GetSpec().Starter.AutomountServiceAccountToken != "" { 41 | automountServiceAccountToken, _ = strconv.ParseBool(k6.GetSpec().Starter.AutomountServiceAccountToken) 42 | } 43 | 44 | command, istioEnabled := newIstioCommand(k6.GetSpec().Scuttle.Enabled, []string{"sh", "-c"}) 45 | env := newIstioEnvVar(k6.GetSpec().Scuttle, istioEnabled) 46 | return &batchv1.Job{ 47 | ObjectMeta: metav1.ObjectMeta{ 48 | Name: fmt.Sprintf("%s-starter", 
k6.NamespacedName().Name), 49 | Namespace: k6.NamespacedName().Namespace, 50 | Labels: starterLabels, 51 | Annotations: starterAnnotations, 52 | }, 53 | Spec: batchv1.JobSpec{ 54 | Template: corev1.PodTemplateSpec{ 55 | ObjectMeta: metav1.ObjectMeta{ 56 | Labels: starterLabels, 57 | Annotations: starterAnnotations, 58 | }, 59 | Spec: corev1.PodSpec{ 60 | AutomountServiceAccountToken: &automountServiceAccountToken, 61 | ServiceAccountName: serviceAccountName, 62 | Affinity: k6.GetSpec().Starter.Affinity, 63 | NodeSelector: k6.GetSpec().Starter.NodeSelector, 64 | Tolerations: k6.GetSpec().Starter.Tolerations, 65 | TopologySpreadConstraints: k6.GetSpec().Starter.TopologySpreadConstraints, 66 | RestartPolicy: corev1.RestartPolicyNever, 67 | SecurityContext: &k6.GetSpec().Starter.SecurityContext, 68 | ImagePullSecrets: k6.GetSpec().Starter.ImagePullSecrets, 69 | Containers: []corev1.Container{ 70 | containers.NewStartContainer(hostname, starterImage, k6.GetSpec().Starter.ImagePullPolicy, command, env, k6.GetSpec().Starter.ContainerSecurityContext), 71 | }, 72 | }, 73 | }, 74 | }, 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /pkg/resources/jobs/stopper.go: -------------------------------------------------------------------------------- 1 | package jobs 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/grafana/k6-operator/api/v1alpha1" 7 | "github.com/grafana/k6-operator/pkg/resources/containers" 8 | batchv1 "k8s.io/api/batch/v1" 9 | corev1 "k8s.io/api/core/v1" 10 | ) 11 | 12 | // NewStopJob builds a template used for creating a stop job 13 | func NewStopJob(k6 *v1alpha1.TestRun, hostname []string) *batchv1.Job { 14 | // this job is almost identical to the starter so re-use the definitions 15 | job := NewStarterJob(k6, hostname) 16 | 17 | job.Name = fmt.Sprintf("%s-stopper", k6.NamespacedName().Name) 18 | 19 | image := "ghcr.io/grafana/k6-operator:latest-starter" 20 | if k6.GetSpec().Starter.Image != "" { 21 | image = k6.GetSpec().Starter.Image 22 | } 23 | 24 | command, istioEnabled := newIstioCommand(k6.GetSpec().Scuttle.Enabled, []string{"sh", "-c"}) 25 | env := newIstioEnvVar(k6.GetSpec().Scuttle, istioEnabled) 26 | 27 | job.Spec.Template.Spec.Containers = []corev1.Container{ 28 | containers.NewStopContainer(hostname, image, k6.GetSpec().Starter.ImagePullPolicy, command, env, k6.GetSpec().Starter.ContainerSecurityContext), 29 | } 30 | 31 | return job 32 | } 33 | -------------------------------------------------------------------------------- /pkg/segmentation/segmentation.go: -------------------------------------------------------------------------------- 1 | package segmentation 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "strings" 7 | ) 8 | 9 | const ( 10 | beginning = "0" 11 | end = "1" 12 | ) 13 | 14 | // NewCommandFragments builds command fragments for starting k6 with execution segments. 
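// The index argument is 1-based: for example, index=2 with total=4 produces
// --execution-segment=1/4:2/4 and --execution-segment-sequence=0,1/4,2/4,3/4,1.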
15 | func NewCommandFragments(index int, total int) ([]string, error) { 16 | 17 | if index > total { 18 | return nil, errors.New("node index exceeds configured parallelism") 19 | } 20 | 21 | parts := []string{beginning} 22 | 23 | for i := 1; i < total; i++ { 24 | parts = append(parts, fmt.Sprintf("%d/%d", i, total)) 25 | } 26 | 27 | parts = append(parts, end) 28 | 29 | getSegmentPart := func(index int, total int) string { 30 | if index == 0 { 31 | return "0" 32 | } 33 | if index == total { 34 | return "1" 35 | } 36 | return fmt.Sprintf("%d/%d", index, total) 37 | } 38 | 39 | segment := fmt.Sprintf("%s:%s", getSegmentPart(index-1, total), getSegmentPart(index, total)) 40 | sequence := strings.Join(parts[:], ",") 41 | 42 | return []string{ 43 | fmt.Sprintf("--execution-segment=%s", segment), 44 | fmt.Sprintf("--execution-segment-sequence=%s", sequence), 45 | }, nil 46 | } 47 | -------------------------------------------------------------------------------- /pkg/segmentation/suite_test.go: -------------------------------------------------------------------------------- 1 | package segmentation_test 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/grafana/k6-operator/pkg/segmentation" 8 | . "github.com/onsi/ginkgo" 9 | . "github.com/onsi/gomega" 10 | ) 11 | 12 | func TestSegmentation(t *testing.T) { 13 | RegisterFailHandler(Fail) 14 | 15 | RunSpecs(t, "Segmentation Suite") 16 | } 17 | 18 | var _ = Describe("the execution segmentation string generator", func() { 19 | When("given the index 1 and total 4", func() { 20 | It("should return proper segmentation fragments", func() { 21 | output, err := segmentation.NewCommandFragments(1, 4) 22 | fmt.Print(output) 23 | Expect(err).NotTo(HaveOccurred()) 24 | Expect(output).To(Equal([]string{ 25 | "--execution-segment=0:1/4", 26 | "--execution-segment-sequence=0,1/4,2/4,3/4,1", 27 | })) 28 | }) 29 | }) 30 | }) 31 | -------------------------------------------------------------------------------- /pkg/testrun/k6client.go: -------------------------------------------------------------------------------- 1 | package testrun 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | "net/http" 9 | "net/url" 10 | 11 | "github.com/grafana/k6-operator/pkg/types" 12 | k6Client "go.k6.io/k6/api/v1/client" 13 | ) 14 | 15 | // This will probably be removed once distributed mode in k6 is implemented. 
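// RunSetup invokes POST /v1/setup on a single runner and returns the raw setup
// data, so that SetSetupData can propagate it to the rest of the runners.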
16 | 17 | func RunSetup(ctx context.Context, hostname string) (_ json.RawMessage, err error) { 18 | c, err := k6Client.New(fmt.Sprintf("%v:6565", hostname), k6Client.WithHTTPClient(&http.Client{ 19 | Timeout: 0, 20 | })) 21 | if err != nil { 22 | return 23 | } 24 | 25 | var response types.SetupData 26 | if err = c.CallAPI(ctx, "POST", &url.URL{Path: "/v1/setup"}, nil, &response); err != nil { 27 | return nil, err 28 | } 29 | 30 | if response.Data.Attributes.Data != nil { 31 | var tmpSetupDataObj interface{} 32 | if err := json.Unmarshal(response.Data.Attributes.Data, &tmpSetupDataObj); err != nil { 33 | return nil, err 34 | } 35 | } 36 | 37 | return response.Data.Attributes.Data, nil 38 | } 39 | 40 | func SetSetupData(ctx context.Context, hostnames []string, data json.RawMessage) (err error) { 41 | for _, hostname := range hostnames { 42 | c, err := k6Client.New(fmt.Sprintf("%v:6565", hostname), k6Client.WithHTTPClient(&http.Client{ 43 | Timeout: 0, 44 | })) 45 | if err != nil { 46 | return err 47 | } 48 | 49 | if err = c.CallAPI(ctx, "PUT", &url.URL{Path: "/v1/setup"}, data, nil); err != nil { 50 | return err 51 | } 52 | } 53 | 54 | return nil 55 | } 56 | 57 | func RunTeardown(ctx context.Context, hostnames []string) (err error) { 58 | if len(hostnames) == 0 { 59 | return errors.New("no k6 Service is available to run teardown") 60 | } 61 | 62 | c, err := k6Client.New(fmt.Sprintf("%v:6565", hostnames[0]), k6Client.WithHTTPClient(&http.Client{ 63 | Timeout: 0, 64 | })) 65 | if err != nil { 66 | return 67 | } 68 | 69 | return c.CallAPI(ctx, "POST", &url.URL{Path: "/v1/teardown"}, nil, nil) 70 | } 71 | -------------------------------------------------------------------------------- /pkg/testrun/template.go: -------------------------------------------------------------------------------- 1 | package testrun 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/grafana/k6-operator/api/v1alpha1" 7 | ) 8 | 9 | // Template is a draft of TestRun CR that can be used to create 10 | // a new TestRun by copying and injecting new values 11 | type Template v1alpha1.TestRun 12 | 13 | func (t *Template) Create() *v1alpha1.TestRun { 14 | tr := v1alpha1.TestRun(*t) 15 | return tr.DeepCopy() 16 | } 17 | 18 | func PLZTestName(testRunId string) string { 19 | return fmt.Sprintf("plz-test-%s", testRunId) 20 | } 21 | -------------------------------------------------------------------------------- /pkg/types/conditions.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "fmt" 5 | 6 | "k8s.io/apimachinery/pkg/api/meta" 7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | ) 9 | 10 | func UpdateCondition(conditions *[]metav1.Condition, conditionType string, conditionStatus metav1.ConditionStatus) { 11 | reason, ok := reasons[conditionType+string(conditionStatus)] 12 | if !ok { 13 | panic(fmt.Sprintf("Invalid condition type and status! `%s` - this should never happen!", conditionType+string(conditionStatus))) 14 | } 15 | meta.SetStatusCondition(conditions, metav1.Condition{ 16 | Type: conditionType, 17 | Status: conditionStatus, 18 | LastTransitionTime: metav1.Now(), 19 | Reason: reason, 20 | Message: "", 21 | }) 22 | } 23 | 24 | // SetIfNewer changes cond only if changes in proposedCond are consistent 25 | // with the expected change of conditions both logically and chronologically. 26 | // callbackF can be provided to run a custom function during the loop 27 | // over proposedCond. 28 | // If there were any acceptable changes proposed, it returns true. 
29 | func SetIfNewer(cond *[]metav1.Condition, 30 | proposedCond []metav1.Condition, 31 | callbackF func(metav1.Condition) bool) (isNewer bool) { 32 | 33 | existingConditions := map[string]metav1.Condition{} 34 | for i := range *cond { 35 | existingConditions[(*cond)[i].Type] = (*cond)[i] 36 | } 37 | 38 | for _, proposedCondition := range proposedCond { 39 | // If a new condition is being proposed, just add it to the list. 40 | if existingCondition, ok := existingConditions[proposedCondition.Type]; !ok { 41 | *cond = append(*cond, proposedCondition) 42 | isNewer = true 43 | } else { 44 | // If a change in existing condition is being proposed, check if 45 | // its timestamp is later than the one in existing condition. 46 | // 47 | // Additionally: condition should never return to Unknown status 48 | // unless it's newly created. 49 | 50 | if proposedCondition.Status != metav1.ConditionUnknown { 51 | if existingCondition.LastTransitionTime.UnixNano() < proposedCondition.LastTransitionTime.UnixNano() { 52 | meta.SetStatusCondition(cond, proposedCondition) 53 | isNewer = true 54 | } 55 | } 56 | } 57 | 58 | if callbackF != nil { 59 | if callbackResult := callbackF(proposedCondition); callbackResult { 60 | isNewer = callbackResult 61 | } 62 | } 63 | } 64 | 65 | return 66 | } 67 | 68 | var reasons = map[string]string{ 69 | "TestRunRunningUnknown": "TestRunPreparation", 70 | "TestRunRunningTrue": "TestRunRunningTrue", 71 | "TestRunRunningFalse": "TestRunRunningFalse", 72 | 73 | "TeardownExecutedUnknown": "TestRunPreparation", 74 | "TeardownExecutedFalse": "TeardownExecutedFalse", 75 | "TeardownExecutedTrue": "TeardownExecutedTrue", 76 | 77 | "CloudTestRunUnknown": "TestRunTypeUnknown", 78 | "CloudTestRunTrue": "CloudTestRunTrue", 79 | "CloudTestRunFalse": "CloudTestRunFalse", 80 | 81 | "CloudTestRunCreatedUnknown": "CloudTestRunCreatedUnknown", 82 | "CloudTestRunCreatedTrue": "CloudTestRunCreatedTrue", 83 | "CloudTestRunCreatedFalse": "CloudTestRunCreatedFalse", 84 | 85 | "CloudTestRunFinalizedUnknown": "CloudTestRunFinalizedUnknown", 86 | "CloudTestRunFinalizedTrue": "CloudTestRunFinalizedTrue", 87 | "CloudTestRunFinalizedFalse": "CloudTestRunFinalizedFalse", 88 | 89 | "CloudPLZTestRunUnknown": "CloudPLZTestRunUnknown", 90 | "CloudPLZTestRunTrue": "CloudPLZTestRunTrue", 91 | "CloudPLZTestRunFalse": "CloudPLZTestRunFalse", 92 | 93 | "PLZRegisteredUnknown": "PLZRegisteredUnknown", 94 | "PLZRegisteredTrue": "PLZRegisteredTrue", 95 | "PLZRegisteredFalse": "PLZRegisteredFalse", 96 | 97 | "CloudTestRunAbortedUnknown": "CloudTestRunAbortedUnknown", 98 | "CloudTestRunAbortedTrue": "CloudTestRunAbortedTrue", 99 | "CloudTestRunAbortedFalse": "CloudTestRunAbortedFalse", 100 | } 101 | -------------------------------------------------------------------------------- /pkg/types/k6cli.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import "strings" 4 | 5 | // CLI is an internal type to support k6 invocation in initialization stage. 6 | // Not all k6 commands allow the same set of arguments so CLI is an object 7 | // meant to contain only the ones fit for the archive call. 8 | // Maybe revise this once crococonf is closer to integration? 
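// For example, the argument line `--vus 10 --out cloud --linger` parses into
// ArchiveArgs "--vus 10" with HasCloudOut=true; --linger is dropped as a non-archive argument.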
9 | type CLI struct { 10 | ArchiveArgs string 11 | // k6-operator doesn't care for most values of CLI arguments to k6, with the exception of cloud output 12 | HasCloudOut bool 13 | } 14 | 15 | func ParseCLI(arguments string) *CLI { 16 | lastArgV := func(start int, args []string) (end int) { 17 | end = start 18 | for end < len(args) { 19 | args[end] = strings.TrimSpace(args[end]) 20 | if len(args[end]) > 0 && args[end][0] == '-' { 21 | break 22 | } 23 | end++ 24 | } 25 | return 26 | } 27 | 28 | var cli CLI 29 | 30 | args := strings.Split(arguments, " ") 31 | i := 0 32 | for i < len(args) { 33 | args[i] = strings.TrimSpace(args[i]) 34 | if len(args[i]) == 0 { 35 | i++ 36 | continue 37 | } 38 | if args[i][0] == '-' { 39 | end := lastArgV(i+1, args) 40 | 41 | switch args[i] { 42 | case "-o", "--out": 43 | for j := i + 1; j < end; j++ { 44 | if args[j] == "cloud" { 45 | cli.HasCloudOut = true 46 | } 47 | } 48 | case "-l", "--linger", "--no-usage-report": 49 | // non-archive arguments, so skip them 50 | break 51 | case "--verbose", "-v": 52 | // this argument is acceptable by archive but it'd 53 | // mess up the JSON output of `k6 inspect` 54 | break 55 | default: 56 | if len(cli.ArchiveArgs) > 0 { 57 | cli.ArchiveArgs += " " 58 | } 59 | cli.ArchiveArgs += strings.Join(args[i:end], " ") 60 | } 61 | i = end 62 | } else { 63 | // a bare value without a preceding flag: skip it so the loop always advances 64 | i++ 65 | } 66 | } 67 | 68 | return &cli 69 | } 70 | -------------------------------------------------------------------------------- /pkg/types/k6cli_test.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func Test_ParseCLI(t *testing.T) { 10 | tests := []struct { 11 | name string 12 | argLine string 13 | cli CLI 14 | }{ 15 | { 16 | "EmptyArgs", 17 | "", 18 | CLI{}, 19 | }, 20 | { 21 | "ShortArchiveArgs", 22 | "-u 10 -d 5", 23 | CLI{ 24 | ArchiveArgs: "-u 10 -d 5", 25 | }, 26 | }, 27 | { 28 | "LongArchiveArgs", 29 | "--vus 10 --duration 5", 30 | CLI{ 31 | ArchiveArgs: "--vus 10 --duration 5", 32 | }, 33 | }, 34 | { 35 | "ShortNonArchiveArg", 36 | "-u 10 -d 5 -l", 37 | CLI{ 38 | ArchiveArgs: "-u 10 -d 5", 39 | }, 40 | }, 41 | { 42 | "LongNonArchiveArgs", 43 | "--vus 10 --duration 5 --linger", 44 | CLI{ 45 | ArchiveArgs: "--vus 10 --duration 5", 46 | }, 47 | }, 48 | { 49 | "OutWithoutCloudArgs", 50 | "--vus 10 -o json -o csv", 51 | CLI{ 52 | ArchiveArgs: "--vus 10", 53 | HasCloudOut: false, 54 | }, 55 | }, 56 | { 57 | "OutWithCloudArgs", 58 | "--vus 10 --out json -o csv --out cloud", 59 | CLI{ 60 | ArchiveArgs: "--vus 10", 61 | HasCloudOut: true, 62 | }, 63 | }, 64 | { 65 | "VerboseOutWithCloudArgs", 66 | "--vus 10 --out json -o csv --out cloud --verbose", 67 | CLI{ 68 | ArchiveArgs: "--vus 10", 69 | HasCloudOut: true, 70 | }, 71 | }, 72 | } 73 | 74 | for _, test := range tests { 75 | test := test 76 | t.Run(test.name, func(t *testing.T) { 77 | t.Parallel() 78 | cli := ParseCLI(test.argLine) 79 | 80 | assert.Equal(t, test.cli.ArchiveArgs, cli.ArchiveArgs) 81 | assert.Equal(t, test.cli.HasCloudOut, cli.HasCloudOut) 82 | }) 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /pkg/types/k6status.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import "encoding/json" 4 | 5 | // k6 REST API types. 6 | // TODO: refactor with existing definitions in k6 api/v1?
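// StatusAPIRequest mirrors the request envelope of k6's status endpoint, e.g.
// {"data":{"type":"status","id":"default","attributes":{"stopped":true}}}.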
7 | 8 | type StatusAPIRequest struct { 9 | Data StatusAPIRequestData `json:"data"` 10 | } 11 | 12 | type StatusAPIRequestData struct { 13 | Attributes StatusAPIRequestDataAttributes `json:"attributes"` 14 | ID string `json:"id"` 15 | Type string `json:"type"` 16 | } 17 | 18 | type StatusAPIRequestDataAttributes struct { 19 | Paused bool `json:"paused"` 20 | Stopped bool `json:"stopped"` 21 | } 22 | 23 | type SetupData struct { 24 | Data setUpData `json:"data"` 25 | } 26 | 27 | type setUpData struct { 28 | Type string `json:"type"` 29 | ID string `json:"id"` 30 | Attributes setupResponseAttributes `json:"attributes"` 31 | } 32 | 33 | type setupResponseAttributes struct { 34 | Data json.RawMessage `json:"data"` 35 | } 36 | -------------------------------------------------------------------------------- /pkg/types/script.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | corev1 "k8s.io/api/core/v1" 8 | ) 9 | 10 | // Internal type created to support Spec.script options 11 | type Script struct { 12 | Name string // Name of ConfigMap or VolumeClaim or "LocalFile" 13 | ReadOnly bool // VolumeClaim only 14 | Filename string 15 | Path string 16 | Type string // ConfigMap | VolumeClaim | LocalFile 17 | } 18 | 19 | func (s *Script) FullName() string { 20 | return s.Path + s.Filename 21 | } 22 | 23 | // Volume creates a Volume spec for the script 24 | func (s *Script) Volume() []corev1.Volume { 25 | switch s.Type { 26 | case "VolumeClaim": 27 | return []corev1.Volume{ 28 | { 29 | Name: "k6-test-volume", 30 | VolumeSource: corev1.VolumeSource{ 31 | PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ 32 | ClaimName: s.Name, 33 | ReadOnly: s.ReadOnly, 34 | }, 35 | }, 36 | }, 37 | } 38 | 39 | case "ConfigMap": 40 | return []corev1.Volume{ 41 | { 42 | Name: "k6-test-volume", 43 | VolumeSource: corev1.VolumeSource{ 44 | ConfigMap: &corev1.ConfigMapVolumeSource{ 45 | LocalObjectReference: corev1.LocalObjectReference{ 46 | Name: s.Name, 47 | }, 48 | }, 49 | }, 50 | }, 51 | } 52 | 53 | default: 54 | return []corev1.Volume{} 55 | } 56 | } 57 | 58 | // VolumeMount creates a VolumeMount spec for the script 59 | func (s *Script) VolumeMount() []corev1.VolumeMount { 60 | if s.Type == "LocalFile" { 61 | return []corev1.VolumeMount{} 62 | } 63 | 64 | return []corev1.VolumeMount{ 65 | { 66 | Name: "k6-test-volume", 67 | MountPath: "/test", 68 | }, 69 | } 70 | } 71 | 72 | // UpdateCommand modifies command to check for script existence in case of LocalFile; 73 | // otherwise, command remains unmodified 74 | func (s *Script) UpdateCommand(cmd []string) []string { 75 | if s.Type == "LocalFile" { 76 | joincmd := strings.Join(cmd, " ") 77 | checkCommand := []string{ 78 | "sh", 79 | "-c", 80 | fmt.Sprintf("if [ ! -f %v ]; then echo \"LocalFile not found, exiting...\"; exit 1; fi;\n%v", s.FullName(), joincmd), 81 | } 82 | return checkCommand 83 | } 84 | return cmd 85 | } 86 | --------------------------------------------------------------------------------
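A minimal, hypothetical sketch of how pkg/types.Script behaves for the LocalFile case; only the types.Script API above is taken from the repo, while the path and filename values are made up for the example:

package main

import (
	"fmt"

	"github.com/grafana/k6-operator/pkg/types"
)

func main() {
	// A script that is baked into the runner image rather than mounted
	// from a ConfigMap or VolumeClaim.
	s := &types.Script{Type: "LocalFile", Path: "/scripts/", Filename: "test.js"}

	// LocalFile needs no volumes or mounts:
	fmt.Println(len(s.Volume()), len(s.VolumeMount())) // prints: 0 0

	// ...but the runner command gets wrapped in an existence check:
	cmd := s.UpdateCommand([]string{"k6", "run", s.FullName()})
	fmt.Println(cmd[2])
	// if [ ! -f /scripts/test.js ]; then echo "LocalFile not found, exiting..."; exit 1; fi;
	// k6 run /scripts/test.js
}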