├── .ci-operator.yaml ├── .codecov.yml ├── .gitattributes ├── .github ├── dependabot.yml └── renovate.json ├── .gitignore ├── .tekton ├── OWNERS ├── cloud-ingress-operator-e2e-pull-request.yaml ├── cloud-ingress-operator-e2e-push.yaml ├── cloud-ingress-operator-pull-request.yaml └── cloud-ingress-operator-push.yaml ├── CLAUDE.md ├── LICENSE ├── Makefile ├── OWNERS ├── OWNERS_ALIASES ├── PROJECT ├── README.md ├── api └── v1alpha1 │ ├── apischeme_types.go │ ├── groupversion_info.go │ ├── publishingstrategy_types.go │ ├── zz_generated.deepcopy.go │ └── zz_generated.openapi.go ├── boilerplate ├── _data │ ├── backing-image-tag │ └── last-boilerplate-commit ├── _lib │ ├── boilerplate-commit │ ├── boilerplate.mk │ ├── common.sh │ ├── container-make │ ├── freeze-check │ ├── release.sh │ ├── subscriber │ ├── subscriber-propose │ ├── subscriber-propose-update │ ├── subscriber-report │ ├── subscriber-report-onboarding │ ├── subscriber-report-pr │ ├── subscriber-report-release │ └── subscriber.sh ├── generated-includes.mk ├── openshift │ ├── golang-osd-e2e │ │ ├── OWNERS │ │ ├── README.md │ │ ├── e2e-template.yml │ │ ├── project.mk │ │ ├── standard.mk │ │ └── update │ └── golang-osd-operator │ │ ├── .ci-operator.yaml │ │ ├── .codecov.yml │ │ ├── Dockerfile.olm-registry │ │ ├── OWNERS_ALIASES │ │ ├── README.md │ │ ├── app-sre-build-deploy.sh │ │ ├── app-sre.md │ │ ├── build-opm-catalog.sh │ │ ├── codecov.sh │ │ ├── configure-fips.sh │ │ ├── csv-generate │ │ ├── catalog-build.sh │ │ ├── catalog-publish.sh │ │ ├── common-generate-operator-bundle.py │ │ ├── common.sh │ │ ├── csv-generate.mk │ │ └── csv-generate.sh │ │ ├── dependabot.yml │ │ ├── ensure.sh │ │ ├── fips.go.tmplt │ │ ├── golangci.yml │ │ ├── migrate_build_pipeline.py │ │ ├── project.mk │ │ ├── prow-config │ │ ├── py-requirements.txt │ │ ├── rvmo-bundle.sh │ │ ├── standard.mk │ │ ├── update │ │ └── validate-yaml.py ├── update └── update.cfg ├── build ├── Dockerfile ├── Dockerfile.olm-registry └── bin │ ├── entrypoint │ 
└── user_setup ├── config ├── config.go ├── metadata │ └── additional-labels.txt └── templates │ └── csv-template.yaml ├── controllers ├── apischeme │ ├── apischeme_controller.go │ ├── apischeme_controller_suite_test.go │ └── apischeme_controller_test.go ├── publishingstrategy │ ├── finalizer.go │ ├── publishingstrategy_controller.go │ ├── publishingstrategy_controller_suite_test.go │ └── publishingstrategy_controller_test.go └── routerservice │ ├── routerservice_controller.go │ ├── routerservice_controller_suite_test.go │ └── routerservice_controller_test.go ├── deploy ├── 05_cloud-ingress-operator.ServiceAccount.yaml ├── 20_cloud-ingress-operator.ClusterRole.yaml ├── 20_cloud-ingress-operator.Role.yaml ├── 20_cloud-ingress-operator_trusted-ca-bundle.ConfigMap.yaml ├── 20_cluster_config_v1_reader_role.yaml ├── 20_cluster_config_v1_reader_role_binding.yaml ├── 30_cloud-ingress-operator-RoleBinding.yaml ├── 40_prom-k8s-role.yaml ├── 40_prom-k8s-rolebinding.yaml ├── 45_prom-k8s-prometheus-rules.yaml ├── 50_cloud-ingress-operator.Deployment.yaml └── crds │ ├── cloudingress.managed.openshift.io_apischemes.yaml │ └── cloudingress.managed.openshift.io_publishingstrategies.yaml ├── examples ├── cloudingress.managed.openshift.io_v1alpha1_apischeme_cr.yaml └── cloudingress.managed.openshift.io_v1alpha1_publishingstrategy_cr.yaml ├── fips.go ├── go.mod ├── go.sum ├── hack ├── boilerplate.go.txt └── olm-registry │ └── olm-artifacts-template.yaml ├── main.go ├── main_test.go ├── pkg ├── cloudclient │ ├── add_aws.go │ ├── add_aws_test.go │ ├── add_gcp.go │ ├── add_gcp_test.go │ ├── aws │ │ ├── aws.go │ │ ├── aws_test.go │ │ ├── private.go │ │ ├── private_test.go │ │ ├── shared_credentials_file.go │ │ └── shared_credentials_file_test.go │ ├── cloudclient.go │ ├── gcp │ │ ├── gcp.go │ │ ├── gcp_test.go │ │ ├── private.go │ │ └── private_test.go │ └── mock_cloudclient │ │ └── mock_cloudclient.go ├── controllerutils │ ├── conditions.go │ └── utils.go ├── errors │ └── errors.go ├── 
ingresscontroller │ ├── addtoscheme.go │ └── ingresscontroller.go ├── localmetrics │ └── localmetrics.go ├── testutils │ └── testutils.go └── utils │ ├── clusterversion.go │ ├── clusterversion_test.go │ ├── healthcheck.go │ ├── healthcheck_test.go │ ├── infrastructure.go │ ├── infrastructure_test.go │ └── machines.go ├── resources ├── 20_cloud-ingress-operator_kube-apiserver.Role.yaml ├── 20_cloud-ingress-operator_kube-apiserver.RoleBinding.yaml ├── 20_cloud-ingress-operator_machine.Role.yaml ├── 20_cloud-ingress-operator_machine.RoleBinding.yaml ├── 20_cloud-ingress-operator_openshift-ingress-operator.Role.yaml ├── 20_cloud-ingress-operator_openshift-ingress-operator.RoleBinding.yaml ├── 20_cloud-ingress-operator_openshift-ingress.Role.yaml └── 20_cloud-ingress-operator_openshift-ingress.RoleBinding.yaml ├── test └── e2e │ ├── Dockerfile │ ├── README.md │ ├── cloud_ingress_operator_runner_test.go │ ├── cloud_ingress_operator_tests.go │ └── e2e-template.yml └── tools.go /.ci-operator.yaml: -------------------------------------------------------------------------------- 1 | build_root_image: 2 | name: boilerplate 3 | namespace: openshift 4 | tag: image-v8.2.0 5 | -------------------------------------------------------------------------------- /.codecov.yml: -------------------------------------------------------------------------------- 1 | codecov: 2 | notify: 3 | require_ci_to_pass: no 4 | 5 | coverage: 6 | precision: 2 7 | round: down 8 | range: "20...100" 9 | 10 | status: 11 | project: no 12 | patch: no 13 | changes: no 14 | 15 | parsers: 16 | gcov: 17 | branch_detection: 18 | conditional: yes 19 | loop: yes 20 | method: no 21 | macro: no 22 | 23 | comment: 24 | layout: "reach,diff,flags,tree" 25 | behavior: default 26 | require_changes: no 27 | 28 | ignore: 29 | - "**/mocks" 30 | - "**/zz_generated*.go" 31 | -------------------------------------------------------------------------------- /.gitattributes: 
-------------------------------------------------------------------------------- 1 | boilerplate/** linguist-generated=true 2 | 3 | ### BEGIN BOILERPLATE GENERATED -- DO NOT EDIT ### 4 | ### This block must be the last thing in your ### 5 | ### .gitattributes file; otherwise the 'validate' ### 6 | ### CI check will fail. ### 7 | # Used to ensure nobody mucked with boilerplate files. 8 | boilerplate/_lib/freeze-check linguist-generated=false 9 | # Show the boilerplate commit hash update. It's only one line anyway. 10 | boilerplate/_data/last-boilerplate-commit linguist-generated=false 11 | # Used by freeze-check. Good place for attackers to inject badness. 12 | boilerplate/update linguist-generated=false 13 | # Make sure attackers can't hide changes to this configuration 14 | .gitattributes linguist-generated=false 15 | ### END BOILERPLATE GENERATED ### 16 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # BEGIN boilerplate-managed 2 | version: 2 3 | updates: 4 | - package-ecosystem: "docker" 5 | directory: "/build" 6 | labels: 7 | - "area/dependency" 8 | - "ok-to-test" 9 | schedule: 10 | interval: "weekly" 11 | ignore: 12 | - dependency-name: "redhat-services-prod/openshift/boilerplate" 13 | # don't upgrade boilerplate via these means 14 | - dependency-name: "openshift4/ose-operator-registry" 15 | # don't upgrade ose-operator-registry via these means 16 | # END boilerplate-managed 17 | -------------------------------------------------------------------------------- /.github/renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "github>openshift/boilerplate//.github/renovate.json" 5 | ] 6 | } -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | # Temporary Build Files 2 | build/_output 3 | build/_test 4 | # Created by https://www.gitignore.io/api/go,vim,emacs,visualstudiocode 5 | ### Emacs ### 6 | # -*- mode: gitignore; -*- 7 | *~ 8 | \#*\# 9 | /.emacs.desktop 10 | /.emacs.desktop.lock 11 | *.elc 12 | auto-save-list 13 | tramp 14 | .\#* 15 | # Org-mode 16 | .org-id-locations 17 | *_archive 18 | # flymake-mode 19 | *_flymake.* 20 | # eshell files 21 | /eshell/history 22 | /eshell/lastdir 23 | # elpa packages 24 | /elpa/ 25 | # reftex files 26 | *.rel 27 | # AUCTeX auto folder 28 | /auto/ 29 | # cask packages 30 | .cask/ 31 | dist/ 32 | # Flycheck 33 | flycheck_*.el 34 | # server auth directory 35 | /server/ 36 | # projectiles files 37 | .projectile 38 | projectile-bookmarks.eld 39 | # directory configuration 40 | .dir-locals.el 41 | # saveplace 42 | places 43 | # url cache 44 | url/cache/ 45 | # cedet 46 | ede-projects.el 47 | # smex 48 | smex-items 49 | # company-statistics 50 | company-statistics-cache.el 51 | # anaconda-mode 52 | anaconda-mode/ 53 | ### Go ### 54 | # Binaries for programs and plugins 55 | *.exe 56 | *.exe~ 57 | *.dll 58 | *.so 59 | *.dylib 60 | # Test binary, build with 'go test -c' 61 | *.test 62 | # Output of the go coverage tool, specifically when used with LiteIDE 63 | *.out 64 | ### Vim ### 65 | # swap 66 | .sw[a-p] 67 | .*.sw[a-p] 68 | # session 69 | Session.vim 70 | # temporary 71 | .netrwhist 72 | # auto-generated tag files 73 | tags 74 | ### VisualStudioCode ### 75 | .vscode/* 76 | .history 77 | # End of https://www.gitignore.io/api/go,vim,emacs,visualstudiocode 78 | 79 | build/_output 80 | 81 | # Dependency directories (remove the comment below to include it) 82 | # vendor/ 83 | .venv/ 84 | .operator-sdk/ 85 | .idea 86 | 87 | # vscode workspace files 88 | *.code-workspace 89 | 90 | ### Local testing of app-sre pipeline creates these 91 | # The docker hidden dir is created by make 92 | # for 
Jenkins credentials to work 93 | .docker/ 94 | saas-cloud-ingress-operator-bundle 95 | launch.json 96 | 97 | 98 | # IDE 99 | .DS_Store -------------------------------------------------------------------------------- /.tekton/OWNERS: -------------------------------------------------------------------------------- 1 | reviewers: 2 | - srep-infra-cicd 3 | approvers: 4 | - srep-infra-cicd 5 | -------------------------------------------------------------------------------- /.tekton/cloud-ingress-operator-e2e-pull-request.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: tekton.dev/v1 2 | kind: PipelineRun 3 | metadata: 4 | annotations: 5 | build.appstudio.openshift.io/repo: https://github.com/openshift/cloud-ingress-operator?rev={{revision}} 6 | build.appstudio.redhat.com/commit_sha: '{{revision}}' 7 | build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}' 8 | build.appstudio.redhat.com/target_branch: '{{target_branch}}' 9 | pipelinesascode.tekton.dev/cancel-in-progress: 'true' 10 | pipelinesascode.tekton.dev/max-keep-runs: '3' 11 | pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch == "master" 12 | creationTimestamp: null 13 | labels: 14 | appstudio.openshift.io/application: cloud-ingress-operator 15 | appstudio.openshift.io/component: cloud-ingress-operator-e2e 16 | pipelines.appstudio.openshift.io/type: build 17 | name: cloud-ingress-operator-e2e-on-pull-request 18 | namespace: cloud-ingress-operator-tenant 19 | spec: 20 | params: 21 | - name: git-url 22 | value: '{{source_url}}' 23 | - name: revision 24 | value: '{{revision}}' 25 | - name: output-image 26 | value: quay.io/redhat-user-workloads/cloud-ingress-operator-tenant/openshift/cloud-ingress-operator-e2e:on-pr-{{revision}} 27 | - name: image-expires-after 28 | value: 5d 29 | - name: dockerfile 30 | value: test/e2e/Dockerfile 31 | - name: path-context 32 | value: . 
33 | taskRunTemplate: 34 | serviceAccountName: build-pipeline-cloud-ingress-operator-e2e 35 | workspaces: 36 | - name: git-auth 37 | secret: 38 | secretName: '{{ git_auth_secret }}' 39 | pipelineRef: 40 | resolver: git 41 | params: 42 | - name: url 43 | value: https://github.com/openshift/boilerplate 44 | - name: revision 45 | value: master 46 | - name: pathInRepo 47 | value: pipelines/docker-build-oci-ta/pipeline.yaml 48 | status: {} 49 | -------------------------------------------------------------------------------- /.tekton/cloud-ingress-operator-e2e-push.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: tekton.dev/v1 2 | kind: PipelineRun 3 | metadata: 4 | annotations: 5 | build.appstudio.openshift.io/repo: https://github.com/openshift/cloud-ingress-operator?rev={{revision}} 6 | build.appstudio.redhat.com/commit_sha: '{{revision}}' 7 | build.appstudio.redhat.com/target_branch: '{{target_branch}}' 8 | pipelinesascode.tekton.dev/cancel-in-progress: 'false' 9 | pipelinesascode.tekton.dev/max-keep-runs: '3' 10 | pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch == "master" 11 | creationTimestamp: null 12 | labels: 13 | appstudio.openshift.io/application: cloud-ingress-operator 14 | appstudio.openshift.io/component: cloud-ingress-operator-e2e 15 | pipelines.appstudio.openshift.io/type: build 16 | name: cloud-ingress-operator-e2e-on-push 17 | namespace: cloud-ingress-operator-tenant 18 | spec: 19 | params: 20 | - name: git-url 21 | value: '{{source_url}}' 22 | - name: revision 23 | value: '{{revision}}' 24 | - name: output-image 25 | value: quay.io/redhat-user-workloads/cloud-ingress-operator-tenant/openshift/cloud-ingress-operator-e2e:{{revision}} 26 | - name: dockerfile 27 | value: test/e2e/Dockerfile 28 | - name: path-context 29 | value: . 
30 | taskRunTemplate: 31 | serviceAccountName: build-pipeline-cloud-ingress-operator-e2e 32 | workspaces: 33 | - name: git-auth 34 | secret: 35 | secretName: '{{ git_auth_secret }}' 36 | pipelineRef: 37 | resolver: git 38 | params: 39 | - name: url 40 | value: https://github.com/openshift/boilerplate 41 | - name: revision 42 | value: master 43 | - name: pathInRepo 44 | value: pipelines/docker-build-oci-ta/pipeline.yaml 45 | status: {} 46 | -------------------------------------------------------------------------------- /.tekton/cloud-ingress-operator-pull-request.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: tekton.dev/v1 2 | kind: PipelineRun 3 | metadata: 4 | annotations: 5 | build.appstudio.openshift.io/repo: https://github.com/openshift/cloud-ingress-operator?rev={{revision}} 6 | build.appstudio.redhat.com/commit_sha: '{{revision}}' 7 | build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}' 8 | build.appstudio.redhat.com/target_branch: '{{target_branch}}' 9 | pipelinesascode.tekton.dev/cancel-in-progress: 'true' 10 | pipelinesascode.tekton.dev/max-keep-runs: '3' 11 | pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch == "master" 12 | creationTimestamp: null 13 | labels: 14 | appstudio.openshift.io/application: cloud-ingress-operator 15 | appstudio.openshift.io/component: cloud-ingress-operator 16 | pipelines.appstudio.openshift.io/type: build 17 | name: cloud-ingress-operator-on-pull-request 18 | namespace: cloud-ingress-operator-tenant 19 | spec: 20 | params: 21 | - name: git-url 22 | value: '{{source_url}}' 23 | - name: revision 24 | value: '{{revision}}' 25 | - name: output-image 26 | value: quay.io/redhat-user-workloads/cloud-ingress-operator-tenant/openshift/cloud-ingress-operator:on-pr-{{revision}} 27 | - name: image-expires-after 28 | value: 5d 29 | - name: dockerfile 30 | value: build/Dockerfile 31 | - name: path-context 32 | value: . 
33 | taskRunTemplate: 34 | serviceAccountName: build-pipeline-cloud-ingress-operator 35 | workspaces: 36 | - name: git-auth 37 | secret: 38 | secretName: '{{ git_auth_secret }}' 39 | pipelineRef: 40 | resolver: git 41 | params: 42 | - name: url 43 | value: https://github.com/openshift/boilerplate 44 | - name: revision 45 | value: master 46 | - name: pathInRepo 47 | value: pipelines/docker-build-oci-ta/pipeline.yaml 48 | status: {} 49 | -------------------------------------------------------------------------------- /.tekton/cloud-ingress-operator-push.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: tekton.dev/v1 2 | kind: PipelineRun 3 | metadata: 4 | annotations: 5 | build.appstudio.openshift.io/repo: https://github.com/openshift/cloud-ingress-operator?rev={{revision}} 6 | build.appstudio.redhat.com/commit_sha: '{{revision}}' 7 | build.appstudio.redhat.com/target_branch: '{{target_branch}}' 8 | pipelinesascode.tekton.dev/cancel-in-progress: 'false' 9 | pipelinesascode.tekton.dev/max-keep-runs: '3' 10 | pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch == "master" 11 | creationTimestamp: null 12 | labels: 13 | appstudio.openshift.io/application: cloud-ingress-operator 14 | appstudio.openshift.io/component: cloud-ingress-operator 15 | pipelines.appstudio.openshift.io/type: build 16 | name: cloud-ingress-operator-on-push 17 | namespace: cloud-ingress-operator-tenant 18 | spec: 19 | params: 20 | - name: git-url 21 | value: '{{source_url}}' 22 | - name: revision 23 | value: '{{revision}}' 24 | - name: output-image 25 | value: quay.io/redhat-user-workloads/cloud-ingress-operator-tenant/openshift/cloud-ingress-operator:{{revision}} 26 | - name: dockerfile 27 | value: build/Dockerfile 28 | - name: path-context 29 | value: . 
30 | taskRunTemplate: 31 | serviceAccountName: build-pipeline-cloud-ingress-operator 32 | workspaces: 33 | - name: git-auth 34 | secret: 35 | secretName: '{{ git_auth_secret }}' 36 | pipelineRef: 37 | resolver: git 38 | params: 39 | - name: url 40 | value: https://github.com/openshift/boilerplate 41 | - name: revision 42 | value: master 43 | - name: pathInRepo 44 | value: pipelines/docker-build-oci-ta/pipeline.yaml 45 | status: {} 46 | -------------------------------------------------------------------------------- /CLAUDE.md: -------------------------------------------------------------------------------- 1 | # CLAUDE.md 2 | 3 | This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. 4 | 5 | ## Overview 6 | 7 | The cloud-ingress-operator is a Kubernetes operator designed for OpenShift Dedicated 4.x clusters to toggle cluster components between "private" and "public" modes. It manages: 8 | 9 | 1. **API Server Access**: Default API endpoint (`api.`) and admin API endpoint (`rh-api.`) 10 | 2. **Application Ingress**: Default ingress (`*.apps.`) and optional secondary ingress (`*.apps2.`) 11 | 12 | The operator uses custom resources `APIScheme` and `PublishingStrategy` to control these behaviors and supports both AWS and GCP cloud providers. 
13 | 14 | ## Commands 15 | 16 | ### Development Commands 17 | ```bash 18 | # Build the operator binary 19 | make go-build 20 | 21 | # Run tests 22 | make go-test 23 | 24 | # Run linting and static analysis 25 | make go-check 26 | 27 | # Generate code (CRDs, deepcopy, OpenAPI) 28 | make generate 29 | 30 | # Validate all code and configurations 31 | make validate 32 | 33 | # Run comprehensive linting 34 | make lint 35 | 36 | # Docker build 37 | make docker-build 38 | 39 | # Build and push container 40 | make docker-push 41 | ``` 42 | 43 | ### Testing Commands 44 | ```bash 45 | # Run unit tests with coverage 46 | make go-test 47 | 48 | # Run container tests 49 | make container-test 50 | 51 | # Generate test coverage reports 52 | make coverage 53 | 54 | # Validate YAML configurations 55 | make yaml-validate 56 | ``` 57 | 58 | ## Architecture 59 | 60 | ### Core Components 61 | 62 | **Controllers** (`controllers/`): 63 | - `apischeme/`: Manages admin API endpoint creation and configuration 64 | - `publishingstrategy/`: Handles privacy toggling for API and ingress resources 65 | - `routerservice/`: Manages router service configurations 66 | 67 | **Cloud Clients** (`pkg/cloudclient/`): 68 | - Abstract cloud provider interface with AWS and GCP implementations 69 | - Handles load balancer and security group management 70 | - Provider-specific networking configurations in `aws/` and `gcp/` subdirectories 71 | 72 | **Custom Resources** (`api/v1alpha1/`): 73 | - `APIScheme`: Configures admin API endpoint (`managementAPIServerIngress`) 74 | - `PublishingStrategy`: Controls privacy settings for `defaultAPIServerIngress` and `applicationIngress` 75 | 76 | ### Key Dependencies 77 | 78 | - **Operator SDK**: Built using controller-runtime framework 79 | - **OpenShift APIs**: Integrates with OpenShift infrastructure and ingress controllers 80 | - **Cloud SDKs**: AWS SDK and Google Cloud APIs for infrastructure management 81 | - **Boilerplate**: Uses OpenShift boilerplate for 
standardized build/test/deploy patterns 82 | 83 | ### Important Patterns 84 | 85 | - **Multi-cloud Support**: Conditional compilation and runtime detection for AWS vs GCP 86 | - **FIPS Compliance**: Configurable FIPS mode for cryptographic operations 87 | - **Legacy Support**: Feature flags for managing deprecated `applicationIngress` functionality 88 | - **Version-aware Logic**: Cluster version detection for compatibility (especially OCP 4.13+ changes) 89 | 90 | ## Development Notes 91 | 92 | - The operator runs with elevated permissions across multiple namespaces: `openshift-cloud-ingress-operator`, `openshift-ingress`, `openshift-ingress-operator`, `openshift-kube-apiserver`, `openshift-machine-api` 93 | - Testing requires careful setup due to dependencies on cloud infrastructure and OpenShift-specific resources 94 | - Manual testing instructions are provided in README.md for fleet deployments 95 | - The project uses generated includes from boilerplate conventions for consistent build processes -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | export KONFLUX_BUILDS=true 2 | FIPS_ENABLED=true 3 | include boilerplate/generated-includes.mk 4 | 5 | SHELL := /usr/bin/env bash 6 | 7 | .PHONY: boilerplate-update 8 | boilerplate-update: 9 | @boilerplate/update 10 | -------------------------------------------------------------------------------- /OWNERS: -------------------------------------------------------------------------------- 1 | reviewers: 2 | - sam-nguyen7 3 | - boranx 4 | - bng0y 5 | - robotmaxtron 6 | - ritmun 7 | - bdematte 8 | - abyrne55 9 | - reedcort 10 | - dakotalongRH 11 | - luis-falcon 12 | - rafael-azevedo 13 | approvers: 14 | - sam-nguyen7 15 | - boranx 16 | - abyrne55 17 | - luis-falcon 18 | - joshbranham 19 | -------------------------------------------------------------------------------- /OWNERS_ALIASES: 
-------------------------------------------------------------------------------- 1 | # ================================ DO NOT EDIT ================================ 2 | # This file is managed in https://github.com/openshift/boilerplate 3 | # See the OWNERS_ALIASES docs: https://git.k8s.io/community/contributors/guide/owners.md#OWNERS_ALIASES 4 | # ============================================================================= 5 | aliases: 6 | srep-functional-team-aurora: 7 | - abyrne55 8 | - dakotalongRH 9 | - joshbranham 10 | - luis-falcon 11 | - reedcort 12 | srep-functional-team-fedramp: 13 | - theautoroboto 14 | - katherinelc321 15 | - rojasreinold 16 | - fsferraz-rh 17 | - jonahbrawley 18 | - digilink 19 | - annelson-rh 20 | - pheckenlWork 21 | - ironcladlou 22 | - MrSantamaria 23 | - PeterCSRE 24 | - cjnovak98 25 | srep-functional-team-hulk: 26 | - ravitri 27 | - devppratik 28 | - Tafhim 29 | - tkong-redhat 30 | - TheUndeadKing 31 | - vaidehi411 32 | - chamalabey 33 | - charlesgong 34 | - rbhilare 35 | srep-functional-team-orange: 36 | - bergmannf 37 | - Makdaam 38 | - Nikokolas3270 39 | - RaphaelBut 40 | - MateSaary 41 | - rolandmkunkel 42 | - petrkotas 43 | - zmird-r 44 | - hectorakemp 45 | srep-functional-team-rocket: 46 | - aliceh 47 | - anispate 48 | - clcollins 49 | - Mhodesty 50 | - nephomaniac 51 | - tnierman 52 | srep-functional-team-security: 53 | - jaybeeunix 54 | - sam-nguyen7 55 | - wshearn 56 | - dem4gus 57 | - npecka 58 | - pshickeydev 59 | - casey-williams-rh 60 | - boranx 61 | srep-functional-team-thor: 62 | - a7vicky 63 | - diakovnec 64 | - MitaliBhalla 65 | - feichashao 66 | - samanthajayasinghe 67 | - xiaoyu74 68 | - Tessg22 69 | - smarthall 70 | srep-infra-cicd: 71 | - ritmun 72 | - yiqinzhang 73 | - varunraokadaparthi 74 | srep-functional-leads: 75 | - abyrne55 76 | - clcollins 77 | - bergmannf 78 | - theautoroboto 79 | - smarthall 80 | - sam-nguyen7 81 | - ravitri 82 | srep-team-leads: 83 | - rafael-azevedo 84 | - iamkirkbater 85 | - 
rogbas 86 | - dustman9000 87 | - bng0y 88 | - bmeng 89 | - typeid 90 | sre-group-leads: 91 | - apahim 92 | - maorfr 93 | - rogbas 94 | srep-architects: 95 | - jharrington22 96 | - cblecker 97 | -------------------------------------------------------------------------------- /PROJECT: -------------------------------------------------------------------------------- 1 | domain: cloudingress.managed.openshift.io 2 | layout: 3 | - go.kubebuilder.io/v3 4 | plugins: 5 | manifests.sdk.operatorframework.io/v2: {} 6 | scorecard.sdk.operatorframework.io/v2: {} 7 | projectName: cloud-ingress-operator 8 | repo: github.com/openshift/cloud-ingress-operator 9 | resources: 10 | - api: 11 | crdVersion: v1 12 | namespaced: true 13 | controller: true 14 | domain: cloudingress.managed.openshift.io 15 | group: cloudingress.managed.openshift.io 16 | kind: APIScheme 17 | path: github.com/openshift/cloud-ingress-operator/api/v1alpha1 18 | version: v1alpha1 19 | - api: 20 | crdVersion: v1 21 | namespaced: true 22 | controller: true 23 | domain: cloudingress.managed.openshift.io 24 | group: cloudingress.managed.openshift.io 25 | kind: PublishingStrategy 26 | path: github.com/openshift/cloud-ingress-operator/api/v1alpha1 27 | version: v1alpha1 28 | version: "3" 29 | -------------------------------------------------------------------------------- /api/v1alpha1/apischeme_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package v1alpha1 18 | 19 | import ( 20 | corev1 "k8s.io/api/core/v1" 21 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 22 | ) 23 | 24 | // APISchemeConditionType - APISchemeConditionType 25 | type APISchemeConditionType string 26 | 27 | const ( 28 | ConditionError APISchemeConditionType = "Error" 29 | ConditionReady APISchemeConditionType = "Ready" 30 | ) 31 | 32 | // APISchemeSpec defines the desired state of APIScheme 33 | type APISchemeSpec struct { 34 | // Important: Run "make" to regenerate code after modifying this file 35 | 36 | // Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html 37 | ManagementAPIServerIngress ManagementAPIServerIngress `json:"managementAPIServerIngress"` 38 | } 39 | 40 | // ManagementAPIServerIngress defines the Management API ingress 41 | type ManagementAPIServerIngress struct { 42 | // Enabled to create the Management API endpoint or not. 
43 | Enabled bool `json:"enabled"` 44 | // DNSName is the name that should be used for DNS of the management API, eg rh-api 45 | DNSName string `json:"dnsName"` 46 | // AllowedCIDRBlocks is the list of CIDR blocks that should be allowed to access the management API 47 | AllowedCIDRBlocks []string `json:"allowedCIDRBlocks"` 48 | } 49 | 50 | // APISchemeStatus defines the observed state of APIScheme 51 | type APISchemeStatus struct { 52 | // Important: Run "make" to regenerate code after modifying this file 53 | CloudLoadBalancerDNSName string `json:"cloudLoadBalancerDNSName,omitempty"` 54 | Conditions []APISchemeCondition `json:"conditions,omitempty"` 55 | State APISchemeConditionType `json:"state,omitempty"` 56 | } 57 | 58 | //+kubebuilder:object:root=true 59 | //+kubebuilder:subresource:status 60 | 61 | // APIScheme is the Schema for the apischemes API 62 | type APIScheme struct { 63 | metav1.TypeMeta `json:",inline"` 64 | metav1.ObjectMeta `json:"metadata,omitempty"` 65 | 66 | Spec APISchemeSpec `json:"spec"` 67 | Status APISchemeStatus `json:"status,omitempty"` 68 | } 69 | 70 | // APISchemeCondition is the history of transitions 71 | type APISchemeCondition struct { 72 | // Type is the type of condition 73 | Type APISchemeConditionType `json:"type,omitempty"` 74 | 75 | // LastTransitionTime Last change to status 76 | LastTransitionTime metav1.Time `json:"lastTransitionTime"` 77 | 78 | // LastProbeTime last time probed 79 | LastProbeTime metav1.Time `json:"lastProbeTime"` 80 | 81 | // AllowedCIDRBlocks currently allowed (as of the last successful Security Group update) 82 | AllowedCIDRBlocks []string `json:"allowedCIDRBlocks,omitempty"` 83 | 84 | // Reason is why we're making this status change 85 | Reason string `json:"reason"` 86 | 87 | // Message is an English text 88 | Message string `json:"message"` 89 | // Status 90 | Status corev1.ConditionStatus `json:"status"` 91 | } 92 | 93 | //+kubebuilder:object:root=true 94 | 95 | // APISchemeList contains a list of 
APIScheme 96 | type APISchemeList struct { 97 | metav1.TypeMeta `json:",inline"` 98 | metav1.ListMeta `json:"metadata,omitempty"` 99 | Items []APIScheme `json:"items"` 100 | } 101 | 102 | func init() { 103 | SchemeBuilder.Register(&APIScheme{}, &APISchemeList{}) 104 | } 105 | -------------------------------------------------------------------------------- /api/v1alpha1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Package v1alpha1 contains API Schema definitions for the cloudingress.managed.openshift.io v1alpha1 API group 18 | //+kubebuilder:object:generate=true 19 | //+groupName=cloudingress.managed.openshift.io 20 | package v1alpha1 21 | 22 | import ( 23 | "k8s.io/apimachinery/pkg/runtime/schema" 24 | "sigs.k8s.io/controller-runtime/pkg/scheme" 25 | ) 26 | 27 | var ( 28 | // GroupVersion is group version used to register these objects 29 | GroupVersion = schema.GroupVersion{Group: "cloudingress.managed.openshift.io", Version: "v1alpha1"} 30 | 31 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 32 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 33 | 34 | // AddToScheme adds the types in this group-version to the given scheme. 
35 | AddToScheme = SchemeBuilder.AddToScheme 36 | ) 37 | -------------------------------------------------------------------------------- /api/v1alpha1/publishingstrategy_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package v1alpha1 18 | 19 | import ( 20 | corev1 "k8s.io/api/core/v1" 21 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 22 | ) 23 | 24 | // PublishingStrategySpec defines the desired state of PublishingStrategy 25 | type PublishingStrategySpec struct { 26 | // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster 27 | // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file 28 | 29 | // DefaultAPIServerIngress defines whether API is internal or external 30 | DefaultAPIServerIngress DefaultAPIServerIngress `json:"defaultAPIServerIngress"` 31 | //ApplicationIngress defines whether application ingress is internal or external 32 | ApplicationIngress []ApplicationIngress `json:"applicationIngress"` 33 | } 34 | 35 | // DefaultAPIServerIngress defines API ingress 36 | type DefaultAPIServerIngress struct { 37 | // Listening defines internal or external ingress 38 | Listening Listening `json:"listening,omitempty"` 39 | } 40 | 41 | // ApplicationIngress defines application ingress 42 | type ApplicationIngress struct { 43 | // Listening defines application ingress as internal or 
external 44 | Listening Listening `json:"listening,omitempty"` 45 | // Default defines default value of ingress when cluster installs 46 | Default bool `json:"default"` 47 | DNSName string `json:"dnsName"` 48 | Certificate corev1.SecretReference `json:"certificate"` 49 | RouteSelector metav1.LabelSelector `json:"routeSelector,omitempty"` 50 | Type Type `json:"type,omitempty"` 51 | } 52 | 53 | // Listening defines internal or external api and ingress 54 | type Listening string 55 | 56 | // Type indicates the type of Load Balancer to use 57 | // +kubebuilder:validation:Enum=Classic;NLB 58 | type Type string 59 | 60 | const ( 61 | // Internal const for listening status 62 | Internal Listening = "internal" 63 | // External const for listening status 64 | External Listening = "external" 65 | ) 66 | 67 | // PublishingStrategyStatus defines the observed state of PublishingStrategy 68 | type PublishingStrategyStatus struct { 69 | // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster 70 | // Important: Run "make" to regenerate code after modifying this file 71 | } 72 | 73 | //+kubebuilder:object:root=true 74 | //+kubebuilder:subresource:status 75 | 76 | // PublishingStrategy is the Schema for the publishingstrategies API 77 | type PublishingStrategy struct { 78 | metav1.TypeMeta `json:",inline"` 79 | metav1.ObjectMeta `json:"metadata,omitempty"` 80 | 81 | Spec PublishingStrategySpec `json:"spec"` 82 | Status PublishingStrategyStatus `json:"status,omitempty"` 83 | } 84 | 85 | //+kubebuilder:object:root=true 86 | 87 | // PublishingStrategyList contains a list of PublishingStrategy 88 | type PublishingStrategyList struct { 89 | metav1.TypeMeta `json:",inline"` 90 | metav1.ListMeta `json:"metadata,omitempty"` 91 | Items []PublishingStrategy `json:"items"` 92 | } 93 | 94 | func init() { 95 | SchemeBuilder.Register(&PublishingStrategy{}, &PublishingStrategyList{}) 96 | } 97 | -------------------------------------------------------------------------------- 
/api/v1alpha1/zz_generated.openapi.go: -------------------------------------------------------------------------------- 1 | //go:build !ignore_autogenerated 2 | // +build !ignore_autogenerated 3 | 4 | // Code generated by openapi-gen. DO NOT EDIT. 5 | 6 | // This file was autogenerated by openapi-gen. Do not edit it manually! 7 | 8 | package v1alpha1 9 | 10 | import ( 11 | common "k8s.io/kube-openapi/pkg/common" 12 | ) 13 | 14 | func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { 15 | return map[string]common.OpenAPIDefinition{} 16 | } 17 | -------------------------------------------------------------------------------- /boilerplate/_data/backing-image-tag: -------------------------------------------------------------------------------- 1 | image-v8.2.0 2 | -------------------------------------------------------------------------------- /boilerplate/_data/last-boilerplate-commit: -------------------------------------------------------------------------------- 1 | d34e59645cd877be62073b0df2ff91de2ea7659c 2 | -------------------------------------------------------------------------------- /boilerplate/_lib/boilerplate.mk: -------------------------------------------------------------------------------- 1 | .PHONY: boilerplate-commit 2 | boilerplate-commit: 3 | @boilerplate/_lib/boilerplate-commit 4 | 5 | .PHONY: boilerplate-freeze-check 6 | boilerplate-freeze-check: 7 | @boilerplate/_lib/freeze-check 8 | -------------------------------------------------------------------------------- /boilerplate/_lib/container-make: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [[ "$1" == "-h"* ]] || [[ "$1" == "--h"* ]]; then 4 | echo "Usage: $0 {arguments to the real 'make'}" 5 | echo "Runs 'make' in the boilerplate backing container." 6 | echo "If the command fails, starts a shell in the container so you can debug." 
7 | exit -1 8 | fi 9 | 10 | source ${0%/*}/common.sh 11 | 12 | CONTAINER_ENGINE="${CONTAINER_ENGINE:-$(command -v podman || command -v docker)}" 13 | [[ -n "$CONTAINER_ENGINE" ]] || err "Couldn't find a container engine. Are you already in a container?" 14 | 15 | # Make sure the mount inside the container is named in such a way that 16 | # - openapi-gen (which relies on GOPATH) produces absolute paths; and 17 | # - other go-ish paths are writeable, e.g. for `go mod download`. 18 | CONTAINER_MOUNT=/go/src/$(repo_import $REPO_ROOT) 19 | 20 | # First set up a detached container with the repo mounted. 21 | banner "Starting the container" 22 | CE_OPTS="--platform=linux/amd64" 23 | if [[ "${CONTAINER_ENGINE##*/}" == "podman" ]]; then 24 | CE_OPTS="${CE_OPTS} --userns keep-id" 25 | fi 26 | if [[ "${CONTAINER_ENGINE##*/}" == "podman" ]] && [[ $OSTYPE == *"linux"* ]]; then 27 | CE_OPTS="${CE_OPTS} -v $REPO_ROOT:$CONTAINER_MOUNT:Z" 28 | else 29 | CE_OPTS="${CE_OPTS} -v $REPO_ROOT:$CONTAINER_MOUNT" 30 | fi 31 | container_id=$($CONTAINER_ENGINE run -d ${CE_OPTS} $IMAGE_PULL_PATH sleep infinity) 32 | 33 | if [[ $? -ne 0 ]] || [[ -z "$container_id" ]]; then 34 | err "Couldn't start detached container" 35 | fi 36 | 37 | # Now run our `make` command in it with the right UID and working directory 38 | args="exec -it -u $(id -u):0 -w $CONTAINER_MOUNT $container_id" 39 | banner "Running: make $@" 40 | $CONTAINER_ENGINE $args make "$@" 41 | rc=$? 42 | 43 | # If it failed, drop into the container in a shell 44 | if [[ $rc -ne 0 ]]; then 45 | banner "The 'make' command failed! Starting a shell in the container for debugging. Just 'exit' when done." 
46 | $CONTAINER_ENGINE $args /bin/bash 47 | fi 48 | 49 | # Finally, remove the container 50 | banner "Cleaning up the container" 51 | $CONTAINER_ENGINE rm -f $container_id >/dev/null 52 | -------------------------------------------------------------------------------- /boilerplate/_lib/freeze-check: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # NOTE: For security reasons, everything imported or invoked (even 4 | # indirectly) by this script should be audited for vulnerabilities and 5 | # explicitly excluded from `linguist-generated` in the consuming 6 | # repository's .gitattributes. In other words, we want PRs to show 7 | # deltas to this script and all its dependencies by default so that 8 | # attempts to inject or circumvent code are visible. 9 | 10 | set -e 11 | 12 | REPO_ROOT=$(git rev-parse --show-toplevel) 13 | # Hardcoded rather than sourced to reduce attack surface. 14 | BOILERPLATE_GIT_REPO=https://github.com/openshift/boilerplate.git 15 | 16 | # Validate that no subscribed boilerplate artifacts have been changed. 17 | # PR checks may wish to gate on this. 18 | 19 | # This works by grabbing the commit hash of the boilerplate repository 20 | # at which the last update was applied, running the main `update` driver 21 | # against that, and failing if there's a resulting diff. 22 | 23 | # If we can't tell what that commit was, we must assume this is the 24 | # first update, and we'll (noisily) "succeed". 25 | 26 | # Note that this ought to work when you've just committed an update, 27 | # even if you've changed your update.cfg beforehand. We're basically 28 | # making sure you didn't muck with anything after updating. 29 | 30 | # For this to work, you have to be starting from a clean repository 31 | # state (any changes committed). 32 | # TODO(efried): This is not ideal -- it would be nice if I could check 33 | # this before committing my changes -- but how would that work? 
Diff to 34 | # a file, create a temporary commit, run the rest, remove the commit, 35 | # and reapply the diff? Messy and error-prone -- and I would be 36 | # seriously ticked off if something went wrong and lost my in-flight 37 | # changes. 38 | if ! [ -z "$(git status --porcelain -- ':!build/Dockerfile*')" ]; then 39 | echo "Can't validate boilerplate in a dirty repository. Please commit your changes and try again." >&2 40 | exit 1 41 | fi 42 | 43 | # We glean the last boilerplate commit from the 44 | # last-boilerplate-commit file, which gets laid down by the main 45 | # `update` driver each time it runs. 46 | LBCF=${REPO_ROOT}/boilerplate/_data/last-boilerplate-commit 47 | if ! [[ -f "$LBCF" ]]; then 48 | echo "Couldn't discover last boilerplate commit! Assuming you're bootstrapping." 49 | exit 0 50 | fi 51 | LBC=$(cat $LBCF) 52 | 53 | # Download just that commit 54 | echo "Fetching $LBC from $BOILERPLATE_GIT_REPO" 55 | # boilerplate/update cleans up this temp dir 56 | TMPD=$(mktemp -d) 57 | cd $TMPD 58 | git init 59 | # TODO(efried): DRY this remote. Make it configurable? 60 | git remote add origin $BOILERPLATE_GIT_REPO 61 | git fetch origin $(cat $LBCF) --tags 62 | git reset --hard FETCH_HEAD 63 | 64 | # Now invoke the update script, overriding the source repository we've 65 | # just downloaded at the appropriate commit. 66 | # We invoke the script explicitly rather than via the make target to 67 | # close a security hole whereby the latter is overridden. 68 | echo "Running update" 69 | cd $REPO_ROOT 70 | BOILERPLATE_GIT_REPO="${TMPD}" boilerplate/update 71 | 72 | # Okay, if anything has changed, that's bad. 73 | if [[ $(git status --porcelain -- ':!build/Dockerfile*' | wc -l) -ne 0 ]]; then 74 | echo "Your boilerplate is dirty!" >&2 75 | git status --porcelain -- ':!build/Dockerfile*' 76 | exit 1 77 | fi 78 | 79 | echo "Your boilerplate is clean!" 
80 | exit 0 81 | -------------------------------------------------------------------------------- /boilerplate/_lib/release.sh: -------------------------------------------------------------------------------- 1 | # Helpers and variables for dealing with openshift/release 2 | 3 | # NOTE: This library is sourced from user-run scripts. It should not be 4 | # sourced in CI, as it relies on git config that's not necessarily 5 | # present there. 6 | 7 | RELEASE_REPO=openshift/release 8 | 9 | ## Information about the boilerplate consumer 10 | # E.g. "openshift/my-wizbang-operator" 11 | CONSUMER=$(repo_name .) 12 | [[ -z "$CONSUMER" ]] && err " 13 | Failed to determine current repository name" 14 | # 15 | # E.g. "openshift" 16 | CONSUMER_ORG=${CONSUMER%/*} 17 | [[ -z "$CONSUMER_ORG" ]] && err " 18 | Failed to determine consumer org" 19 | # 20 | # E.g. "my-wizbang-operator" 21 | CONSUMER_NAME=${CONSUMER#*/} 22 | [[ -z "$CONSUMER_NAME" ]] && err " 23 | Failed to determine consumer name" 24 | # 25 | # E.g. "master" 26 | # This will produce something like refs/remotes/origin/master 27 | DEFAULT_BRANCH=$(git symbolic-ref refs/remotes/upstream/HEAD 2>/dev/null || git symbolic-ref refs/remotes/origin/HEAD 2>/dev/null || echo defaulting/to/master) 28 | # Strip off refs/remotes/{upstream|origin}/ 29 | DEFAULT_BRANCH=${DEFAULT_BRANCH##*/} 30 | [[ -z "$DEFAULT_BRANCH" ]] && err " 31 | Failed to determine default branch name" 32 | 33 | ## release_process_args "$@" 34 | # 35 | # This is for use by commands expecting one optional argument which is 36 | # the file system path to a clone of the $RELEASE_REPO. 37 | # 38 | # Will invoke `usage` -- which must be defined by the caller -- if 39 | # the wrong number of arguments are received, or if the single argument 40 | # is `help` or a flag. 41 | # 42 | # If exactly one argument is specified and it is valid, it is assigned 43 | # to the global RELEASE_CLONE variable. 
44 | release_process_args() { 45 | if [[ $# -eq 1 ]]; then 46 | # Special cases for usage queries 47 | if [[ "$1" == '-'* ]] || [[ "$1" == help ]]; then 48 | usage 49 | fi 50 | 51 | [[ -d $1 ]] || err " 52 | $1: Not a directory." 53 | 54 | [[ $(repo_name $1) == "$RELEASE_REPO" ]] || err " 55 | $1 is not a clone of $RELEASE_REPO; or its 'origin' remote is not set properly." 56 | 57 | # Got a usable clone of openshift/release 58 | RELEASE_CLONE="$1" 59 | 60 | elif [[ $# -ne 0 ]]; then 61 | usage 62 | fi 63 | } 64 | 65 | ## release_validate_invocation 66 | # 67 | # Make sure we were called from a reasonable place, that being: 68 | # - A boilerplate consumer 69 | # - ...that's actually subscribed to a convention 70 | # - ...containing the script being invoked 71 | release_validate_invocation() { 72 | # Make sure we were invoked from a boilerplate consumer. 73 | [[ -z "$CONVENTION_NAME" ]] && err " 74 | $cmd must be invoked from a consumer of an appropriate convention. Where did you get this script from?" 75 | # Or at least not from boilerplate itself 76 | [[ "$CONSUMER" == "openshift/boilerplate" ]] && err " 77 | $cmd must be invoked from a boilerplate consumer, not from boilerplate itself." 78 | 79 | [[ -s $CONVENTION_ROOT/_data/last-boilerplate-commit ]] || err " 80 | $cmd must be invoked from a boilerplate consumer!" 81 | 82 | grep -E -q "^$CONVENTION_NAME(\s.*)?$" $CONVENTION_ROOT/update.cfg || err " 83 | $CONSUMER is not subscribed to $CONVENTION_NAME!" 84 | } 85 | 86 | ## release_prep_clone 87 | # 88 | # If $RELEASE_CLONE is already set: 89 | # - It should represent a directory containing a clean checkout of the 90 | # release repository; otherwise we error. 91 | # - We checkout and pull master. 92 | # Otherwise: 93 | # - We clone the release repo to a temporary directory. 94 | # - We set the $RELEASE_CLONE global variable to point to that 95 | # directory. 
96 | release_prep_clone() { 97 | # If a release repo clone wasn't specified, create one 98 | if [[ -z "$RELEASE_CLONE" ]]; then 99 | RELEASE_CLONE=$(mktemp -dt openshift_release_XXXXXXX) 100 | git clone --depth=1 git@github.com:${RELEASE_REPO}.git $RELEASE_CLONE 101 | else 102 | [[ -z "$(git -C $RELEASE_CLONE status --porcelain)" ]] || err " 103 | Your release clone must start clean." 104 | # These will blow up if it's misconfigured 105 | git -C $RELEASE_CLONE checkout master 106 | git -C $RELEASE_CLONE pull 107 | fi 108 | } 109 | 110 | ## release_done_msg BRANCH 111 | # 112 | # Print exit instructions for submitting the release PR. 113 | # BRANCH is a suggested branch name. 114 | release_done_msg() { 115 | echo 116 | git status 117 | 118 | cat < $TMPD/$f 46 | echo $TMPD/$f 47 | return 48 | fi 49 | done 50 | } 51 | 52 | ## expected_prow_config ORG PROJ BRANCH 53 | # 54 | # Prints to stdout the expected prow configuration for the specified 55 | # ORG/PROJ. 56 | expected_prow_config() { 57 | local org=$1 58 | local consumer_name=$2 59 | local branch=$3 60 | # TODO: DRY this with what's in prow-config. 61 | # Do it by making it a template in the convention dir. 62 | cat <-test-harness. Quay repository must be created beforehand. | 34 | 35 | #### E2E Local Testing 36 | 37 | Please follow [this README](https://github.com/openshift/ops-sop/blob/master/v4/howto/osde2e/operator-test-harnesses.md#using-ginkgo) to run your e2e tests locally 38 | 39 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-e2e/e2e-template.yml: -------------------------------------------------------------------------------- 1 | # THIS FILE IS GENERATED BY BOILERPLATE. DO NOT EDIT. 
2 | apiVersion: template.openshift.io/v1 3 | kind: Template 4 | metadata: 5 | name: osde2e-focused-tests 6 | parameters: 7 | - name: OSDE2E_CONFIGS 8 | required: true 9 | - name: TEST_IMAGE 10 | required: true 11 | - name: OCM_CLIENT_ID 12 | required: false 13 | - name: OCM_CLIENT_SECRET 14 | required: false 15 | - name: OCM_CCS 16 | required: false 17 | - name: AWS_ACCESS_KEY_ID 18 | required: false 19 | - name: AWS_SECRET_ACCESS_KEY 20 | required: false 21 | - name: CLOUD_PROVIDER_REGION 22 | required: false 23 | - name: GCP_CREDS_JSON 24 | required: false 25 | - name: JOBID 26 | generate: expression 27 | from: "[0-9a-z]{7}" 28 | - name: IMAGE_TAG 29 | value: '' 30 | required: true 31 | - name: LOG_BUCKET 32 | value: 'osde2e-logs' 33 | - name: USE_EXISTING_CLUSTER 34 | value: 'TRUE' 35 | - name: CAD_PAGERDUTY_ROUTING_KEY 36 | required: false 37 | objects: 38 | - apiVersion: batch/v1 39 | kind: Job 40 | metadata: 41 | name: osde2e-${OPERATOR_NAME}-${IMAGE_TAG}-${JOBID} 42 | spec: 43 | backoffLimit: 0 44 | template: 45 | spec: 46 | restartPolicy: Never 47 | containers: 48 | - name: osde2e 49 | image: quay.io/redhat-services-prod/osde2e-cicada-tenant/osde2e:latest 50 | command: 51 | - /osde2e 52 | args: 53 | - test 54 | - --only-health-check-nodes 55 | - --skip-destroy-cluster 56 | - --skip-must-gather 57 | - --configs 58 | - ${OSDE2E_CONFIGS} 59 | securityContext: 60 | runAsNonRoot: true 61 | allowPrivilegeEscalation: false 62 | capabilities: 63 | drop: ["ALL"] 64 | seccompProfile: 65 | type: RuntimeDefault 66 | env: 67 | - name: AD_HOC_TEST_IMAGES 68 | value: ${TEST_IMAGE}:${IMAGE_TAG} 69 | - name: OCM_CLIENT_ID 70 | value: ${OCM_CLIENT_ID} 71 | - name: OCM_CLIENT_SECRET 72 | value: ${OCM_CLIENT_SECRET} 73 | - name: OCM_CCS 74 | value: ${OCM_CCS} 75 | - name: AWS_ACCESS_KEY_ID 76 | value: ${AWS_ACCESS_KEY_ID} 77 | - name: AWS_SECRET_ACCESS_KEY 78 | value: ${AWS_SECRET_ACCESS_KEY} 79 | - name: CLOUD_PROVIDER_REGION 80 | value: ${CLOUD_PROVIDER_REGION} 81 | - name: 
GCP_CREDS_JSON 82 | value: ${GCP_CREDS_JSON} 83 | - name: LOG_BUCKET 84 | value: ${LOG_BUCKET} 85 | - name: USE_EXISTING_CLUSTER 86 | value: ${USE_EXISTING_CLUSTER} 87 | - name: CAD_PAGERDUTY_ROUTING_KEY 88 | value: ${CAD_PAGERDUTY_ROUTING_KEY} 89 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-e2e/project.mk: -------------------------------------------------------------------------------- 1 | # Project specific values 2 | OPERATOR_NAME?=$(shell sed -n 's/.*OperatorName .*"\([^"]*\)".*/\1/p' config/config.go) 3 | 4 | E2E_IMAGE_REGISTRY?=quay.io 5 | E2E_IMAGE_REPOSITORY?=app-sre 6 | E2E_IMAGE_NAME?=$(OPERATOR_NAME)-e2e 7 | 8 | 9 | REGISTRY_USER?=$(QUAY_USER) 10 | REGISTRY_TOKEN?=$(QUAY_TOKEN) 11 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-e2e/standard.mk: -------------------------------------------------------------------------------- 1 | # Validate variables in project.mk exist 2 | ifndef OPERATOR_NAME 3 | $(error OPERATOR_NAME is not set; only operators should consume this convention; check project.mk file) 4 | endif 5 | ifndef E2E_IMAGE_REGISTRY 6 | $(error E2E_IMAGE_REGISTRY is not set; check project.mk file) 7 | endif 8 | ifndef E2E_IMAGE_REPOSITORY 9 | $(error E2E_IMAGE_REPOSITORY is not set; check project.mk file) 10 | endif 11 | 12 | # Use current commit as e2e image tag 13 | CURRENT_COMMIT=$(shell git rev-parse --short=7 HEAD) 14 | E2E_IMAGE_TAG=$(CURRENT_COMMIT) 15 | 16 | ### Accommodate docker or podman 17 | # 18 | # The docker/podman creds cache needs to be in a location unique to this 19 | # invocation; otherwise it could collide across jenkins jobs. We'll use 20 | # a .docker folder relative to pwd (the repo root). 
21 | CONTAINER_ENGINE_CONFIG_DIR = .docker 22 | JENKINS_DOCKER_CONFIG_FILE = /var/lib/jenkins/.docker/config.json 23 | export REGISTRY_AUTH_FILE = ${CONTAINER_ENGINE_CONFIG_DIR}/config.json 24 | 25 | # If this configuration file doesn't exist, podman will error out. So 26 | # we'll create it if it doesn't exist. 27 | ifeq (,$(wildcard $(REGISTRY_AUTH_FILE))) 28 | $(shell mkdir -p $(CONTAINER_ENGINE_CONFIG_DIR)) 29 | # Copy the node container auth file so that we get access to the registries the 30 | # parent node has access to 31 | $(shell if test -f $(JENKINS_DOCKER_CONFIG_FILE); then cp $(JENKINS_DOCKER_CONFIG_FILE) $(REGISTRY_AUTH_FILE); fi) 32 | endif 33 | 34 | # ==> Docker uses --config=PATH *before* (any) subcommand; so we'll glue 35 | # that to the CONTAINER_ENGINE variable itself. (NOTE: I tried half a 36 | # dozen other ways to do this. This was the least ugly one that actually 37 | # works.) 38 | ifndef CONTAINER_ENGINE 39 | CONTAINER_ENGINE=$(shell command -v podman 2>/dev/null || echo docker --config=$(CONTAINER_ENGINE_CONFIG_DIR)) 40 | endif 41 | 42 | REGISTRY_USER ?= 43 | REGISTRY_TOKEN ?= 44 | 45 | # TODO: Figure out how to discover this dynamically 46 | OSDE2E_CONVENTION_DIR := boilerplate/openshift/golang-osd-operator-osde2e 47 | 48 | # log into quay.io 49 | .PHONY: container-engine-login 50 | container-engine-login: 51 | @test "${REGISTRY_USER}" != "" && test "${REGISTRY_TOKEN}" != "" || (echo "REGISTRY_USER and REGISTRY_TOKEN must be defined" && exit 1) 52 | mkdir -p ${CONTAINER_ENGINE_CONFIG_DIR} 53 | @${CONTAINER_ENGINE} login -u="${REGISTRY_USER}" -p="${REGISTRY_TOKEN}" quay.io 54 | 55 | ###################### 56 | # Targets used by e2e test suite 57 | ###################### 58 | 59 | # create binary 60 | .PHONY: e2e-binary-build 61 | e2e-binary-build: GOFLAGS_MOD=-mod=mod 62 | e2e-binary-build: GOENV=GOOS=${GOOS} GOARCH=${GOARCH} CGO_ENABLED=0 GOFLAGS="${GOFLAGS_MOD}" 63 | e2e-binary-build: 64 | go mod tidy 65 | go test ./test/e2e -v -c 
--tags=osde2e -o e2e.test 66 | 67 | # push e2e image tagged as latest and as repo commit hash 68 | .PHONY: e2e-image-build-push 69 | e2e-image-build-push: container-engine-login 70 | ${CONTAINER_ENGINE} build --pull -f test/e2e/Dockerfile -t $(E2E_IMAGE_REGISTRY)/$(E2E_IMAGE_REPOSITORY)/$(E2E_IMAGE_NAME):$(E2E_IMAGE_TAG) . 71 | ${CONTAINER_ENGINE} tag $(E2E_IMAGE_REGISTRY)/$(E2E_IMAGE_REPOSITORY)/$(E2E_IMAGE_NAME):$(E2E_IMAGE_TAG) $(E2E_IMAGE_REGISTRY)/$(E2E_IMAGE_REPOSITORY)/$(E2E_IMAGE_NAME):latest 72 | ${CONTAINER_ENGINE} push $(E2E_IMAGE_REGISTRY)/$(E2E_IMAGE_REPOSITORY)/$(E2E_IMAGE_NAME):$(E2E_IMAGE_TAG) 73 | ${CONTAINER_ENGINE} push $(E2E_IMAGE_REGISTRY)/$(E2E_IMAGE_REPOSITORY)/$(E2E_IMAGE_NAME):latest 74 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-e2e/update: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $CONVENTION_ROOT/_lib/common.sh 6 | 7 | # No PRE 8 | [[ "$1" == "PRE" ]] && exit 0 9 | 10 | # Expect POST 11 | [[ "$1" == "POST" ]] || err "Got a parameter I don't understand: '$1'. Did the infrastructure change?" 
12 | 13 | REPO_ROOT=$(git rev-parse --show-toplevel) 14 | OPERATOR_NAME=$(sed -n 's/.*OperatorName .*=.*"\([^"]*\)".*/\1/p' "${REPO_ROOT}/config/config.go") 15 | E2E_SUITE_DIRECTORY=$REPO_ROOT/test/e2e 16 | 17 | # Update operator name in templates 18 | OPERATOR_UNDERSCORE_NAME=${OPERATOR_NAME//-/_} 19 | OPERATOR_PROPER_NAME=$(echo "$OPERATOR_NAME" | sed 's/-/ /g' | awk '{for(i=1;i<=NF;i++){ $i=toupper(substr($i,1,1)) substr($i,2) }}1') 20 | OPERATOR_NAME_CAMEL_CASE=${OPERATOR_PROPER_NAME// /} 21 | 22 | mkdir -p "${E2E_SUITE_DIRECTORY}" 23 | 24 | E2E_SUITE_BUILDER_IMAGE=registry.ci.openshift.org/openshift/release:rhel-9-release-golang-1.24-openshift-4.20 25 | if [[ -n ${KONFLUX_BUILDS} ]]; then 26 | E2E_SUITE_BUILDER_IMAGE="brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_9_1.24" 27 | fi 28 | 29 | echo "syncing ${E2E_SUITE_DIRECTORY}/Dockerfile" 30 | tee "${E2E_SUITE_DIRECTORY}/Dockerfile" < /(path-to)/kubeconfig 101 | 102 | 5. Run test suite using 103 | 104 | DISABLE_JUNIT_REPORT=true KUBECONFIG=/(path-to)/kubeconfig ./(path-to)/bin/ginkgo --tags=osde2e -v test/e2e 105 | EOF 106 | 107 | sed -e "s/\${OPERATOR_NAME}/${OPERATOR_NAME}/" $(dirname $0)/e2e-template.yml >"${E2E_SUITE_DIRECTORY}/e2e-template.yml" 108 | 109 | # todo: remove after file is renamed in ALL consumer repos 110 | if [ -f "${E2E_SUITE_DIRECTORY}/test-harness-template.yml" ]; then 111 | rm -f "${E2E_SUITE_DIRECTORY}/test-harness-template.yml" 112 | fi 113 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/.ci-operator.yaml: -------------------------------------------------------------------------------- 1 | build_root_image: 2 | name: __NAME__ 3 | namespace: __NAMESPACE__ 4 | tag: __TAG__ 5 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/.codecov.yml: -------------------------------------------------------------------------------- 1 | 
codecov: 2 | notify: 3 | require_ci_to_pass: no 4 | 5 | coverage: 6 | precision: 2 7 | round: down 8 | range: "20...100" 9 | 10 | status: 11 | project: no 12 | patch: no 13 | changes: no 14 | 15 | parsers: 16 | gcov: 17 | branch_detection: 18 | conditional: yes 19 | loop: yes 20 | method: no 21 | macro: no 22 | 23 | comment: 24 | layout: "reach,diff,flags,tree" 25 | behavior: default 26 | require_changes: no 27 | 28 | ignore: 29 | - "**/mocks" 30 | - "**/zz_generated*.go" 31 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/Dockerfile.olm-registry: -------------------------------------------------------------------------------- 1 | FROM registry.redhat.io/openshift4/ose-operator-registry-rhel9:v4.19 AS builder 2 | ARG SAAS_OPERATOR_DIR 3 | COPY ${SAAS_OPERATOR_DIR} manifests 4 | RUN initializer --permissive 5 | 6 | # ubi-micro does not work for clusters with fips enabled unless we make OpenSSL available 7 | FROM registry.access.redhat.com/ubi9/ubi-minimal:latest 8 | 9 | COPY --from=builder /bin/registry-server /bin/registry-server 10 | COPY --from=builder /bin/grpc_health_probe /bin/grpc_health_probe 11 | COPY --from=builder /bin/initializer /bin/initializer 12 | 13 | WORKDIR /registry 14 | RUN chgrp -R 0 /registry && chmod -R g+rwx /registry 15 | 16 | USER 1001 17 | 18 | COPY --from=builder /registry /registry 19 | 20 | EXPOSE 50051 21 | 22 | CMD ["registry-server", "-t", "/tmp/terminate.log"] 23 | 24 | # Set the DC specific label for the location of the DC database in the image 25 | LABEL operators.operatorframework.io.index.database.v1=/registry/bundles.db 26 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/OWNERS_ALIASES: -------------------------------------------------------------------------------- 1 | # ================================ DO NOT EDIT ================================ 2 | # This file is managed 
in https://github.com/openshift/boilerplate 3 | # See the OWNERS_ALIASES docs: https://git.k8s.io/community/contributors/guide/owners.md#OWNERS_ALIASES 4 | # ============================================================================= 5 | aliases: 6 | srep-functional-team-aurora: 7 | - abyrne55 8 | - dakotalongRH 9 | - joshbranham 10 | - luis-falcon 11 | - reedcort 12 | srep-functional-team-fedramp: 13 | - theautoroboto 14 | - katherinelc321 15 | - rojasreinold 16 | - fsferraz-rh 17 | - jonahbrawley 18 | - digilink 19 | - annelson-rh 20 | - pheckenlWork 21 | - ironcladlou 22 | - MrSantamaria 23 | - PeterCSRE 24 | - cjnovak98 25 | srep-functional-team-hulk: 26 | - ravitri 27 | - devppratik 28 | - Tafhim 29 | - tkong-redhat 30 | - TheUndeadKing 31 | - vaidehi411 32 | - chamalabey 33 | - charlesgong 34 | - rbhilare 35 | srep-functional-team-orange: 36 | - bergmannf 37 | - Makdaam 38 | - Nikokolas3270 39 | - RaphaelBut 40 | - MateSaary 41 | - rolandmkunkel 42 | - petrkotas 43 | - zmird-r 44 | - hectorakemp 45 | srep-functional-team-rocket: 46 | - aliceh 47 | - anispate 48 | - clcollins 49 | - Mhodesty 50 | - nephomaniac 51 | - tnierman 52 | srep-functional-team-security: 53 | - jaybeeunix 54 | - sam-nguyen7 55 | - wshearn 56 | - dem4gus 57 | - npecka 58 | - pshickeydev 59 | - casey-williams-rh 60 | - boranx 61 | srep-functional-team-thor: 62 | - a7vicky 63 | - diakovnec 64 | - MitaliBhalla 65 | - feichashao 66 | - samanthajayasinghe 67 | - xiaoyu74 68 | - Tessg22 69 | - smarthall 70 | srep-infra-cicd: 71 | - ritmun 72 | - yiqinzhang 73 | - varunraokadaparthi 74 | srep-functional-leads: 75 | - abyrne55 76 | - clcollins 77 | - bergmannf 78 | - theautoroboto 79 | - smarthall 80 | - sam-nguyen7 81 | - ravitri 82 | srep-team-leads: 83 | - rafael-azevedo 84 | - iamkirkbater 85 | - rogbas 86 | - dustman9000 87 | - bng0y 88 | - bmeng 89 | - typeid 90 | sre-group-leads: 91 | - apahim 92 | - maorfr 93 | - rogbas 94 | srep-architects: 95 | - jharrington22 96 | - cblecker 97 | 
-------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/app-sre-build-deploy.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -ev 4 | 5 | usage() { 6 | cat < Generate Encrypted Password. 50 | # Even if you're not using quay, the pipeline expects these variables to 51 | # be named QUAY_* 52 | export QUAY_USER= 53 | export QUAY_TOKEN= 54 | 55 | # Tell the scripts where to find your fork of the SaaS bundle repository. 56 | # Except for the authentication part, this should correspond to what you see in the 57 | # https "clone" button in your fork. 58 | # Generate an access token via Settings => Access Tokens. Enable `write_repository`. 59 | # - {gitlab-user} is your username in gitlab 60 | # - {gitlab-token} is the authentication token you generated above 61 | # - {operator} is the name of the consumer repository, e.g. `deadmanssnitch-operator` 62 | export GIT_PATH=https://{gitlab-user}:{gitlab-token}@gitlab.cee.redhat.com/{gitlab-user}/saas-{operator}-bundle.git 63 | ``` 64 | 65 | ## Execute 66 | At this point you should be able to run 67 | ``` 68 | make build-push 69 | ``` 70 | 71 | This will create the following artifacts if it succeeds 72 | (`{hash}` is the 7-digit SHA of the current git commit in the repository under test): 73 | - Operator image in your personal operator repository, tagged `v{major}.{minor}.{commit-count}-{hash}` (e.g. `v0.1.228-e0b6129`) and `latest` 74 | - Two catalog images in your personal registry repository: 75 | - One image tagged `staging-{hash}` and `staging-latest` 76 | - The other tagged `production-{hash}` and `production-latest` 77 | - Two commits in your fork of the SaaS bundle repository: 78 | - One in the `staging` branch 79 | - The other in the `production` branch 80 | These are also present locally in a `saas-{operator-name}-bundle` subdirectory of your operator repository clone. 
81 | You can inspect the artifacts therein to make sure e.g. the CSV was generated correctly. 82 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/codecov.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o errexit 4 | set -o nounset 5 | set -o pipefail 6 | 7 | REPO_ROOT=$(git rev-parse --show-toplevel) 8 | CI_SERVER_URL=https://prow.svc.ci.openshift.org/view/gcs/origin-ci-test 9 | COVER_PROFILE=${COVER_PROFILE:-coverage.out} 10 | JOB_TYPE=${JOB_TYPE:-"local"} 11 | 12 | # Default concurrency to four threads. By default it's the number of procs, 13 | # which seems to be 16 in the CI env. Some consumers' coverage jobs were 14 | # regularly getting OOM-killed; so do this rather than boost the pod resources 15 | # unreasonably. 16 | COV_THREAD_COUNT=${COV_THREAD_COUNT:-4} 17 | make -C "${REPO_ROOT}" go-test TESTOPTS="-coverprofile=${COVER_PROFILE}.tmp -covermode=atomic -coverpkg=./... 
-p ${COV_THREAD_COUNT}" 18 | 19 | # Remove generated files from coverage profile 20 | grep -v "zz_generated" "${COVER_PROFILE}.tmp" > "${COVER_PROFILE}" 21 | rm -f "${COVER_PROFILE}.tmp" 22 | 23 | # Configure the git refs and job link based on how the job was triggered via prow 24 | if [[ "${JOB_TYPE}" == "presubmit" ]]; then 25 | echo "detected PR code coverage job for #${PULL_NUMBER}" 26 | REF_FLAGS="-P ${PULL_NUMBER} -C ${PULL_PULL_SHA}" 27 | JOB_LINK="${CI_SERVER_URL}/pr-logs/pull/${REPO_OWNER}_${REPO_NAME}/${PULL_NUMBER}/${JOB_NAME}/${BUILD_ID}" 28 | elif [[ "${JOB_TYPE}" == "postsubmit" ]]; then 29 | echo "detected branch code coverage job for ${PULL_BASE_REF}" 30 | REF_FLAGS="-B ${PULL_BASE_REF} -C ${PULL_BASE_SHA}" 31 | JOB_LINK="${CI_SERVER_URL}/logs/${JOB_NAME}/${BUILD_ID}" 32 | elif [[ "${JOB_TYPE}" == "local" ]]; then 33 | echo "coverage report available at ${COVER_PROFILE}" 34 | exit 0 35 | else 36 | echo "${JOB_TYPE} jobs not supported" >&2 37 | exit 1 38 | fi 39 | 40 | # Configure certain internal codecov variables with values from prow. 41 | export CI_BUILD_URL="${JOB_LINK}" 42 | export CI_BUILD_ID="${JOB_NAME}" 43 | export CI_JOB_ID="${BUILD_ID}" 44 | 45 | if [[ "${JOB_TYPE}" != "local" ]]; then 46 | if [[ -z "${ARTIFACT_DIR:-}" ]] || [[ ! -d "${ARTIFACT_DIR}" ]] || [[ ! 
-w "${ARTIFACT_DIR}" ]]; then 47 | echo '${ARTIFACT_DIR} must be set for non-local jobs, and must point to a writable directory' >&2 48 | exit 1 49 | fi 50 | curl -sS https://codecov.io/bash -o "${ARTIFACT_DIR}/codecov.sh" 51 | bash <(cat "${ARTIFACT_DIR}/codecov.sh") -Z -K -f "${COVER_PROFILE}" -r "${REPO_OWNER}/${REPO_NAME}" ${REF_FLAGS} 52 | else 53 | bash <(curl -s https://codecov.io/bash) -Z -K -f "${COVER_PROFILE}" -r "${REPO_OWNER}/${REPO_NAME}" ${REF_FLAGS} 54 | fi 55 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/configure-fips.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | REPO_ROOT=$(git rev-parse --show-toplevel) 6 | CONVENTION_DIR="$REPO_ROOT/boilerplate/openshift/golang-osd-operator" 7 | PRE_V1_SDK_MANAGER_DIR="$REPO_ROOT/cmd/manager" 8 | 9 | if [[ -d "$PRE_V1_SDK_MANAGER_DIR" ]] 10 | then 11 | MAIN_DIR=$PRE_V1_SDK_MANAGER_DIR 12 | else 13 | MAIN_DIR=$REPO_ROOT 14 | fi 15 | 16 | echo "Writing fips file at $MAIN_DIR/fips.go" 17 | 18 | cp $CONVENTION_DIR/fips.go.tmplt "$MAIN_DIR/fips.go" 19 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/csv-generate/catalog-build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source `dirname $0`/common.sh 6 | 7 | usage() { echo "Usage: $0 -o operator-name -c saas-repository-channel -r registry-image" 1>&2; exit 1; } 8 | 9 | while getopts "o:c:r:" option; do 10 | case "${option}" in 11 | o) 12 | operator_name=${OPTARG} 13 | ;; 14 | c) 15 | operator_channel=${OPTARG} 16 | ;; 17 | r) 18 | # NOTE: This is the URL without the tag/digest 19 | registry_image=${OPTARG} 20 | ;; 21 | *) 22 | usage 23 | esac 24 | done 25 | 26 | # Detect the container engine to use, allowing override from the env 27 | 
CONTAINER_ENGINE=${CONTAINER_ENGINE:-$(command -v podman || command -v docker || true)} 28 | if [[ -z "$CONTAINER_ENGINE" ]]; then 29 | echo "WARNING: Couldn't find a container engine! Defaulting to docker." 30 | CONTAINER_ENGINE=docker 31 | fi 32 | 33 | # Checking parameters 34 | check_mandatory_params operator_channel operator_name 35 | 36 | # Parameters for the Dockerfile 37 | SAAS_OPERATOR_DIR="saas-${operator_name}-bundle" 38 | BUNDLE_DIR="${SAAS_OPERATOR_DIR}/${operator_name}" 39 | DOCKERFILE_REGISTRY="build/Dockerfile.olm-registry" 40 | 41 | # Checking SAAS_OPERATOR_DIR exist 42 | if [ ! -d "${SAAS_OPERATOR_DIR}/.git" ] ; then 43 | echo "${SAAS_OPERATOR_DIR} should exist and be a git repository" 44 | exit 1 45 | fi 46 | 47 | # Calculate new operator version from bundles inside the saas directory 48 | OPERATOR_NEW_VERSION=$(ls "${BUNDLE_DIR}" | sort -t . -k 3 -g | tail -n 1) 49 | 50 | # Create package yaml 51 | # This must be included in the registry build 52 | # `currentCSV` must reference the latest bundle version included. 53 | # Any version their after `currentCSV` loaded by the initalizer 54 | # will be silently pruned as it's not reachable 55 | PACKAGE_YAML_PATH="${BUNDLE_DIR}/${operator_name}.package.yaml" 56 | 57 | cat < "${PACKAGE_YAML_PATH}" 58 | packageName: ${operator_name} 59 | channels: 60 | - name: ${operator_channel} 61 | currentCSV: ${operator_name}.v${OPERATOR_NEW_VERSION} 62 | EOF 63 | 64 | TAG="${operator_channel}-latest" 65 | if [[ "${RELEASE_BRANCHED_BUILDS}" ]]; then 66 | TAG="v${OPERATOR_NEW_VERSION}" 67 | fi 68 | 69 | ${CONTAINER_ENGINE} build --pull -f "${DOCKERFILE_REGISTRY}" --build-arg "SAAS_OPERATOR_DIR=${SAAS_OPERATOR_DIR}" --tag "${registry_image}:${TAG}" . 70 | 71 | if [ $? -ne 0 ] ; then 72 | echo "docker build failed, exiting..." 
73 | exit 1 74 | fi 75 | 76 | # TODO : Test the image and the version it contains 77 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/csv-generate/catalog-publish.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source `dirname $0`/common.sh 6 | 7 | usage() { echo "Usage: $0 -o operator-name -c saas-repository-channel -r registry-image -H operator-commit-hash -n operator-commit-number [-p]" 1>&2; exit 1; } 8 | 9 | while getopts "o:c:n:H:pr:" option; do 10 | case "${option}" in 11 | c) 12 | operator_channel=${OPTARG} 13 | ;; 14 | H) 15 | operator_commit_hash=${OPTARG} 16 | ;; 17 | n) 18 | operator_commit_number=${OPTARG} 19 | ;; 20 | o) 21 | operator_name=${OPTARG} 22 | ;; 23 | p) 24 | push_catalog=true 25 | ;; 26 | r) 27 | # NOTE: This is the URL without the tag/digest 28 | registry_image=${OPTARG} 29 | ;; 30 | *) 31 | usage 32 | esac 33 | done 34 | 35 | # Checking parameters 36 | check_mandatory_params operator_channel operator_name operator_commit_hash operator_commit_number registry_image 37 | 38 | # Calculate previous version 39 | SAAS_OPERATOR_DIR="saas-${operator_name}-bundle" 40 | BUNDLE_DIR="${SAAS_OPERATOR_DIR}/${operator_name}" 41 | OPERATOR_NEW_VERSION=$(ls "${BUNDLE_DIR}" | sort -t . -k 3 -g | tail -n 1) 42 | OPERATOR_PREV_VERSION=$(ls "${BUNDLE_DIR}" | sort -t . -k 3 -g | tail -n 2 | head -n 1) 43 | 44 | if [[ "$OPERATOR_NEW_VERSION" == "$OPERATOR_PREV_VERSION" ]]; then 45 | echo "New version and previous version are identical. Exiting." 46 | exit 1 47 | fi 48 | 49 | # Get container engine 50 | CONTAINER_ENGINE=$(command -v podman || command -v docker || true) 51 | [[ -n "$CONTAINER_ENGINE" ]] || echo "WARNING: Couldn't find a container engine. Assuming you already in a container, running unit tests." 
>&2 52 | 53 | # Set SRC container transport based on container engine 54 | if [[ "${CONTAINER_ENGINE##*/}" == "podman" ]]; then 55 | SRC_CONTAINER_TRANSPORT="containers-storage" 56 | else 57 | SRC_CONTAINER_TRANSPORT="docker-daemon" 58 | fi 59 | 60 | # Checking SAAS_OPERATOR_DIR exist 61 | if [ ! -d "${SAAS_OPERATOR_DIR}/.git" ] ; then 62 | echo "${SAAS_OPERATOR_DIR} should exist and be a git repository" 63 | exit 1 64 | fi 65 | 66 | # Read the bundle version we're attempting to publish 67 | # in the OLM catalog from the package yaml 68 | PACKAGE_YAML_PATH="${BUNDLE_DIR}/${operator_name}.package.yaml" 69 | PACKAGE_YAML_VERSION=$(awk '$1 == "currentCSV:" {print $2}' ${PACKAGE_YAML_PATH}) 70 | 71 | # Ensure we're commiting and pushing the version we think we are pushing 72 | # Since we build the bundle in catalog-build.sh this script could be run 73 | # independently and push a version we're not expecting. 74 | # if ! [ "${operator_name}.v${OPERATOR_NEW_VERSION}" = "${PACKAGE_YAML_VERSION}" ]; then 75 | # echo "You are attemping to push a bundle that's pointing to a version of this catalog you are not building" 76 | # echo "You are building version: ${operator_name}.v${OPERATOR_NEW_VERSION}" 77 | # echo "Your local package yaml version is: ${PACKAGE_YAML_VERSION}" 78 | # exit 1 79 | # fi 80 | 81 | # add, commit & push 82 | pushd "${SAAS_OPERATOR_DIR}" 83 | 84 | git add . 85 | 86 | MESSAGE="add version ${operator_commit_number}-${operator_commit_hash} 87 | 88 | replaces ${OPERATOR_PREV_VERSION} 89 | removed versions: ${REMOVED_VERSIONS}" 90 | 91 | git commit -m "${MESSAGE}" 92 | git push origin HEAD 93 | 94 | if [ $? -ne 0 ] ; then 95 | echo "git push failed, exiting..." 
96 | exit 1 97 | fi 98 | 99 | popd 100 | 101 | if [ "$push_catalog" = true ] ; then 102 | # push image 103 | if [[ "${RELEASE_BRANCHED_BUILDS}" ]]; then 104 | skopeo copy --dest-creds "${QUAY_USER}:${QUAY_TOKEN}" \ 105 | "${SRC_CONTAINER_TRANSPORT}:${registry_image}:v${OPERATOR_NEW_VERSION}" \ 106 | "docker://${registry_image}:v${OPERATOR_NEW_VERSION}" 107 | 108 | if [ $? -ne 0 ] ; then 109 | echo "skopeo push of ${registry_image}:v${OPERATOR_NEW_VERSION}-latest failed, exiting..." 110 | exit 1 111 | fi 112 | 113 | exit 0 114 | fi 115 | 116 | skopeo copy --dest-creds "${QUAY_USER}:${QUAY_TOKEN}" \ 117 | "${SRC_CONTAINER_TRANSPORT}:${registry_image}:${operator_channel}-latest" \ 118 | "docker://${registry_image}:${operator_channel}-latest" 119 | 120 | if [ $? -ne 0 ] ; then 121 | echo "skopeo push of ${registry_image}:${operator_channel}-latest failed, exiting..." 122 | exit 1 123 | fi 124 | 125 | skopeo copy --dest-creds "${QUAY_USER}:${QUAY_TOKEN}" \ 126 | "${SRC_CONTAINER_TRANSPORT}:${registry_image}:${operator_channel}-latest" \ 127 | "docker://${registry_image}:${operator_channel}-${operator_commit_hash}" 128 | 129 | if [ $? -ne 0 ] ; then 130 | echo "skopeo push of ${registry_image}:${operator_channel}-${operator_commit_hash} failed, exiting..." 131 | exit 1 132 | fi 133 | fi 134 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/csv-generate/common.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | REPO_ROOT=$(git rev-parse --show-toplevel) 4 | source $REPO_ROOT/boilerplate/_lib/common.sh 5 | 6 | function check_mandatory_params() { 7 | local csv_missing_param_error 8 | local param_name 9 | local param_val 10 | for param_name in "$@"; do 11 | eval param_val=\$$param_name 12 | if [ -z "$param_val" ]; then 13 | echo "Missing $param_name parameter" 14 | csv_missing_param_error=true 15 | fi 16 | done 17 | if [ ! 
-z "$csv_missing_param_error" ]; then 18 | usage 19 | fi 20 | } 21 | 22 | # generateImageDigest returns the image URI as repo URL + image digest 23 | function generateImageDigest() { 24 | local param_image 25 | local param_version 26 | local image_digest 27 | 28 | param_image="$1" 29 | param_version="$2" 30 | if [[ -z $param_image || -z $param_version ]]; then 31 | usage 32 | fi 33 | 34 | image_digest=$(skopeo inspect docker://${param_image}:v${param_version} | jq -r .Digest) 35 | if [[ -z "$image_digest" ]]; then 36 | echo "Couldn't discover IMAGE_DIGEST for docker://${param_image}:v${param_version}!" 37 | exit 1 38 | fi 39 | 40 | echo "${param_image}@${image_digest}" 41 | } 42 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/csv-generate/csv-generate.mk: -------------------------------------------------------------------------------- 1 | .PHONY: staging-csv-build 2 | staging-csv-build: 3 | @${CONVENTION_DIR}/csv-generate/csv-generate.sh -o $(OPERATOR_NAME) -i $(OPERATOR_IMAGE) -V $(OPERATOR_VERSION) -c staging -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -s $(SUPPLEMENTARY_IMAGE) -e $(SKIP_RANGE_ENABLED) 4 | 5 | .PHONY: staging-catalog-build 6 | staging-catalog-build: 7 | @${CONVENTION_DIR}/csv-generate/catalog-build.sh -o $(OPERATOR_NAME) -c staging -r ${REGISTRY_IMAGE} 8 | 9 | .PHONY: staging-saas-bundle-push 10 | staging-saas-bundle-push: 11 | @${CONVENTION_DIR}/csv-generate/catalog-publish.sh -o $(OPERATOR_NAME) -c staging -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -r ${REGISTRY_IMAGE} 12 | 13 | .PHONY: staging-catalog-publish 14 | staging-catalog-publish: 15 | @${CONVENTION_DIR}/csv-generate/catalog-publish.sh -o $(OPERATOR_NAME) -c staging -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -p -r ${REGISTRY_IMAGE} 16 | 17 | .PHONY: staging-catalog-build-and-publish 18 | staging-catalog-build-and-publish: 19 | @$(MAKE) -s staging-csv-build --no-print-directory 20 | @$(MAKE) -s staging-catalog-build 
--no-print-directory 21 | @$(MAKE) -s staging-catalog-publish --no-print-directory 22 | 23 | .PHONY: production-hack-csv-build 24 | production-hack-csv-build: 25 | @${CONVENTION_DIR}/csv-generate/csv-generate.sh -o $(OPERATOR_NAME) -i $(OPERATOR_IMAGE) -V $(OPERATOR_VERSION) -c production -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -s $(SUPPLEMENTARY_IMAGE) -e $(SKIP_RANGE_ENABLED) -g hack 26 | 27 | .PHONY: production-csv-build 28 | production-csv-build: 29 | @${CONVENTION_DIR}/csv-generate/csv-generate.sh -o $(OPERATOR_NAME) -i $(OPERATOR_IMAGE) -V $(OPERATOR_VERSION) -c production -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -s $(SUPPLEMENTARY_IMAGE) -e $(SKIP_RANGE_ENABLED) 30 | 31 | .PHONY: production-catalog-build 32 | production-catalog-build: 33 | @${CONVENTION_DIR}/csv-generate/catalog-build.sh -o $(OPERATOR_NAME) -c production -r ${REGISTRY_IMAGE} 34 | 35 | .PHONY: production-saas-bundle-push 36 | production-saas-bundle-push: 37 | @${CONVENTION_DIR}/csv-generate/catalog-publish.sh -o $(OPERATOR_NAME) -c production -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -r ${REGISTRY_IMAGE} 38 | 39 | .PHONY: production-catalog-publish 40 | production-catalog-publish: 41 | @${CONVENTION_DIR}/csv-generate/catalog-publish.sh -o $(OPERATOR_NAME) -c production -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -p -r ${REGISTRY_IMAGE} 42 | 43 | .PHONY: production-catalog-build-and-publish 44 | production-catalog-build-and-publish: 45 | @$(MAKE) -s production-csv-build --no-print-directory 46 | @$(MAKE) -s production-catalog-build --no-print-directory 47 | @$(MAKE) -s production-catalog-publish --no-print-directory 48 | 49 | .PHONY: stable-csv-build 50 | stable-csv-build: 51 | @${CONVENTION_DIR}/csv-generate/csv-generate.sh -o $(OPERATOR_NAME) -i $(OPERATOR_IMAGE) -V $(OPERATOR_VERSION) -c stable -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -s $(SUPPLEMENTARY_IMAGE) -e $(SKIP_RANGE_ENABLED) 52 | 53 | .PHONY: stable-catalog-build 54 | stable-catalog-build: 55 | 
@${CONVENTION_DIR}/csv-generate/catalog-build.sh -o $(OPERATOR_NAME) -c stable -r ${REGISTRY_IMAGE} 56 | 57 | .PHONY: stable-saas-bundle-push 58 | stable-saas-bundle-push: 59 | @${CONVENTION_DIR}/csv-generate/catalog-publish.sh -o $(OPERATOR_NAME) -c stable -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -r ${REGISTRY_IMAGE} 60 | 61 | .PHONY: stable-catalog-publish 62 | stable-catalog-publish: 63 | @${CONVENTION_DIR}/csv-generate/catalog-publish.sh -o $(OPERATOR_NAME) -c stable -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -p -r ${REGISTRY_IMAGE} 64 | 65 | .PHONY: stable-catalog-build-and-publish 66 | stable-catalog-build-and-publish: 67 | @$(MAKE) -s stable-csv-build --no-print-directory 68 | @$(MAKE) -s stable-catalog-build --no-print-directory 69 | @$(MAKE) -s stable-catalog-publish --no-print-directory 70 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "docker" 4 | directory: "/build" 5 | labels: 6 | - "area/dependency" 7 | - "ok-to-test" 8 | schedule: 9 | interval: "weekly" 10 | ignore: 11 | - dependency-name: "redhat-services-prod/openshift/boilerplate" 12 | # don't upgrade boilerplate via these means 13 | - dependency-name: "openshift4/ose-operator-registry" 14 | # don't upgrade ose-operator-registry via these means 15 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/ensure.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eo pipefail 3 | 4 | if [ "$BOILERPLATE_SET_X" ]; then 5 | set -x 6 | fi 7 | 8 | REPO_ROOT=$(git rev-parse --show-toplevel) 9 | source $REPO_ROOT/boilerplate/_lib/common.sh 10 | 11 | GOLANGCI_LINT_VERSION="2.0.2" 12 | OPM_VERSION="v1.23.2" 13 | GRPCURL_VERSION="1.7.0" 14 | 
DEPENDENCY=${1:-} 15 | GOOS=$(go env GOOS) 16 | 17 | case "${DEPENDENCY}" in 18 | 19 | golangci-lint) 20 | GOPATH=$(go env GOPATH) 21 | if which golangci-lint ; then 22 | exit 23 | else 24 | mkdir -p "${GOPATH}/bin" 25 | if ! echo "${PATH}" | grep -q "${GOPATH}/bin"; then 26 | echo "${GOPATH}/bin not in $PATH" 27 | exit 1 28 | fi 29 | DOWNLOAD_URL="https://github.com/golangci/golangci-lint/releases/download/v${GOLANGCI_LINT_VERSION}/golangci-lint-${GOLANGCI_LINT_VERSION}-${GOOS}-amd64.tar.gz" 30 | curl -sfL "${DOWNLOAD_URL}" | tar -C "${GOPATH}/bin" -zx --strip-components=1 "golangci-lint-${GOLANGCI_LINT_VERSION}-${GOOS}-amd64/golangci-lint" 31 | fi 32 | ;; 33 | 34 | opm) 35 | mkdir -p .opm/bin 36 | cd .opm/bin 37 | 38 | if [[ -x ./opm && "$(opm_version ./opm)" == "$OPM_VERSION" ]]; then 39 | exit 0 40 | fi 41 | 42 | if which opm && [[ "$(opm_version $(which opm))" == "$OPM_VERSION" ]]; then 43 | opm=$(realpath $(which opm)) 44 | else 45 | opm="opm-$OPM_VERSION-$GOOS-amd64" 46 | opm_download_url="https://github.com/operator-framework/operator-registry/releases/download/$OPM_VERSION/$GOOS-amd64-opm" 47 | curl -sfL "${opm_download_url}" -o "$opm" 48 | chmod +x "$opm" 49 | fi 50 | 51 | ln -fs "$opm" opm 52 | ;; 53 | 54 | grpcurl) 55 | mkdir -p .grpcurl/bin 56 | cd .grpcurl/bin 57 | 58 | if [[ -x ./grpcurl && "$(grpcurl_version ./grpcurl)" == "$GRPCURL_VERSION" ]]; then 59 | exit 0 60 | fi 61 | 62 | if which grpcurl && [[ "$(grpcurl_version $(which grpcurl))" == "$GRPCURL_VERSION" ]]; then 63 | grpcurl=$(realpath $(which grpcurl)) 64 | else 65 | # mapping from https://github.com/fullstorydev/grpcurl/blob/master/.goreleaser.yml 66 | [[ "$GOOS" == "darwin" ]] && os=osx || os="$GOOS" 67 | grpcurl="grpcurl-$GRPCURL_VERSION-$os-x86_64" 68 | grpcurl_download_url="https://github.com/fullstorydev/grpcurl/releases/download/v$GRPCURL_VERSION/grpcurl_${GRPCURL_VERSION}_${os}_x86_64.tar.gz" 69 | curl -sfL "$grpcurl_download_url" | tar -xzf - -O grpcurl > "$grpcurl" 70 | chmod +x 
"$grpcurl" 71 | fi 72 | 73 | ln -fs "$grpcurl" grpcurl 74 | ;; 75 | 76 | venv) 77 | # Set up a python virtual environment 78 | python3 -m venv .venv 79 | # Install required libs, if a requirements file was given 80 | if [[ -n "$2" ]]; then 81 | .venv/bin/python3 -m pip install -r "$2" 82 | fi 83 | ;; 84 | 85 | *) 86 | echo "Unknown dependency: ${DEPENDENCY}" 87 | exit 1 88 | ;; 89 | esac 90 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/fips.go.tmplt: -------------------------------------------------------------------------------- 1 | //go:build fips_enabled 2 | // +build fips_enabled 3 | 4 | // BOILERPLATE GENERATED -- DO NOT EDIT 5 | // Run 'make ensure-fips' to regenerate 6 | 7 | package main 8 | 9 | import ( 10 | _ "crypto/tls/fipsonly" 11 | "fmt" 12 | ) 13 | 14 | func init() { 15 | fmt.Println("***** Starting with FIPS crypto enabled *****") 16 | } 17 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/golangci.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | run: 3 | concurrency: 10 4 | linters: 5 | default: none 6 | enable: 7 | - errcheck 8 | - gosec 9 | - govet 10 | - ineffassign 11 | - misspell 12 | - staticcheck 13 | - unused 14 | settings: 15 | misspell: 16 | extra-words: 17 | - typo: openshit 18 | correction: OpenShift 19 | exclusions: 20 | generated: lax 21 | presets: 22 | - comments 23 | - common-false-positives 24 | - legacy 25 | - std-error-handling 26 | paths: 27 | - third_party/ 28 | - builtin/ 29 | - examples/ 30 | issues: 31 | max-issues-per-linter: 0 32 | max-same-issues: 0 33 | formatters: 34 | exclusions: 35 | generated: lax 36 | paths: 37 | - third_party/ 38 | - builtin/ 39 | - examples/ 40 | -------------------------------------------------------------------------------- 
/boilerplate/openshift/golang-osd-operator/migrate_build_pipeline.py: -------------------------------------------------------------------------------- 1 | import yaml 2 | import sys 3 | 4 | file_path = sys.argv[1] 5 | with open(file_path, 'r') as f: 6 | data = yaml.safe_load(f) 7 | 8 | spec = data.get('spec', {}) 9 | 10 | # Remove pipelineSpec and taskRunSpecs 11 | spec.pop('pipelineSpec', None) 12 | spec.pop('taskRunSpecs', None) 13 | 14 | # Add pipelineRef 15 | spec['pipelineRef'] = { 16 | 'resolver': 'git', 17 | 'params': [ 18 | {'name': 'url', 'value': 'https://github.com/openshift/boilerplate'}, 19 | {'name': 'revision', 'value': 'master'}, 20 | {'name': 'pathInRepo', 'value': 'pipelines/docker-build-oci-ta/pipeline.yaml'} 21 | ] 22 | } 23 | 24 | # Write back 25 | with open(file_path, 'w') as f: 26 | yaml.dump(data, f, default_flow_style=False, sort_keys=False) 27 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/project.mk: -------------------------------------------------------------------------------- 1 | # Project specific values 2 | OPERATOR_NAME?=$(shell sed -n 's/.*OperatorName .*"\([^"]*\)".*/\1/p' config/config.go) 3 | OPERATOR_NAMESPACE?=$(shell sed -n 's/.*OperatorNamespace .*"\([^"]*\)".*/\1/p' config/config.go) 4 | 5 | IMAGE_REGISTRY?=quay.io 6 | IMAGE_REPOSITORY?=app-sre 7 | IMAGE_NAME?=$(OPERATOR_NAME) 8 | 9 | # Optional additional deployment image 10 | SUPPLEMENTARY_IMAGE_NAME?=$(shell sed -n 's/.*SupplementaryImage .*"\([^"]*\)".*/\1/p' config/config.go) 11 | 12 | # Optional: Enable OLM skip-range 13 | # https://v0-18-z.olm.operatorframework.io/docs/concepts/olm-architecture/operator-catalog/creating-an-update-graph/#skiprange 14 | EnableOLMSkipRange?=$(shell sed -n 's/.*EnableOLMSkipRange .*"\([^"]*\)".*/\1/p' config/config.go) 15 | 16 | VERSION_MAJOR?=0 17 | VERSION_MINOR?=1 18 | 19 | ifdef RELEASE_BRANCHED_BUILDS 20 | # Make sure all called shell scripts know what's 
up 21 | export RELEASE_BRANCHED_BUILDS 22 | 23 | # RELEASE_BRANCH from env vars takes precedence; if not set, try to figure it out 24 | RELEASE_BRANCH:=${RELEASE_BRANCH} 25 | ifneq ($(RELEASE_BRANCH),) 26 | # Sanity check, just to be nice 27 | RELEASE_BRANCH_TEST := $(shell echo ${RELEASE_BRANCH} | grep -E '^release-[0-9]+\.[0-9]+$$') 28 | ifeq ($(RELEASE_BRANCH_TEST),) 29 | $(warning Provided RELEASE_BRANCH doesn't conform to "release-X.Y" pattern; you sure you didn't make a mistake?) 30 | endif 31 | endif 32 | 33 | ifeq ($(RELEASE_BRANCH),) 34 | # Check git repo's branch first 35 | RELEASE_BRANCH := $(shell git rev-parse --abbrev-ref HEAD | grep -E '^release-[0-9]+\.[0-9]+$$') 36 | endif 37 | 38 | ifeq ($(RELEASE_BRANCH),) 39 | # Try to parse it out of Jenkins' JOB_NAME 40 | RELEASE_BRANCH := $(shell echo ${JOB_NAME} | grep -E --only-matching 'release-[0-9]+\.[0-9]+') 41 | endif 42 | 43 | ifeq ($(RELEASE_BRANCH),) 44 | $(error RELEASE_BRANCHED_BUILDS is set, but couldn't detect a release branch and RELEASE_BRANCH is not set; giving up) 45 | else 46 | SEMVER := $(subst release-,,$(subst ., ,$(RELEASE_BRANCH))) 47 | VERSION_MAJOR := $(firstword $(SEMVER)) 48 | VERSION_MINOR := $(lastword $(SEMVER)) 49 | endif 50 | endif 51 | 52 | REGISTRY_USER?=$(QUAY_USER) 53 | REGISTRY_TOKEN?=$(QUAY_TOKEN) 54 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/prow-config: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | REPO_ROOT=$(git rev-parse --show-toplevel) 6 | source $REPO_ROOT/boilerplate/_lib/common.sh 7 | source $REPO_ROOT/boilerplate/_lib/release.sh 8 | 9 | cmd=${0##*/} 10 | 11 | usage() { 12 | cat < $config_dir/$config 54 | build_root: 55 | from_repository: true 56 | images: 57 | - dockerfile_path: build/Dockerfile 58 | to: unused 59 | resources: 60 | '*': 61 | limits: 62 | memory: 4Gi 63 | requests: 64 | cpu: 100m 65 
| memory: 200Mi 66 | tests: 67 | - as: e2e-binary-build-success 68 | commands: | 69 | make e2e-binary-build 70 | container: 71 | from: src 72 | run_if_changed: ^(test/e2e/\.*|go\.mod|go\.sum)$ 73 | - as: coverage 74 | commands: | 75 | export CODECOV_TOKEN=\$(cat /tmp/secret/CODECOV_TOKEN) 76 | make coverage 77 | container: 78 | from: src 79 | skip_if_only_changed: ^(?:\.tekton|\.github)|\.md$|^(?:\.gitignore|OWNERS|LICENSE)$ 80 | secret: 81 | mount_path: /tmp/secret 82 | name: ${CONSUMER_NAME}-codecov-token 83 | - as: publish-coverage 84 | commands: | 85 | export CODECOV_TOKEN=\$(cat /tmp/secret/CODECOV_TOKEN) 86 | make coverage 87 | container: 88 | from: src 89 | postsubmit: true 90 | secret: 91 | mount_path: /tmp/secret 92 | name: ${CONSUMER_NAME}-codecov-token 93 | - as: lint 94 | commands: make lint 95 | container: 96 | from: src 97 | skip_if_only_changed: ^(?:\.tekton|\.github)|\.md$|^(?:\.gitignore|OWNERS|LICENSE)$ 98 | - as: test 99 | commands: make test 100 | container: 101 | from: src 102 | skip_if_only_changed: ^(?:\.tekton|\.github)|\.md$|^(?:\.gitignore|OWNERS|LICENSE)$ 103 | - as: validate 104 | commands: make validate 105 | container: 106 | from: src 107 | skip_if_only_changed: ^(?:\.tekton|\.github)|\.md$|^(?:\.gitignore|OWNERS|LICENSE)$ 108 | zz_generated_metadata: 109 | branch: ${DEFAULT_BRANCH} 110 | org: ${CONSUMER_ORG} 111 | repo: ${CONSUMER_NAME} 112 | EOF 113 | 114 | make jobs 115 | 116 | release_done_msg $release_branch 117 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/py-requirements.txt: -------------------------------------------------------------------------------- 1 | pyyaml>=5.3.1 2 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/rvmo-bundle.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | 
REPOSITORY=${REPOSITORY:-"https://github.com/openshift/managed-release-bundle-osd.git"} 6 | CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD|egrep '^main$|^release-[0-9]+\.[0-9]+$'|cat) 7 | RVMO_BRANCH=${CURRENT_BRANCH:-main} 8 | # You can override any branch detection by setting RELEASE_BRANCH 9 | BRANCH=${RELEASE_BRANCH:-$RVMO_BRANCH} 10 | DELETE_TEMP_DIR=${DELETE_TEMP_DIR:-true} 11 | TMPD=$(mktemp -d -t rvmo-bundle.XXXXXX) 12 | [[ "${DELETE_TEMP_DIR}" == "true" ]] && trap 'rm -rf ${TMPD}' EXIT 13 | 14 | cd "${TMPD}" 15 | echo "Cloning RVMO from ${REPOSITORY}:${BRANCH}" 16 | git clone --single-branch -b "${BRANCH}" "${REPOSITORY}" . 17 | bash hack/update-operator-release.sh 18 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/validate-yaml.py: -------------------------------------------------------------------------------- 1 | # Usage 2 | # python validate-yaml.py path/to/file/or/dir 3 | 4 | import sys 5 | import yaml 6 | from os import listdir 7 | from os.path import isdir, isfile, join, splitext 8 | 9 | usage = "Usage: {0:s} path/to/file/or/dir...".format(sys.argv[0]) 10 | 11 | if len(sys.argv) < 2: 12 | print(usage) 13 | sys.exit(0) 14 | 15 | input_paths = sys.argv[1:] 16 | 17 | error = False 18 | 19 | for path in input_paths: 20 | if isfile(path): 21 | files = [path] 22 | elif isdir(path): 23 | files = [join(path, f) for f in listdir(path) if isfile(join(path, f))] 24 | else: 25 | print("Path {0:s} does not exist".format(path)) 26 | error=True 27 | continue 28 | 29 | for file_path in files: 30 | _, ext = splitext(file_path) 31 | if ext not in [".yml", ".yaml"]: 32 | continue 33 | 34 | print("Validating YAML {}".format(file_path)) 35 | with open(file_path, "r") as f: 36 | data = f.read() 37 | try: 38 | for y in yaml.safe_load_all(data): 39 | pass 40 | except Exception as e: 41 | print(e) 42 | error = True 43 | 44 | sys.exit(error) 45 | 
-------------------------------------------------------------------------------- /boilerplate/update.cfg: -------------------------------------------------------------------------------- 1 | openshift/golang-osd-operator 2 | openshift/golang-osd-e2e 3 | -------------------------------------------------------------------------------- /build/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM quay.io/redhat-services-prod/openshift/boilerplate:image-v8.2.0 AS builder 2 | 3 | RUN mkdir -p /workdir 4 | WORKDIR /workdir 5 | COPY go.mod go.sum ./ 6 | RUN go mod download 7 | COPY . . 8 | RUN make go-build 9 | 10 | #### 11 | FROM registry.access.redhat.com/ubi9/ubi-minimal:9.6-1760515502 12 | 13 | ENV USER_UID=1001 \ 14 | USER_NAME=cloud-ingress-operator 15 | 16 | COPY --from=builder /workdir/build/_output/bin/* /usr/local/bin/ 17 | 18 | COPY build/bin /usr/local/bin 19 | RUN /usr/local/bin/user_setup 20 | 21 | ENTRYPOINT ["/usr/local/bin/entrypoint"] 22 | 23 | USER ${USER_UID} 24 | 25 | LABEL io.openshift.managed.name="cloud-ingress-operator" \ 26 | io.openshift.managed.description="Operator to manage cloud ingress." 
27 | -------------------------------------------------------------------------------- /build/Dockerfile.olm-registry: -------------------------------------------------------------------------------- 1 | FROM registry.redhat.io/openshift4/ose-operator-registry-rhel9:v4.19 AS builder 2 | ARG SAAS_OPERATOR_DIR 3 | COPY ${SAAS_OPERATOR_DIR} manifests 4 | RUN initializer --permissive 5 | 6 | # ubi-micro does not work for clusters with fips enabled unless we make OpenSSL available 7 | FROM registry.access.redhat.com/ubi9/ubi-minimal:9.6-1760515502 8 | 9 | COPY --from=builder /bin/registry-server /bin/registry-server 10 | COPY --from=builder /bin/grpc_health_probe /bin/grpc_health_probe 11 | COPY --from=builder /bin/initializer /bin/initializer 12 | 13 | WORKDIR /registry 14 | RUN chgrp -R 0 /registry && chmod -R g+rwx /registry 15 | 16 | USER 1001 17 | 18 | COPY --from=builder /registry /registry 19 | 20 | EXPOSE 50051 21 | 22 | CMD ["registry-server", "-t", "/tmp/terminate.log"] 23 | 24 | # Set the DC specific label for the location of the DC database in the image 25 | LABEL operators.operatorframework.io.index.database.v1=/registry/bundles.db 26 | -------------------------------------------------------------------------------- /build/bin/entrypoint: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | # This is documented here: 4 | # https://docs.openshift.com/container-platform/3.11/creating_images/guidelines.html#openshift-specific-guidelines 5 | 6 | if ! 
whoami >/dev/null 2>&1; then
  # POSIX redirection instead of the bash-only `&>`: the shebang is /bin/sh,
  # where `&>` would background the command rather than redirect stderr.
  if [ -w /etc/passwd ]; then
    echo "${USER_NAME:-cloud-ingress-operator}:x:$(id -u):$(id -g):${USER_NAME:-cloud-ingress-operator} user:${HOME}:/sbin/nologin" >> /etc/passwd
  fi
fi

# Quote "$@" so operator arguments with spaces are passed through intact.
# ${OPERATOR} is left unquoted on purpose — presumably it may carry extra
# words; TODO confirm against the deployment that sets it.
exec ${OPERATOR} "$@"
--------------------------------------------------------------------------------
/build/bin/user_setup:
--------------------------------------------------------------------------------
#!/bin/sh
set -x

# ensure $HOME exists and is accessible by group 0 (we don't know what the runtime UID will be)
mkdir -p ${HOME}
chown ${USER_UID}:0 ${HOME}
chmod ug+rwx ${HOME}

# runtime user will need to be able to self-insert in /etc/passwd
chmod g+rw /etc/passwd

# no need for this script to remain in the image after running
rm $0
--------------------------------------------------------------------------------
/config/config.go:
--------------------------------------------------------------------------------
package config

const (
	// AdminAPIName is the name of the API endpoint for non-customer use (eg Hive)
	AdminAPIName string = "rh-api"

	// AdminAPISecurityGroupName is the name of the Security Group for the Admin API
	AdminAPISecurityGroupName string = "rh-api"

	// CloudAdminAPILoadBalancerName is the cloud provider identifier for the load
	// balancer for admin API endpoint
	CloudAdminAPILoadBalancerName string = "rh-api"

	// CustomerAPIName is the name of the API endpoint for customer use
	CustomerAPIName string = "api"

	// ExternalCloudAPILBNameSuffix is the cloud provider name suffix (eg aext, ext,
	// aint) for the default external API load balancer. This is not used by
	// AdminAPIName
	ExternalCloudAPILBNameSuffix string = "ext"

	// InternalCloudAPILBNameSuffix is the cloud provider name suffix (eg aext, ext,
	// aint) for the default internal API load balancer.
24 | InternalCloudAPILBNameSuffix string = "int" 25 | 26 | // InternalServicesTargetGroupSuffix internal services target group suffix 27 | InternalServicesTargetGroupSuffix string = "sint" 28 | // InternalAPITargetGroupSuffix internal api target group suffix 29 | InternalAPITargetGroupSuffix string = "aint" 30 | // ExternalAPITargetGroupSuffix external api target group suffix 31 | ExternalAPITargetGroupSuffix string = "aext" 32 | 33 | // OperatorName is the name of this operator 34 | OperatorName string = "cloud-ingress-operator" 35 | 36 | // KubeConfigNamespace is where to find the cluster-config 37 | KubeConfigNamespace string = "kube-system" 38 | 39 | // KubeConfigConfigMapName is the config blob for the cluster, containing region 40 | // availability zone, networking information, base domain, cluster name and more 41 | KubeConfigConfigMapName string = "cluster-config-v1" 42 | 43 | // AdminAPIListenerPort 44 | AdminAPIListenerPort int64 = 6443 45 | 46 | // MaxAPIRetries 47 | MaxAPIRetries int = 10 48 | 49 | // AWSSecretName 50 | AWSSecretName string = "cloud-ingress-operator-credentials-aws" //#nosec G101 -- This is a false positive 51 | 52 | // GCPSecretName 53 | GCPSecretName string = "cloud-ingress-operator-credentials-gcp" //#nosec G101 -- This is a false positive 54 | 55 | // OperatorNamespace 56 | OperatorNamespace string = "openshift-cloud-ingress-operator" 57 | 58 | // olm.skipRange annotation added to CSV --SREP-96 59 | EnableOLMSkipRange string = "true" 60 | ) 61 | -------------------------------------------------------------------------------- /config/metadata/additional-labels.txt: -------------------------------------------------------------------------------- 1 | LABEL com.redhat.component="openshift-cloud-ingress-operator" io.k8s.description="..." description="..." distribution-scope="public" name="openshift/cloud-ingress-operator" url="https://github.com/openshift/cloud-ingress-operator" vendor="Red Hat, Inc." 
release="v0.0.0" version="v0.0.0" 2 | -------------------------------------------------------------------------------- /config/templates/csv-template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: operators.coreos.com/v1alpha1 2 | kind: ClusterServiceVersion 3 | metadata: 4 | name: cloud-ingress-operator-0.0.1 5 | namespace: placeholder 6 | annotations: 7 | categories: A list of comma separated categories that your operator falls under. 8 | certified: "false" 9 | description: Operator to Manage Cloud Ingress for OpenShift Clusters. 10 | containerImage: quay.io/app-sre/cloud-ingress-operator:latest 11 | createdAt: "2019-04-10T17:34:33Z" 12 | support: Red Hat SRE 13 | spec: 14 | displayName: cloud-ingress-operator 15 | description: Operator to Manage Cloud Ingress for OpenShift Clusters. 16 | keywords: 17 | - kubernetes 18 | - ingress 19 | - openshift 20 | - multi-cluster 21 | - cluster 22 | version: 0.0.1 23 | provider: 24 | name: Red Hat, Inc 25 | maturity: alpha 26 | installModes: 27 | - type: OwnNamespace 28 | supported: true 29 | - type: SingleNamespace 30 | supported: true 31 | - type: MultiNamespace 32 | supported: false 33 | - type: AllNamespaces 34 | supported: false 35 | install: 36 | strategy: deployment 37 | spec: 38 | clusterPermissions: 39 | - serviceAccountName: cloud-ingress-operator 40 | # Rules will be added here by the generate-csv.py script. 41 | deployments: 42 | - name: cloud-ingress-operator 43 | # Deployment spec will be added here by the generate-csv.py script. 44 | customresourcedefinitions: 45 | owned: 46 | # CRD's will be added here by the generate-operator-bundle.py 47 | -------------------------------------------------------------------------------- /controllers/apischeme/apischeme_controller_suite_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 
3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package apischeme 18 | 19 | import ( 20 | "testing" 21 | 22 | . "github.com/onsi/ginkgo" 23 | . "github.com/onsi/gomega" 24 | //+kubebuilder:scaffold:imports 25 | ) 26 | 27 | // These tests use Ginkgo (BDD-style Go testing framework). Refer to 28 | // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 29 | 30 | func TestAPIs(t *testing.T) { 31 | RegisterFailHandler(Fail) 32 | 33 | RunSpecs(t, "Controller Suite") 34 | } 35 | -------------------------------------------------------------------------------- /controllers/apischeme/apischeme_controller_test.go: -------------------------------------------------------------------------------- 1 | package apischeme 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/openshift/cloud-ingress-operator/pkg/testutils" 8 | 9 | baseutils "github.com/openshift/cloud-ingress-operator/pkg/utils" 10 | "k8s.io/apimachinery/pkg/runtime" 11 | ) 12 | 13 | func TestClusterBaseDomain(t *testing.T) { 14 | aObj := testutils.CreateAPISchemeObject("rh-api", true, []string{"0.0.0.0/0"}) 15 | masterNames := make([]string, 3) 16 | for i := 0; i < 3; i++ { 17 | masterNames[i] = fmt.Sprintf("master-%d", i) 18 | } 19 | machineList, _ := testutils.CreateMachineObjectList(masterNames, "basename", "master", testutils.DefaultRegionName, testutils.DefaultAzName) 20 | infraObj := testutils.CreateInfraObject("basename", testutils.DefaultAPIEndpoint, 
testutils.DefaultAPIEndpoint, testutils.DefaultRegionName) 21 | objs := []runtime.Object{aObj, infraObj, machineList} 22 | mocks := testutils.NewTestMock(t, objs) 23 | 24 | base, err := baseutils.GetClusterBaseDomain(mocks.FakeKubeClient) 25 | if err != nil { 26 | t.Fatalf("Could not get cluster base domain name: %v", err) 27 | } 28 | if base != "unit.test" { 29 | t.Fatalf("Base domain mismatch. Expected %s, got %s", "unit.test", base) 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /controllers/publishingstrategy/finalizer.go: -------------------------------------------------------------------------------- 1 | package publishingstrategy 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/go-logr/logr" 8 | localctlutils "github.com/openshift/cloud-ingress-operator/pkg/controllerutils" 9 | "github.com/openshift/cloud-ingress-operator/pkg/ingresscontroller" 10 | ) 11 | 12 | // addFinalizer adds Finalizer to an IngressController 13 | func (r *PublishingStrategyReconciler) addFinalizer(reqLogger logr.Logger, ingressController *ingresscontroller.IngressController, finalizer string) error { 14 | reqLogger.Info(fmt.Sprintf("Adding Finalizer %v for the IngressController %v", finalizer, ingressController.Name)) 15 | ingressController.SetFinalizers(append(ingressController.GetFinalizers(), finalizer)) 16 | 17 | // Update CR 18 | err := r.Client.Update(context.TODO(), ingressController) 19 | if err != nil { 20 | reqLogger.Error(err, "Failed to update IngressController with finalizer") 21 | return err 22 | } 23 | return nil 24 | } 25 | 26 | // removeFinalizer removes a Finalizer from an IngressController 27 | func (r *PublishingStrategyReconciler) removeFinalizer(reqLogger logr.Logger, ingressController *ingresscontroller.IngressController, finalizer string) error { 28 | reqLogger.Info(fmt.Sprintf("Removing Finalizer %v for the IngressController %v", finalizer, ingressController.Name)) 29 | 
ingressController.SetFinalizers(localctlutils.Remove(ingressController.GetFinalizers(), finalizer)) 30 | 31 | // Update CR 32 | err := r.Client.Update(context.TODO(), ingressController) 33 | if err != nil { 34 | reqLogger.Error(err, fmt.Sprintf("Failed to remove Finalizer %v", finalizer)) 35 | return err 36 | } 37 | return nil 38 | } 39 | -------------------------------------------------------------------------------- /controllers/publishingstrategy/publishingstrategy_controller_suite_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package publishingstrategy 18 | 19 | import ( 20 | "testing" 21 | 22 | . "github.com/onsi/ginkgo" 23 | . "github.com/onsi/gomega" 24 | //+kubebuilder:scaffold:imports 25 | ) 26 | 27 | // These tests use Ginkgo (BDD-style Go testing framework). Refer to 28 | // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 29 | 30 | func TestAPIs(t *testing.T) { 31 | RegisterFailHandler(Fail) 32 | 33 | RunSpecs(t, "Controller Suite") 34 | } 35 | -------------------------------------------------------------------------------- /controllers/routerservice/routerservice_controller.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 
3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package routerservice 18 | 19 | import ( 20 | "context" 21 | 22 | baseutils "github.com/openshift/cloud-ingress-operator/pkg/utils" 23 | corev1 "k8s.io/api/core/v1" 24 | "k8s.io/apimachinery/pkg/api/errors" 25 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 26 | "sigs.k8s.io/controller-runtime/pkg/event" 27 | logf "sigs.k8s.io/controller-runtime/pkg/log" 28 | "sigs.k8s.io/controller-runtime/pkg/predicate" 29 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 30 | 31 | "k8s.io/apimachinery/pkg/runtime" 32 | ctrl "sigs.k8s.io/controller-runtime" 33 | "sigs.k8s.io/controller-runtime/pkg/client" 34 | ) 35 | 36 | var log = logf.Log.WithName("controller_router_service") 37 | 38 | const ( 39 | RouterServiceNamespace = "openshift-ingress" 40 | ELBAnnotationKey = "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout" 41 | ELBAnnotationValue = "1800" 42 | ) 43 | 44 | // RouterServiceReconciler reconciles a RouterService object 45 | type RouterServiceReconciler struct { 46 | Client client.Client 47 | Scheme *runtime.Scheme 48 | } 49 | 50 | // Reconcile is part of the main kubernetes reconciliation loop which aims to 51 | // move the current state of the cluster closer to the desired state. 
52 | // TODO(user): Modify the Reconcile function to compare the state specified by 53 | // the RouterService object against the actual cluster state, and then 54 | // perform operations to make the cluster state reflect the state specified by 55 | // the user. 56 | // 57 | // For more details, check Reconcile and its Result here: 58 | // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.11.2/pkg/reconcile 59 | 60 | // Reconcile reads that state of the cluster for a RouterService object and makes changes based on the state read 61 | // and what is in the Service.Spec 62 | // Note: 63 | // The Controller will requeue the Request to be processed again if the returned error is non-nil or 64 | // Result.Requeue is true, otherwise upon completion it will remove the work from the queue. 65 | func (r *RouterServiceReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) { 66 | reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name) 67 | 68 | // Fetch the Service 69 | svc := &corev1.Service{} 70 | err := r.Client.Get(context.TODO(), request.NamespacedName, svc) 71 | if err != nil { 72 | if errors.IsNotFound(err) { 73 | // Request object not found, could have been deleted after reconcile request. 74 | // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. 75 | // Return and don't requeue 76 | return reconcile.Result{}, nil 77 | } 78 | // Error reading the object - requeue the request. 79 | return reconcile.Result{}, err 80 | } 81 | 82 | // Only check LoadBalancer service types for annotations 83 | // Only set timeout annotations on services for < OCP 4.11. 
In 4.11+, the cluster-ingress-operator maintains this annotation 84 | if svc.Spec.Type == corev1.ServiceTypeLoadBalancer && !baseutils.IsVersionHigherThan("4.11") { 85 | if !metav1.HasAnnotation(svc.ObjectMeta, ELBAnnotationKey) || 86 | svc.Annotations[ELBAnnotationKey] != ELBAnnotationValue { 87 | reqLogger.Info("Updating annotation for " + svc.Name) 88 | metav1.SetMetaDataAnnotation(&svc.ObjectMeta, ELBAnnotationKey, ELBAnnotationValue) 89 | err = r.Client.Update(context.TODO(), svc) 90 | if err != nil { 91 | reqLogger.Error(err, "Error updating service annotation") 92 | return reconcile.Result{}, err 93 | } 94 | } else { 95 | reqLogger.Info("skipping service " + svc.Name + " w/ proper annotations") 96 | } 97 | } 98 | 99 | return reconcile.Result{}, nil 100 | } 101 | 102 | // Only filter on services in the openshift-ingress namespace and create/update events 103 | func eventPredicates() predicate.Predicate { 104 | return predicate.Funcs{ 105 | CreateFunc: func(e event.CreateEvent) bool { 106 | return e.Object.GetNamespace() == RouterServiceNamespace 107 | 108 | }, 109 | UpdateFunc: func(e event.UpdateEvent) bool { 110 | return e.ObjectNew.GetNamespace() == RouterServiceNamespace 111 | }, 112 | } 113 | } 114 | 115 | // SetupWithManager sets up the controller with the Manager. 116 | func (r *RouterServiceReconciler) SetupWithManager(mgr ctrl.Manager) error { 117 | return ctrl.NewControllerManagedBy(mgr). 118 | For(&corev1.Service{}). 119 | WithEventFilter(eventPredicates()). 120 | Complete(r) 121 | } 122 | -------------------------------------------------------------------------------- /controllers/routerservice/routerservice_controller_suite_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 
6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package routerservice 18 | 19 | import ( 20 | "testing" 21 | 22 | . "github.com/onsi/ginkgo" 23 | . "github.com/onsi/gomega" 24 | //+kubebuilder:scaffold:imports 25 | ) 26 | 27 | // These tests use Ginkgo (BDD-style Go testing framework). Refer to 28 | // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 29 | 30 | func TestAPIs(t *testing.T) { 31 | RegisterFailHandler(Fail) 32 | 33 | RunSpecs(t, "Controller Suite") 34 | } 35 | -------------------------------------------------------------------------------- /controllers/routerservice/routerservice_controller_test.go: -------------------------------------------------------------------------------- 1 | package routerservice 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | corev1 "k8s.io/api/core/v1" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | "k8s.io/apimachinery/pkg/types" 10 | "k8s.io/client-go/kubernetes/scheme" 11 | "sigs.k8s.io/controller-runtime/pkg/client/fake" 12 | logf "sigs.k8s.io/controller-runtime/pkg/log" 13 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 14 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 15 | ) 16 | 17 | // TestRouterServiceController runs ReconcileRouterService.Reconcile() against a 18 | // fake client that tracks a Service object. 19 | func TestRouterServiceController(t *testing.T) { 20 | // Set the logger to development mode for verbose logs. 
21 | logf.SetLogger(zap.New()) 22 | 23 | var ( 24 | name = "router-default" 25 | namespace = "openshift-ingress" 26 | ) 27 | 28 | // router-default service 29 | routerDefaultSvc := &corev1.Service{ 30 | ObjectMeta: metav1.ObjectMeta{ 31 | Name: name, 32 | Namespace: namespace, 33 | }, 34 | Spec: corev1.ServiceSpec{ 35 | Type: corev1.ServiceTypeLoadBalancer, 36 | }, 37 | } 38 | 39 | // Register operator types with the runtime scheme. 40 | s := scheme.Scheme 41 | 42 | // Create a fake client to mock API calls. 43 | cl := fake. 44 | NewClientBuilder(). 45 | WithScheme(s). 46 | WithObjects(routerDefaultSvc). 47 | Build() 48 | 49 | s.AddKnownTypes(corev1.SchemeGroupVersion, routerDefaultSvc) 50 | 51 | log.Info("Creating ReconcileRouterService") 52 | // Create a ReconcileRouterService object with the scheme and fake client. 53 | r := &RouterServiceReconciler{Client: cl, Scheme: s} 54 | 55 | // Mock request to simulate Reconcile() being called on an event for a 56 | // watched resource . 57 | req := reconcile.Request{ 58 | NamespacedName: types.NamespacedName{ 59 | Name: name, 60 | Namespace: namespace, 61 | }, 62 | } 63 | log.Info("Calling Reconcile()") 64 | res, err := r.Reconcile(context.TODO(), req) 65 | if err != nil { 66 | t.Fatalf("reconcile: (%v)", err) 67 | } 68 | 69 | // Check the result of reconciliation to make sure it has the desired state. 70 | if res.Requeue { 71 | t.Error("reconcile requeue which is not expected") 72 | } 73 | 74 | // Reconcile again so Reconcile() checks routes and updates the Service 75 | // resources' Status. 76 | res, err = r.Reconcile(context.TODO(), req) 77 | if err != nil { 78 | t.Fatalf("reconcile: (%v)", err) 79 | } 80 | if res != (reconcile.Result{}) { 81 | t.Error("reconcile did not return an empty Result") 82 | } 83 | 84 | // Get the updated Service object. 
85 | actualSvc := &corev1.Service{} 86 | err = r.Client.Get(context.TODO(), req.NamespacedName, actualSvc) 87 | if err != nil { 88 | t.Errorf("get service: (%v)", err) 89 | } 90 | if !metav1.HasAnnotation(actualSvc.ObjectMeta, ELBAnnotationKey) { 91 | t.Error("service does not have expected annotation") 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /deploy/05_cloud-ingress-operator.ServiceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: cloud-ingress-operator 5 | namespace: openshift-cloud-ingress-operator 6 | -------------------------------------------------------------------------------- /deploy/20_cloud-ingress-operator.ClusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: cloud-ingress-operator 5 | rules: 6 | - apiGroups: 7 | - config.openshift.io 8 | resources: 9 | - clusterversions 10 | - infrastructures 11 | - apiservers 12 | - dnses 13 | verbs: 14 | - list 15 | - get 16 | - watch 17 | - apiGroups: 18 | - config.openshift.io 19 | resources: 20 | - apiservers 21 | verbs: 22 | - patch 23 | - update 24 | - watch -------------------------------------------------------------------------------- /deploy/20_cloud-ingress-operator.Role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: cloud-ingress-operator 5 | namespace: openshift-cloud-ingress-operator 6 | rules: 7 | - apiGroups: 8 | - cloudingress.managed.openshift.io 9 | resources: 10 | - '*' 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - "" 21 | resources: 22 | - pods 23 | - services 24 | - services/finalizers 25 | 
- endpoints 26 | - persistentvolumeclaims 27 | - events 28 | - configmaps 29 | - secrets 30 | verbs: 31 | - create 32 | - delete 33 | - get 34 | - list 35 | - patch 36 | - update 37 | - watch 38 | - apiGroups: 39 | - apps 40 | resources: 41 | - deployments 42 | - daemonsets 43 | - replicasets 44 | - statefulsets 45 | verbs: 46 | - create 47 | - delete 48 | - get 49 | - list 50 | - patch 51 | - update 52 | - watch 53 | - apiGroups: 54 | - monitoring.coreos.com 55 | resources: 56 | - servicemonitors 57 | verbs: 58 | - create 59 | - delete 60 | - get 61 | - list 62 | - patch 63 | - update 64 | - watch 65 | - apiGroups: 66 | - apps 67 | resourceNames: 68 | - cloud-ingress-operator 69 | resources: 70 | - deployments/finalizers 71 | verbs: 72 | - update 73 | - apiGroups: 74 | - "" 75 | resources: 76 | - pods 77 | verbs: 78 | - get 79 | - apiGroups: 80 | - apps 81 | resources: 82 | - replicasets 83 | verbs: 84 | - get -------------------------------------------------------------------------------- /deploy/20_cloud-ingress-operator_trusted-ca-bundle.ConfigMap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | namespace: openshift-cloud-ingress-operator 5 | name: trusted-ca-bundle 6 | labels: 7 | config.openshift.io/inject-trusted-cabundle: "true" 8 | -------------------------------------------------------------------------------- /deploy/20_cluster_config_v1_reader_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | creationTimestamp: null 5 | name: cluster-config-v1-reader-cio 6 | namespace: kube-system 7 | rules: 8 | - apiGroups: 9 | - "" 10 | resourceNames: 11 | - cluster-config-v1 12 | resources: 13 | - configmaps 14 | verbs: 15 | - get 16 | -------------------------------------------------------------------------------- 
/deploy/20_cluster_config_v1_reader_role_binding.yaml: -------------------------------------------------------------------------------- 1 | kind: RoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: cloud-ingress-operator-cluster-config-v1-reader 5 | namespace: kube-system 6 | labels: 7 | owner: cloud-ingress-operator 8 | owner.namespace: openshift-cloud-ingress-operator 9 | subjects: 10 | - kind: ServiceAccount 11 | name: cloud-ingress-operator 12 | namespace: openshift-cloud-ingress-operator 13 | roleRef: 14 | apiGroup: rbac.authorization.k8s.io 15 | kind: Role 16 | name: cluster-config-v1-reader-cio 17 | -------------------------------------------------------------------------------- /deploy/30_cloud-ingress-operator-RoleBinding.yaml: -------------------------------------------------------------------------------- 1 | kind: RoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: cloud-ingress-operator 5 | namespace: openshift-cloud-ingress-operator 6 | subjects: 7 | - kind: ServiceAccount 8 | name: cloud-ingress-operator 9 | namespace: openshift-cloud-ingress-operator 10 | roleRef: 11 | kind: Role 12 | name: cloud-ingress-operator 13 | namespace: openshift-cloud-ingress-operator 14 | apiGroup: rbac.authorization.k8s.io 15 | -------------------------------------------------------------------------------- /deploy/40_prom-k8s-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: prometheus-k8s 5 | namespace: openshift-cloud-ingress-operator 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - services 11 | - endpoints 12 | - pods 13 | verbs: 14 | - get 15 | - list 16 | - watch -------------------------------------------------------------------------------- /deploy/40_prom-k8s-rolebinding.yaml: -------------------------------------------------------------------------------- 1 | 
apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: prometheus-k8s 5 | namespace: openshift-cloud-ingress-operator 6 | roleRef: 7 | kind: Role 8 | name: prometheus-k8s 9 | subjects: 10 | - kind: ServiceAccount 11 | name: prometheus-k8s 12 | namespace: openshift-monitoring -------------------------------------------------------------------------------- /deploy/45_prom-k8s-prometheus-rules.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: PrometheusRule 3 | metadata: 4 | name: cloud-ingress-operator 5 | namespace: openshift-cloud-ingress-operator 6 | labels: 7 | role: alert-rules 8 | spec: 9 | groups: 10 | - name: openshift-cloud-ingress.rules 11 | rules: 12 | - alert: APISchemeStatusFailing 13 | expr: cloud_ingress_operator_apischeme_status == 0 14 | for: 5m 15 | labels: 16 | severity: warning 17 | annotations: 18 | message: APIScheme Conditional Status is degraded. 19 | - alert: APISchemeStatusUnavailable 20 | expr: cloud_ingress_operator_apischeme_status != 1 21 | for: 5m 22 | labels: 23 | severity: warning 24 | annotations: 25 | message: APIScheme Conditional Status is unavailable. 
-------------------------------------------------------------------------------- /deploy/50_cloud-ingress-operator.Deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: cloud-ingress-operator 5 | namespace: openshift-cloud-ingress-operator 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | name: cloud-ingress-operator 11 | template: 12 | metadata: 13 | labels: 14 | name: cloud-ingress-operator 15 | spec: 16 | serviceAccountName: cloud-ingress-operator 17 | affinity: 18 | nodeAffinity: 19 | preferredDuringSchedulingIgnoredDuringExecution: 20 | - preference: 21 | matchExpressions: 22 | - key: node-role.kubernetes.io/infra 23 | operator: Exists 24 | weight: 1 25 | tolerations: 26 | - operator: Exists 27 | key: node-role.kubernetes.io/infra 28 | effect: NoSchedule 29 | volumes: 30 | - configMap: 31 | defaultMode: 420 32 | items: 33 | - key: ca-bundle.crt 34 | path: tls-ca-bundle.pem 35 | name: trusted-ca-bundle 36 | name: trusted-ca-bundle 37 | - name: bound-sa-token 38 | projected: 39 | sources: 40 | - serviceAccountToken: 41 | path: token 42 | audience: openshift 43 | containers: 44 | - name: cloud-ingress-operator 45 | # Replace this with the built image name 46 | image: REPLACE_IMAGE 47 | command: 48 | - cloud-ingress-operator 49 | imagePullPolicy: Always 50 | env: 51 | # "" so that the cache can read objects outside its namespace 52 | - name: WATCH_NAMESPACE 53 | value: "openshift-cloud-ingress-operator,openshift-ingress,openshift-ingress-operator,openshift-kube-apiserver,openshift-machine-api" 54 | - name: POD_NAME 55 | valueFrom: 56 | fieldRef: 57 | fieldPath: metadata.name 58 | - name: OPERATOR_NAME 59 | value: "cloud-ingress-operator" 60 | resources: 61 | requests: 62 | cpu: "200m" 63 | limits: 64 | memory: "4G" 65 | cpu: "200m" 66 | livenessProbe: 67 | httpGet: 68 | path: /healthz 69 | scheme: HTTP 70 | port: 8000 71 | 
initialDelaySeconds: 45 72 | periodSeconds: 75 73 | volumeMounts: 74 | - mountPath: /etc/pki/ca-trust/extracted/pem 75 | name: trusted-ca-bundle 76 | readOnly: true 77 | - name: bound-sa-token 78 | mountPath: /var/run/secrets/openshift/serviceaccount 79 | -------------------------------------------------------------------------------- /deploy/crds/cloudingress.managed.openshift.io_apischemes.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | controller-gen.kubebuilder.io/version: v0.16.4 7 | name: apischemes.cloudingress.managed.openshift.io 8 | spec: 9 | group: cloudingress.managed.openshift.io 10 | names: 11 | kind: APIScheme 12 | listKind: APISchemeList 13 | plural: apischemes 14 | singular: apischeme 15 | scope: Namespaced 16 | versions: 17 | - name: v1alpha1 18 | schema: 19 | openAPIV3Schema: 20 | description: APIScheme is the Schema for the apischemes API 21 | properties: 22 | apiVersion: 23 | description: |- 24 | APIVersion defines the versioned schema of this representation of an object. 25 | Servers should convert recognized schemas to the latest internal value, and 26 | may reject unrecognized values. 27 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources 28 | type: string 29 | kind: 30 | description: |- 31 | Kind is a string value representing the REST resource this object represents. 32 | Servers may infer this from the endpoint the client submits requests to. 33 | Cannot be updated. 34 | In CamelCase. 
35 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds 36 | type: string 37 | metadata: 38 | type: object 39 | spec: 40 | description: APISchemeSpec defines the desired state of APIScheme 41 | properties: 42 | managementAPIServerIngress: 43 | description: 'Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html' 44 | properties: 45 | allowedCIDRBlocks: 46 | description: AllowedCIDRBlocks is the list of CIDR blocks that 47 | should be allowed to access the management API 48 | items: 49 | type: string 50 | type: array 51 | dnsName: 52 | description: DNSName is the name that should be used for DNS of 53 | the management API, eg rh-api 54 | type: string 55 | enabled: 56 | description: Enabled to create the Management API endpoint or 57 | not. 58 | type: boolean 59 | required: 60 | - allowedCIDRBlocks 61 | - dnsName 62 | - enabled 63 | type: object 64 | required: 65 | - managementAPIServerIngress 66 | type: object 67 | status: 68 | description: APISchemeStatus defines the observed state of APIScheme 69 | properties: 70 | cloudLoadBalancerDNSName: 71 | description: 'Important: Run "make" to regenerate code after modifying 72 | this file' 73 | type: string 74 | conditions: 75 | items: 76 | description: APISchemeCondition is the history of transitions 77 | properties: 78 | allowedCIDRBlocks: 79 | description: AllowedCIDRBlocks currently allowed (as of the 80 | last successful Security Group update) 81 | items: 82 | type: string 83 | type: array 84 | lastProbeTime: 85 | description: LastProbeTime last time probed 86 | format: date-time 87 | type: string 88 | lastTransitionTime: 89 | description: LastTransitionTime Last change to status 90 | format: date-time 91 | type: string 92 | message: 93 | description: Message is an English text 94 | type: string 95 | reason: 96 | description: Reason is why we're making this status change 97 | type: string 98 | status: 
99 | description: Status 100 | type: string 101 | type: 102 | description: Type is the type of condition 103 | type: string 104 | required: 105 | - lastProbeTime 106 | - lastTransitionTime 107 | - message 108 | - reason 109 | - status 110 | type: object 111 | type: array 112 | state: 113 | description: APISchemeConditionType - APISchemeConditionType 114 | type: string 115 | type: object 116 | required: 117 | - spec 118 | type: object 119 | served: true 120 | storage: true 121 | subresources: 122 | status: {} 123 | -------------------------------------------------------------------------------- /examples/cloudingress.managed.openshift.io_v1alpha1_apischeme_cr.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cloudingress.managed.openshift.io/v1alpha1 2 | kind: APIScheme 3 | metadata: 4 | name: example-apischeme 5 | spec: 6 | # Add fields here 7 | managementAPIServerIngress: 8 | enabled: true 9 | dnsName: rh-api 10 | allowedCIDRBlocks: 11 | - "0.0.0.0/0" 12 | 13 | -------------------------------------------------------------------------------- /examples/cloudingress.managed.openshift.io_v1alpha1_publishingstrategy_cr.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cloudingress.managed.openshift.io/v1alpha1 2 | kind: PublishingStrategy 3 | metadata: 4 | name: example-publishingstrategy 5 | spec: 6 | # Add fields here 7 | defaultAPIServerIngress: 8 | listening: external 9 | applicationIngress: 10 | - listening: external 11 | default: true 12 | dnsName: "apps.default1.domain" 13 | certificate: 14 | name: foo 15 | namespace: bar 16 | - listening: internal 17 | default: false 18 | dnsName: "apps2.non-default.domain" 19 | certificate: 20 | name: foo 21 | namespace: bar -------------------------------------------------------------------------------- /fips.go: -------------------------------------------------------------------------------- 1 | //go:build fips_enabled 2 
| // +build fips_enabled 3 | 4 | // BOILERPLATE GENERATED -- DO NOT EDIT 5 | // Run 'make ensure-fips' to regenerate 6 | 7 | package main 8 | 9 | import ( 10 | _ "crypto/tls/fipsonly" 11 | "fmt" 12 | ) 13 | 14 | func init() { 15 | fmt.Println("***** Starting with FIPS crypto enabled *****") 16 | } 17 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/openshift/cloud-ingress-operator 2 | 3 | go 1.23.0 4 | 5 | toolchain go1.24.5 6 | 7 | require ( 8 | github.com/aws/aws-sdk-go v1.44.86 9 | github.com/go-logr/logr v1.4.3 10 | github.com/golang/mock v1.6.0 11 | github.com/hashicorp/go-version v1.7.0 12 | github.com/onsi/ginkgo v1.16.5 13 | github.com/onsi/gomega v1.38.0 14 | // go get -u github.com/openshift/api@release-4.13 15 | github.com/openshift/api v0.0.0-20240522145529-93d6bda14341 16 | github.com/openshift/operator-custom-metrics v0.5.1 17 | github.com/operator-framework/operator-lib v0.11.0 18 | github.com/prometheus/client_golang v1.23.0 19 | github.com/stretchr/testify v1.10.0 20 | go.uber.org/zap v1.26.0 21 | golang.org/x/oauth2 v0.30.0 22 | google.golang.org/api v0.128.0 23 | gopkg.in/yaml.v2 v2.4.0 24 | k8s.io/api v0.31.3 25 | k8s.io/apimachinery v0.31.3 26 | k8s.io/client-go v0.31.3 27 | k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 28 | k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 29 | sigs.k8s.io/controller-runtime v0.19.0 30 | ) 31 | 32 | require ( 33 | github.com/onsi/ginkgo/v2 v2.23.4 34 | github.com/openshift/osde2e-common v0.0.0-20250812151315-081151385798 35 | ) 36 | 37 | require ( 38 | cloud.google.com/go/compute/metadata v0.3.0 // indirect 39 | github.com/beorn7/perks v1.0.1 // indirect 40 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 41 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect 42 | github.com/emicklei/go-restful/v3 v3.11.0 // indirect 43 | 
github.com/evanphx/json-patch v5.6.0+incompatible // indirect 44 | github.com/evanphx/json-patch/v5 v5.9.0 // indirect 45 | github.com/fsnotify/fsnotify v1.7.0 // indirect 46 | github.com/fxamacker/cbor/v2 v2.7.0 // indirect 47 | github.com/go-logr/zapr v1.3.0 // indirect 48 | github.com/go-openapi/jsonpointer v0.19.6 // indirect 49 | github.com/go-openapi/jsonreference v0.20.2 // indirect 50 | github.com/go-openapi/swag v0.22.4 // indirect 51 | github.com/go-task/slim-sprig/v3 v3.0.0 // indirect 52 | github.com/gogo/protobuf v1.3.2 // indirect 53 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect 54 | github.com/golang/protobuf v1.5.4 // indirect 55 | github.com/google/gnostic-models v0.6.8 // indirect 56 | github.com/google/go-cmp v0.7.0 // indirect 57 | github.com/google/gofuzz v1.2.0 // indirect 58 | github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect 59 | github.com/google/s2a-go v0.1.4 // indirect 60 | github.com/google/uuid v1.6.0 // indirect 61 | github.com/googleapis/enterprise-certificate-proxy v0.2.4 // indirect 62 | github.com/googleapis/gax-go/v2 v2.12.0 // indirect 63 | github.com/gorilla/websocket v1.5.0 // indirect 64 | github.com/imdario/mergo v0.3.15 // indirect 65 | github.com/jmespath/go-jmespath v0.4.0 // indirect 66 | github.com/josharian/intern v1.0.0 // indirect 67 | github.com/json-iterator/go v1.1.12 // indirect 68 | github.com/mailru/easyjson v0.7.7 // indirect 69 | github.com/moby/spdystream v0.4.0 // indirect 70 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 71 | github.com/modern-go/reflect2 v1.0.2 // indirect 72 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 73 | github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect 74 | github.com/nxadm/tail v1.4.8 // indirect 75 | github.com/pkg/errors v0.9.1 // indirect 76 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect 77 | 
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.55.0 // indirect 78 | github.com/prometheus/client_model v0.6.2 // indirect 79 | github.com/prometheus/common v0.65.0 // indirect 80 | github.com/prometheus/procfs v0.16.1 // indirect 81 | github.com/spf13/pflag v1.0.5 // indirect 82 | github.com/x448/float16 v0.8.4 // indirect 83 | go.opencensus.io v0.24.0 // indirect 84 | go.uber.org/automaxprocs v1.6.0 // indirect 85 | go.uber.org/multierr v1.11.0 // indirect 86 | golang.org/x/crypto v0.39.0 // indirect 87 | golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect 88 | golang.org/x/net v0.41.0 // indirect 89 | golang.org/x/sys v0.33.0 // indirect 90 | golang.org/x/term v0.32.0 // indirect 91 | golang.org/x/text v0.26.0 // indirect 92 | golang.org/x/time v0.3.0 // indirect 93 | golang.org/x/tools v0.33.0 // indirect 94 | gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect 95 | google.golang.org/appengine v1.6.7 // indirect 96 | google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect 97 | google.golang.org/grpc v1.65.0 // indirect 98 | google.golang.org/protobuf v1.36.6 // indirect 99 | gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect 100 | gopkg.in/inf.v0 v0.9.1 // indirect 101 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect 102 | gopkg.in/yaml.v3 v3.0.1 // indirect 103 | k8s.io/apiextensions-apiserver v0.31.0 // indirect 104 | k8s.io/klog/v2 v2.130.1 // indirect 105 | sigs.k8s.io/e2e-framework v0.5.0 // indirect 106 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect 107 | sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect 108 | sigs.k8s.io/yaml v1.4.0 // indirect 109 | ) 110 | -------------------------------------------------------------------------------- /hack/boilerplate.go.txt: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 
3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ -------------------------------------------------------------------------------- /main_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | "reflect" 6 | "testing" 7 | 8 | "sigs.k8s.io/controller-runtime/pkg/cache" 9 | ) 10 | 11 | func TestGetWatchNamespaces(t *testing.T) { 12 | tests := []struct { 13 | name string 14 | envValue string 15 | setEnv bool 16 | expectError bool 17 | expectKeys []string 18 | }{ 19 | { 20 | name: "unset env returns error", 21 | setEnv: false, 22 | expectError: true, 23 | expectKeys: nil, 24 | }, 25 | { 26 | name: "empty env allows cluster scope", 27 | envValue: "", 28 | setEnv: true, 29 | expectError: false, 30 | expectKeys: nil, 31 | }, 32 | { 33 | name: "single namespace", 34 | envValue: "ns1", 35 | setEnv: true, 36 | expectError: false, 37 | expectKeys: []string{"ns1"}, 38 | }, 39 | { 40 | name: "multiple namespaces comma separated", 41 | envValue: "ns1,ns2,ns3", 42 | setEnv: true, 43 | expectError: false, 44 | expectKeys: []string{"ns1", "ns2", "ns3"}, 45 | }, 46 | { 47 | name: "trims whitespace around namespaces", 48 | envValue: " ns1 , ns2 ", 49 | setEnv: true, 50 | expectError: false, 51 | expectKeys: []string{"ns1", "ns2"}, 52 | }, 53 | } 54 | 55 | for _, tt := range tests { 56 | t.Run(tt.name, func(t *testing.T) { 57 | if tt.setEnv { 58 | os.Setenv(watchNamespaceEnvVar, 
tt.envValue) 59 | defer os.Unsetenv(watchNamespaceEnvVar) 60 | } else { 61 | os.Unsetenv(watchNamespaceEnvVar) 62 | } 63 | 64 | nsMap, err := getWatchNamespaces() 65 | if tt.expectError && err == nil { 66 | t.Fatalf("expected error, got none") 67 | } 68 | if !tt.expectError && err != nil { 69 | t.Fatalf("unexpected error: %v", err) 70 | } 71 | if !tt.expectError { 72 | if tt.expectKeys == nil { 73 | // Cluster-wide scope should return nil map 74 | if nsMap != nil { 75 | t.Errorf("expected nil map for cluster scope, got %v", nsMap) 76 | } 77 | } else { 78 | for _, key := range tt.expectKeys { 79 | if _, ok := nsMap[key]; !ok { 80 | t.Errorf("expected key %s in map, but not found", key) 81 | } 82 | } 83 | // Check size matches 84 | if len(nsMap) != len(tt.expectKeys) { 85 | t.Errorf("expected %d namespaces, got %d", len(tt.expectKeys), len(nsMap)) 86 | } 87 | // Ensure all values are of type cache.Config 88 | for k, v := range nsMap { 89 | if !reflect.DeepEqual(v, cache.Config{}) { 90 | t.Errorf("expected empty cache.Config for key %s, got %#v", k, v) 91 | } 92 | } 93 | } 94 | } 95 | }) 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /pkg/cloudclient/add_aws.go: -------------------------------------------------------------------------------- 1 | package cloudclient 2 | 3 | import ( 4 | "github.com/openshift/cloud-ingress-operator/pkg/cloudclient/aws" 5 | "sigs.k8s.io/controller-runtime/pkg/client" 6 | ) 7 | 8 | func init() { 9 | Register( 10 | aws.ClientIdentifier, 11 | produceAWS, 12 | ) 13 | } 14 | 15 | func produceAWS(kclient client.Client) CloudClient { 16 | cli, err := aws.NewClient(kclient) 17 | if err != nil { 18 | panic(err) 19 | } 20 | 21 | return cli 22 | } 23 | -------------------------------------------------------------------------------- /pkg/cloudclient/add_aws_test.go: -------------------------------------------------------------------------------- 1 | package cloudclient 2 | 3 | import ( 4 | 
"testing" 5 | 6 | "github.com/openshift/cloud-ingress-operator/pkg/testutils" 7 | "k8s.io/apimachinery/pkg/runtime" 8 | ) 9 | 10 | func TestProduceAWSPanics(t *testing.T) { 11 | defer func() { 12 | if r := recover(); r == nil { 13 | t.Errorf("testing panic: should have been failed") 14 | } 15 | }() 16 | objs := []runtime.Object{} 17 | mocks := testutils.NewTestMock(t, objs) 18 | _ = produceAWS(mocks.FakeKubeClient) 19 | } 20 | -------------------------------------------------------------------------------- /pkg/cloudclient/add_gcp.go: -------------------------------------------------------------------------------- 1 | package cloudclient 2 | 3 | import ( 4 | "github.com/openshift/cloud-ingress-operator/pkg/cloudclient/gcp" 5 | "sigs.k8s.io/controller-runtime/pkg/client" 6 | ) 7 | 8 | func init() { 9 | Register( 10 | gcp.ClientIdentifier, 11 | produceGCP, 12 | ) 13 | } 14 | 15 | func produceGCP(kclient client.Client) CloudClient { 16 | cli, err := gcp.NewClient(kclient) 17 | if err != nil { 18 | panic(err) 19 | } 20 | 21 | return cli 22 | } 23 | -------------------------------------------------------------------------------- /pkg/cloudclient/add_gcp_test.go: -------------------------------------------------------------------------------- 1 | package cloudclient 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/openshift/cloud-ingress-operator/pkg/testutils" 7 | "k8s.io/apimachinery/pkg/runtime" 8 | ) 9 | 10 | func TestProducePanics(t *testing.T) { 11 | defer func() { 12 | if r := recover(); r == nil { 13 | t.Errorf("testing panic: should have been failed") 14 | } 15 | }() 16 | objs := []runtime.Object{} 17 | mocks := testutils.NewTestMock(t, objs) 18 | _ = produceGCP(mocks.FakeKubeClient) 19 | } 20 | -------------------------------------------------------------------------------- /pkg/cloudclient/aws/aws.go: -------------------------------------------------------------------------------- 1 | package aws 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | 8 | 
"github.com/aws/aws-sdk-go/aws" 9 | "github.com/aws/aws-sdk-go/aws/session" 10 | 11 | "github.com/aws/aws-sdk-go/service/ec2" 12 | "github.com/aws/aws-sdk-go/service/ec2/ec2iface" 13 | "github.com/aws/aws-sdk-go/service/elb" 14 | "github.com/aws/aws-sdk-go/service/elb/elbiface" 15 | "github.com/aws/aws-sdk-go/service/elbv2" 16 | "github.com/aws/aws-sdk-go/service/elbv2/elbv2iface" 17 | "github.com/aws/aws-sdk-go/service/route53" 18 | "github.com/aws/aws-sdk-go/service/route53/route53iface" 19 | 20 | configv1 "github.com/openshift/api/config/v1" 21 | cloudingressv1alpha1 "github.com/openshift/cloud-ingress-operator/api/v1alpha1" 22 | "github.com/openshift/cloud-ingress-operator/config" 23 | corev1 "k8s.io/api/core/v1" 24 | "k8s.io/apimachinery/pkg/types" 25 | k8s "sigs.k8s.io/controller-runtime/pkg/client" 26 | 27 | logf "sigs.k8s.io/controller-runtime/pkg/log" 28 | ) 29 | 30 | // ClientIdentifier is what kind of cloud this implement supports 31 | const ClientIdentifier configv1.PlatformType = configv1.AWSPlatformType 32 | 33 | var ( 34 | log = logf.Log.WithName("aws_cloudclient") 35 | ) 36 | 37 | // Client represents an AWS Client 38 | type Client struct { 39 | ec2Client ec2iface.EC2API 40 | route53Client route53iface.Route53API 41 | elbClient elbiface.ELBAPI 42 | elbv2Client elbv2iface.ELBV2API 43 | } 44 | 45 | // EnsureAdminAPIDNS implements cloudclient.CloudClient 46 | func (ac *Client) EnsureAdminAPIDNS(ctx context.Context, kclient k8s.Client, instance *cloudingressv1alpha1.APIScheme, svc *corev1.Service) error { 47 | return ac.ensureAdminAPIDNS(ctx, kclient, instance, svc) 48 | } 49 | 50 | // DeleteAdminAPIDNS implements cloudclient.CloudClient 51 | func (ac *Client) DeleteAdminAPIDNS(ctx context.Context, kclient k8s.Client, instance *cloudingressv1alpha1.APIScheme, svc *corev1.Service) error { 52 | return ac.deleteAdminAPIDNS(ctx, kclient, instance, svc) 53 | } 54 | 55 | // SetDefaultAPIPrivate implements cloudclient.CloudClient 56 | func (ac *Client) 
SetDefaultAPIPrivate(ctx context.Context, kclient k8s.Client, instance *cloudingressv1alpha1.PublishingStrategy) error { 57 | return ac.setDefaultAPIPrivate(ctx, kclient, instance) 58 | } 59 | 60 | // SetDefaultAPIPublic implements cloudclient.CloudClient 61 | func (ac *Client) SetDefaultAPIPublic(ctx context.Context, kclient k8s.Client, instance *cloudingressv1alpha1.PublishingStrategy) error { 62 | return ac.setDefaultAPIPublic(ctx, kclient, instance) 63 | } 64 | 65 | // Healthcheck performs basic calls to make sure client is healthy 66 | func (ac *Client) Healthcheck(ctx context.Context, kclient k8s.Client) error { 67 | input := &elb.DescribeLoadBalancersInput{} 68 | _, err := ac.elbClient.DescribeLoadBalancers(input) 69 | 70 | return err 71 | } 72 | 73 | func newClient(region string, kclient k8s.Client) (*Client, error) { 74 | sessionOptions := session.Options{ 75 | Config: aws.Config{ 76 | Region: aws.String(region), 77 | }, 78 | } 79 | 80 | creds := &corev1.Secret{} 81 | err := kclient.Get( 82 | context.TODO(), 83 | types.NamespacedName{ 84 | Name: config.AWSSecretName, 85 | Namespace: config.OperatorNamespace, 86 | }, 87 | creds) 88 | 89 | if err != nil { 90 | panic(fmt.Sprintf("Couldn't get secret with credentials %s", err.Error())) 91 | } 92 | 93 | // get sharedCredsFile from secret 94 | sharedCredsFile, err := SharedCredentialsFileFromSecret(creds) 95 | if err != nil { 96 | return nil, err 97 | } 98 | 99 | sessionOptions.SharedConfigState = session.SharedConfigEnable // Force enable Shared Config support 100 | sessionOptions.SharedConfigFiles = []string{sharedCredsFile} // Ordered list of files the session will load configuration from. 
101 | 102 | s, err := session.NewSessionWithOptions(sessionOptions) 103 | if err != nil { 104 | return nil, err 105 | } 106 | 107 | // Remove temporary shared credentials token at end of func after creating session 108 | defer os.Remove(sharedCredsFile) 109 | 110 | return &Client{ 111 | ec2Client: ec2.New(s), 112 | elbClient: elb.New(s), 113 | elbv2Client: elbv2.New(s), 114 | route53Client: route53.New(s), 115 | }, nil 116 | } 117 | 118 | // NewClient creates a new CloudClient for use with AWS. 119 | func NewClient(kclient k8s.Client) (*Client, error) { 120 | region, err := getClusterRegion(kclient) 121 | if err != nil { 122 | return nil, fmt.Errorf("couldn't get cluster region %w", err) 123 | } 124 | 125 | c, err := newClient( 126 | region, 127 | kclient) 128 | 129 | if err != nil { 130 | return nil, fmt.Errorf("couldn't create AWS client %w", err) 131 | } 132 | 133 | return c, nil 134 | } 135 | -------------------------------------------------------------------------------- /pkg/cloudclient/aws/shared_credentials_file.go: -------------------------------------------------------------------------------- 1 | package aws 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "fmt" 7 | "os" 8 | 9 | corev1 "k8s.io/api/core/v1" 10 | ) 11 | 12 | // SharedCredentialsFileFromSecret returns a path to the shared creds file created using provided secret 13 | // configure the aws session using file to use credentials eg 14 | // sharedCredentialsFile, err := SharedCredentialsFileFromSecret(secret) 15 | // 16 | // if err != nil { 17 | // // handle error 18 | // } 19 | // 20 | // options := session.Options{ 21 | // SharedConfigState: session.SharedConfigEnable, 22 | // SharedConfigFiles: []string{sharedCredentialsFile}, 23 | // } 24 | // 25 | // sess := session.Must(session.NewSessionWithOptions(options)) 26 | func SharedCredentialsFileFromSecret(secret *corev1.Secret) (string, error) { 27 | var data []byte 28 | switch { 29 | case len(secret.Data["credentials"]) > 0: 30 | data = 
// newConfigForStaticCreds renders a minimal AWS shared-credentials file body
// for a static access-key pair, under the "[default]" profile.
func newConfigForStaticCreds(accessKey string, accessSecret string) []byte {
	var b bytes.Buffer
	b.WriteString("[default]\n")
	b.WriteString("aws_access_key_id = " + accessKey + "\n")
	b.WriteString("aws_secret_access_key = " + accessSecret + "\n")
	return b.Bytes()
}
TestSharedCredentialsFileFromSecret(t *testing.T) { 34 | cases := []struct { 35 | data map[string]string 36 | sharedConfig string 37 | err string 38 | }{{ 39 | data: map[string]string{ 40 | "aws_access_key_id": "asdf", 41 | "aws_secret_access_key": "asdf1234", 42 | }, 43 | sharedConfig: `[default] 44 | aws_access_key_id = asdf 45 | aws_secret_access_key = asdf1234 46 | `, 47 | }, { 48 | data: map[string]string{ 49 | "credentials": `[default] 50 | assume_role = role_for_cloud_ingress_operator 51 | web_identity_token = /path/to/sa/token 52 | `, 53 | }, 54 | 55 | sharedConfig: `[default] 56 | assume_role = role_for_cloud_ingress_operator 57 | web_identity_token = /path/to/sa/token 58 | `, 59 | }, { 60 | data: map[string]string{ 61 | "wrong_format_cred_file": "random_value", 62 | }, 63 | // err should match the defaut case for SharedCredentialsFileFromSecret() func 64 | // and return the exact same error message of `invalid secret for aws credentials` 65 | err: "invalid secret for aws credentials", 66 | }, { 67 | data: map[string]string{ 68 | "aws_access_key_id": "asdf", 69 | "aws_secret_access_key": "asdf1234", 70 | "credentials": `[default] 71 | aws_access_key_id = asdf 72 | aws_secret_access_key = asdf1234 73 | `, 74 | }, 75 | sharedConfig: `[default] 76 | aws_access_key_id = asdf 77 | aws_secret_access_key = asdf1234 78 | `, 79 | }} 80 | 81 | for _, test := range cases { 82 | t.Run("", func(t *testing.T) { 83 | secret := &corev1.Secret{ 84 | Data: map[string][]byte{}, 85 | } 86 | 87 | for k, v := range test.data { 88 | secret.Data[k] = []byte(v) 89 | } 90 | 91 | credPath, err := SharedCredentialsFileFromSecret(secret) 92 | if credPath != "" { 93 | defer os.Remove(credPath) 94 | } 95 | 96 | if test.err == "" { 97 | assert.NoError(t, err) 98 | data, err := os.ReadFile(credPath) 99 | t.Log(data) 100 | assert.NoError(t, err) 101 | assert.Equal(t, string(data), test.sharedConfig) 102 | } else { 103 | assert.Regexp(t, test.err, err) 104 | } 105 | }) 106 | } 107 | } 108 | 
-------------------------------------------------------------------------------- /pkg/cloudclient/cloudclient.go: -------------------------------------------------------------------------------- 1 | package cloudclient 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | configv1 "github.com/openshift/api/config/v1" 8 | 9 | cloudingressv1alpha1 "github.com/openshift/cloud-ingress-operator/api/v1alpha1" 10 | corev1 "k8s.io/api/core/v1" 11 | "sigs.k8s.io/controller-runtime/pkg/client" 12 | ) 13 | 14 | // CloudClient defines the interface for a cloud agnostic implementation 15 | // For mocking: mockgen -source=pkg/cloudclient/cloudclient.go -destination=pkg/cloudclient/mock_cloudclient/mock_cloudclient.go 16 | type CloudClient interface { 17 | 18 | /* APIScheme */ 19 | // EnsureAdminAPIDNS ensures there's a rh-api (for example) alias to the Service for the APIScheme 20 | // May return loadBalancerNotFound or other specific errors 21 | EnsureAdminAPIDNS(context.Context, client.Client, *cloudingressv1alpha1.APIScheme, *corev1.Service) error 22 | 23 | // DeleteAdminAPIDNS will ensure that the A record for the admin API (rh-api) is removed 24 | DeleteAdminAPIDNS(context.Context, client.Client, *cloudingressv1alpha1.APIScheme, *corev1.Service) error 25 | 26 | /* Publishing Strategy */ 27 | // SetDefaultAPIPrivate ensures that the default API is private, per user configure 28 | SetDefaultAPIPrivate(context.Context, client.Client, *cloudingressv1alpha1.PublishingStrategy) error 29 | 30 | // SetDefaultAPIPublic ensures that the default API is public, per user configure 31 | SetDefaultAPIPublic(context.Context, client.Client, *cloudingressv1alpha1.PublishingStrategy) error 32 | 33 | // Perform healthcheck 34 | Healthcheck(context.Context, client.Client) error 35 | } 36 | 37 | var controllerMapping = map[configv1.PlatformType]Factory{} 38 | 39 | type Factory func(client.Client) CloudClient 40 | 41 | func Register(name configv1.PlatformType, factoryFunc Factory) { 42 | 
controllerMapping[name] = factoryFunc 43 | } 44 | 45 | // GetClientFor returns the CloudClient for the given cloud provider, identified 46 | // by the provider's ID, eg aws for AWS's cloud client, gcp for GCP's cloud 47 | // client. 48 | func GetClientFor(kclient client.Client, cloudID configv1.PlatformType) CloudClient { 49 | if _, ok := controllerMapping[cloudID]; ok { 50 | return controllerMapping[cloudID](kclient) 51 | } 52 | // TODO: Return a minimal interface? 53 | panic(fmt.Sprintf("Couldn't find a client matching %s", cloudID)) 54 | } 55 | -------------------------------------------------------------------------------- /pkg/cloudclient/gcp/gcp.go: -------------------------------------------------------------------------------- 1 | package gcp 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "golang.org/x/oauth2/google" 8 | computev1 "google.golang.org/api/compute/v1" 9 | dnsv1 "google.golang.org/api/dns/v1" 10 | "google.golang.org/api/option" 11 | 12 | configv1 "github.com/openshift/api/config/v1" 13 | machineapi "github.com/openshift/api/machine/v1beta1" 14 | cloudingressv1alpha1 "github.com/openshift/cloud-ingress-operator/api/v1alpha1" 15 | "github.com/openshift/cloud-ingress-operator/config" 16 | baseutils "github.com/openshift/cloud-ingress-operator/pkg/utils" 17 | corev1 "k8s.io/api/core/v1" 18 | "k8s.io/apimachinery/pkg/types" 19 | k8s "sigs.k8s.io/controller-runtime/pkg/client" 20 | 21 | logf "sigs.k8s.io/controller-runtime/pkg/log" 22 | ) 23 | 24 | // ClientIdentifier is what kind of cloud this implement supports 25 | const ClientIdentifier configv1.PlatformType = configv1.GCPPlatformType 26 | 27 | var ( 28 | log = logf.Log.WithName("gcp_cloudclient") 29 | ) 30 | 31 | // Client represents a GCP cloud Client 32 | type Client struct { 33 | projectID string 34 | region string 35 | clusterName string 36 | baseDomain string 37 | masterList *machineapi.MachineList 38 | dnsService *dnsv1.Service 39 | computeService *computev1.Service 40 | } 41 | 42 | // 
EnsureAdminAPIDNS implements cloudclient.CloudClient 43 | func (gc *Client) EnsureAdminAPIDNS(ctx context.Context, kclient k8s.Client, instance *cloudingressv1alpha1.APIScheme, svc *corev1.Service) error { 44 | return gc.ensureAdminAPIDNS(ctx, kclient, instance, svc) 45 | } 46 | 47 | // DeleteAdminAPIDNS implements cloudclient.CloudClient 48 | func (gc *Client) DeleteAdminAPIDNS(ctx context.Context, kclient k8s.Client, instance *cloudingressv1alpha1.APIScheme, svc *corev1.Service) error { 49 | return gc.deleteAdminAPIDNS(ctx, kclient, instance, svc) 50 | } 51 | 52 | // SetDefaultAPIPrivate implements cloudclient.CloudClient 53 | func (gc *Client) SetDefaultAPIPrivate(ctx context.Context, kclient k8s.Client, instance *cloudingressv1alpha1.PublishingStrategy) error { 54 | return gc.setDefaultAPIPrivate(ctx, kclient, instance) 55 | } 56 | 57 | // SetDefaultAPIPublic implements cloudclient.CloudClient 58 | func (gc *Client) SetDefaultAPIPublic(ctx context.Context, kclient k8s.Client, instance *cloudingressv1alpha1.PublishingStrategy) error { 59 | return gc.setDefaultAPIPublic(ctx, kclient, instance) 60 | } 61 | 62 | // Healthcheck performs basic calls to make sure client is healthy 63 | func (gc *Client) Healthcheck(ctx context.Context, kclient k8s.Client) error { 64 | _, err := gc.computeService.RegionBackendServices.List(gc.projectID, gc.region).Do() 65 | return err 66 | } 67 | 68 | func newClient(ctx context.Context, serviceAccountJSON []byte) (*Client, error) { 69 | credentials, err := google.CredentialsFromJSON( 70 | ctx, serviceAccountJSON, 71 | dnsv1.NdevClouddnsReadwriteScope, 72 | computev1.ComputeScope) 73 | if err != nil { 74 | return nil, err 75 | } 76 | 77 | dnsService, err := dnsv1.NewService(ctx, option.WithCredentials(credentials)) 78 | if err != nil { 79 | return nil, err 80 | } 81 | 82 | computeService, err := computev1.NewService(ctx, option.WithCredentials(credentials)) 83 | if err != nil { 84 | return nil, err 85 | } 86 | 87 | return &Client{ 88 | 
projectID: credentials.ProjectID, 89 | dnsService: dnsService, 90 | computeService: computeService, 91 | }, nil 92 | } 93 | 94 | // NewClient creates a new CloudClient for use with GCP. 95 | func NewClient(kclient k8s.Client) (*Client, error) { 96 | ctx := context.Background() 97 | secret := &corev1.Secret{} 98 | err := kclient.Get( 99 | ctx, 100 | types.NamespacedName{ 101 | Name: config.GCPSecretName, 102 | Namespace: config.OperatorNamespace, 103 | }, 104 | secret) 105 | if err != nil { 106 | return nil, fmt.Errorf("couldn't get Secret with credentials %w", err) 107 | } 108 | serviceAccountJSON, ok := secret.Data["service_account.json"] 109 | if !ok { 110 | return nil, fmt.Errorf("access credentials missing service account") 111 | } 112 | 113 | // initialize actual client 114 | c, err := newClient(ctx, serviceAccountJSON) 115 | if err != nil { 116 | return nil, fmt.Errorf("couldn't create GCP client %s", err) 117 | } 118 | 119 | // enchant the client with params required 120 | region, err := getClusterRegion(kclient) 121 | if err != nil { 122 | return nil, err 123 | } 124 | c.region = region 125 | 126 | masterList, err := baseutils.GetMasterMachines(kclient) 127 | if err != nil { 128 | return nil, err 129 | } 130 | c.masterList = masterList 131 | infrastructureName, err := baseutils.GetClusterName(kclient) 132 | if err != nil { 133 | return nil, err 134 | } 135 | c.clusterName = infrastructureName 136 | baseDomain, err := baseutils.GetClusterBaseDomain(kclient) 137 | if err != nil { 138 | return nil, err 139 | } 140 | c.baseDomain = baseDomain 141 | 142 | return c, nil 143 | } 144 | 145 | func getClusterRegion(kclient k8s.Client) (string, error) { 146 | infra, err := baseutils.GetInfrastructureObject(kclient) 147 | if err != nil { 148 | return "", err 149 | } 150 | return infra.Status.PlatformStatus.GCP.Region, nil 151 | } 152 | -------------------------------------------------------------------------------- /pkg/cloudclient/gcp/gcp_test.go: 
-------------------------------------------------------------------------------- 1 | package gcp 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/openshift/cloud-ingress-operator/config" 7 | "github.com/openshift/cloud-ingress-operator/pkg/testutils" 8 | corev1 "k8s.io/api/core/v1" 9 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | "k8s.io/apimachinery/pkg/runtime" 11 | ) 12 | 13 | func TestNewClient(t *testing.T) { 14 | dummySA := `{ 15 | "type": "service_account", 16 | "private_key_id": "abc", 17 | "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvgIBADANBgkqhkiG9w0BAQEFAKECBKgwggSkAgEAAoIBAQDY3E8o1NEFcjFAKEHW/5ZfFJw29/8NEqpViNjQIx95Xx5KDtJ+nWFAKEW0uqsSqKlKGhAdAo+Q6bjx2cFAKEVsXTu7XrZUY5Kltvj94DvUa1wjNXs606r/RxWTJ58bfdC+gLLxBfGnB6CwK0YQ\nxnfpjNbkUfVVzO0MQD7UP0Hl5ZcY0Puvxd/yHuONQn/rIAieTHH1pqgW+zrH/y3c\n59IGThC9PPtugI9ea8RSnVj3PWz1bX2UkCDpy9IRh9LzJLaYYX9RUd7++dULUlat\nAaXBh1U6emUDzhrIsgApjDVtimOPbmQWmX1S60mqQikRpVYZ8u+NDD+LNw+/Eovn\nxCj2Y3z1AgMBAAECggEAWDBzoqO1IvVXjBA2lqId10T6hXmN3j1ifyH+aAqK+FVl\nGjyWjDj0xWQcJ9ync7bQ6fSeTeNGzP0M6kzDU1+w6FgyZqwdmXWI2VmEizRjwk+/\n/uLQUcL7I55Dxn7KUoZs/rZPmQDxmGLoue60Gg6z3yLzVcKiDc7cnhzhdBgDc8vd\nQorNAlqGPRnm3EqKQ6VQp6fyQmCAxrr45kspRXNLddat3AMsuqImDkqGKBmF3Q1y\nxWGe81LphUiRqvqbyUlh6cdSZ8pLBpc9m0c3qWPKs9paqBIvgUPlvOZMqec6x4S6\nChbdkkTRLnbsRr0Yg/nDeEPlkhRBhasXpxpMUBgPywKBgQDs2axNkFjbU94uXvd5\nznUhDVxPFBuxyUHtsJNqW4p/ujLNimGet5E/YthCnQeC2P3Ym7c3fiz68amM6hiA\nOnW7HYPZ+jKFnefpAtjyOOs46AkftEg07T9XjwWNPt8+8l0DYawPoJgbM5iE0L2O\nx8TU1Vs4mXc+ql9F90GzI0x3VwKBgQDqZOOqWw3hTnNT07Ixqnmd3dugV9S7eW6o\nU9OoUgJB4rYTpG+yFqNqbRT8bkx37iKBMEReppqonOqGm4wtuRR6LSLlgcIU9Iwx\nyfH12UWqVmFSHsgZFqM/cK3wGev38h1WBIOx3/djKn7BdlKVh8kWyx6uC8bmV+E6\nOoK0vJD6kwKBgHAySOnROBZlqzkiKW8c+uU2VATtzJSydrWm0J4wUPJifNBa/hVW\ndcqmAzXC9xznt5AVa3wxHBOfyKaE+ig8CSsjNyNZ3vbmr0X04FoV1m91k2TeXNod\njMTobkPThaNm4eLJMN2SQJuaHGTGERWC0l3T18t+/zrDMDCPiSLX1NAvAoGBAN1T\nVLJYdjvIMxf1bm59VYcepbK7HLHFkRq6xMJMZbtG0ryraZjUzYvB4q4VjHk2UDiC\nlhx13tXWDZH7MJtABzjyg+AI7XWSEQs2cBXACos0M4Myc6lU+eL+iA+OuoUOh
mrh\nqmT8YYGu76/IBWUSqWuvcpHPpwl7871i4Ga/I3qnAoGBANNkKAcMoeAbJQK7a/Rn\nwPEJB+dPgNDIaboAsh1nZhVhN5cvdvCWuEYgOGCPQLYQF0zmTLcM+sVxOYgfy8mV\nfbNgPgsP5xmu6dw2COBKdtozw0HrWSRjACd1N4yGu75+wPCcX/gQarcjRcXXZeEa\nNtBLSfcqPULqD+h7br9lEJio\n-----END PRIVATE KEY-----\n", 18 | "client_email": "123-abc@developer.gserviceaccount.com", 19 | "client_id": "123-abc.apps.googleusercontent.com", 20 | "auth_uri": "https://accounts.google.com/o/oauth2/auth", 21 | "token_uri": "http://localhost:8080/token" 22 | }` // #nosec G101 23 | 24 | infra := testutils.CreateGCPInfraObject("sut", testutils.DefaultAPIEndpoint, testutils.DefaultAPIEndpoint, testutils.DefaultRegionName) 25 | 26 | fakeSecret := &corev1.Secret{ 27 | ObjectMeta: v1.ObjectMeta{ 28 | Name: config.GCPSecretName, 29 | Namespace: config.OperatorNamespace, 30 | }, 31 | Data: make(map[string][]byte), 32 | } 33 | fakeSecret.Data["service_account.json"] = []byte(dummySA) 34 | 35 | objs := []runtime.Object{infra, fakeSecret} 36 | mocks := testutils.NewTestMock(t, objs) 37 | cli, err := NewClient(mocks.FakeKubeClient) 38 | 39 | if err != nil { 40 | t.Error("err occurred while creating cli:", err) 41 | } 42 | 43 | if cli == nil { 44 | t.Error("cli should have been initialized") 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /pkg/cloudclient/gcp/private_test.go: -------------------------------------------------------------------------------- 1 | package gcp 2 | 3 | import ( 4 | "context" 5 | "reflect" 6 | "testing" 7 | 8 | machineapi "github.com/openshift/api/machine/v1beta1" 9 | "github.com/openshift/cloud-ingress-operator/pkg/testutils" 10 | corev1 "k8s.io/api/core/v1" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | "k8s.io/apimachinery/pkg/runtime" 13 | "k8s.io/apimachinery/pkg/types" 14 | 15 | cioerrors "github.com/openshift/cloud-ingress-operator/pkg/errors" 16 | ) 17 | 18 | func TestGetIPAddressesFromService(t *testing.T) { 19 | tests := []struct { 20 | name string 21 | 
svc *corev1.Service 22 | expected_ips []string 23 | expected_err error 24 | }{ 25 | { 26 | name: "single IP", 27 | svc: &corev1.Service{ 28 | TypeMeta: metav1.TypeMeta{ 29 | Kind: "Service", 30 | APIVersion: corev1.SchemeGroupVersion.String(), 31 | }, 32 | Status: corev1.ServiceStatus{ 33 | LoadBalancer: corev1.LoadBalancerStatus{ 34 | Ingress: []corev1.LoadBalancerIngress{ 35 | { 36 | IP: "127.0.0.1", 37 | }, 38 | }, 39 | }, 40 | }, 41 | }, 42 | expected_ips: []string{ 43 | "127.0.0.1", 44 | }, 45 | }, 46 | { 47 | name: "multiple IPs", 48 | svc: &corev1.Service{ 49 | TypeMeta: metav1.TypeMeta{ 50 | Kind: "Service", 51 | APIVersion: corev1.SchemeGroupVersion.String(), 52 | }, 53 | Status: corev1.ServiceStatus{ 54 | LoadBalancer: corev1.LoadBalancerStatus{ 55 | Ingress: []corev1.LoadBalancerIngress{ 56 | { 57 | IP: "127.0.0.1", 58 | }, 59 | { 60 | IP: "10.0.0.1", 61 | }, 62 | }, 63 | }, 64 | }, 65 | }, 66 | expected_ips: []string{ 67 | "127.0.0.1", 68 | "10.0.0.1", 69 | }, 70 | }, 71 | { 72 | name: "no IPs", 73 | svc: &corev1.Service{ 74 | TypeMeta: metav1.TypeMeta{ 75 | Kind: "Service", 76 | APIVersion: corev1.SchemeGroupVersion.String(), 77 | }, 78 | Status: corev1.ServiceStatus{ 79 | LoadBalancer: corev1.LoadBalancerStatus{ 80 | Ingress: []corev1.LoadBalancerIngress{}, 81 | }, 82 | }, 83 | }, 84 | expected_ips: nil, 85 | expected_err: cioerrors.NewLoadBalancerNotReadyError(), 86 | }, 87 | } 88 | 89 | for _, test := range tests { 90 | actual, err := getIPAddressesFromService(test.svc) 91 | 92 | if !reflect.DeepEqual(actual, test.expected_ips) { 93 | t.Errorf("%s: expected %v, got %v", test.name, actual, test.expected_ips) 94 | } 95 | 96 | actualErrorType := reflect.TypeOf(err) 97 | expectErrorType := reflect.TypeOf(test.expected_err) 98 | if actualErrorType != expectErrorType { 99 | t.Errorf("%s error: expected %v, got %v", test.name, actualErrorType, expectErrorType) 100 | } 101 | } 102 | } 103 | 104 | func TestGetClusterRegion(t *testing.T) { 105 | infraObj := 
// TestGCPProviderDecodeEncode round-trips a GCP machine's providerSpec:
// the machine is loaded through the fake client, its providerSpec is decoded
// with getGCPDecodedProviderSpec, and the decoded spec is re-encoded with
// encodeProviderSpec. Any error in either direction fails the test.
// Two fixtures cover the pre-4.11 and 4.11 providerSpec layouts.
func TestGCPProviderDecodeEncode(t *testing.T) {
	tests := []struct {
		m machineapi.Machine
	}{
		{
			m: testutils.CreateGCPMachineObjPre411("master-0", "decode", "master", "us-east1", "us-east1-b"),
		},
		{
			m: testutils.CreateGCPMachineObj411("master-0", "decode", "master", "us-east1", "us-east1-b"),
		},
	}

	for _, test := range tests {
		t.Run("", func(t *testing.T) {
			// Taking the address of the range variable is intentional here:
			// the machine is reloaded into the same struct below.
			objs := []runtime.Object{&test.m} // #nosec G601
			mocks := testutils.NewTestMock(t, objs)
			machineInfo := types.NamespacedName{
				Name:      test.m.GetName(),
				Namespace: test.m.GetNamespace(),
			}

			// Refresh the fixture from the fake client so it carries the
			// server-populated fields before decoding.
			err := mocks.FakeKubeClient.Get(context.TODO(), machineInfo, &test.m) // #nosec G601
			if err != nil {
				t.Fatalf("Couldn't reload machine %s: %v", test.m.GetName(), err)
			}

			decodedSpec, err := getGCPDecodedProviderSpec(test.m, mocks.Scheme)
			if err != nil {
				t.Fatalf("Failed to decode machine %s: %v", test.m.GetName(), err)
			}

			_, err = encodeProviderSpec(decodedSpec, mocks.Scheme)

			if err != nil {
				t.Fatalf("Failed to encode ProviderSpec for machine %s: %v", test.m.GetName(), err)
			}
		})
	}

}
"projects/sda-ccs-3/managedZones/cs-ci-jsm5n-7zbzx-private-zone" 163 | 164 | if sanitizeZoneID(zoneIdSanitized) != zoneIdSanitized { 165 | t.Fatalf("sanitizeZoneId() sanitized an already sanitized zone ID") 166 | } 167 | 168 | if sanitizeZoneID(zoneIdUnsanitized) != zoneIdSanitized { 169 | t.Fatalf("sanitizeZoneId() did not return a sanitized zone ID") 170 | } 171 | } 172 | -------------------------------------------------------------------------------- /pkg/cloudclient/mock_cloudclient/mock_cloudclient.go: -------------------------------------------------------------------------------- 1 | // Code generated by MockGen. DO NOT EDIT. 2 | // Source: pkg/cloudclient/cloudclient.go 3 | 4 | // Package mock_cloudclient is a generated GoMock package. 5 | package mock_cloudclient 6 | 7 | import ( 8 | context "context" 9 | reflect "reflect" 10 | 11 | gomock "github.com/golang/mock/gomock" 12 | v1alpha1 "github.com/openshift/cloud-ingress-operator/api/v1alpha1" 13 | v1 "k8s.io/api/core/v1" 14 | client "sigs.k8s.io/controller-runtime/pkg/client" 15 | ) 16 | 17 | // MockCloudClient is a mock of CloudClient interface. 18 | type MockCloudClient struct { 19 | ctrl *gomock.Controller 20 | recorder *MockCloudClientMockRecorder 21 | } 22 | 23 | // MockCloudClientMockRecorder is the mock recorder for MockCloudClient. 24 | type MockCloudClientMockRecorder struct { 25 | mock *MockCloudClient 26 | } 27 | 28 | // NewMockCloudClient creates a new mock instance. 29 | func NewMockCloudClient(ctrl *gomock.Controller) *MockCloudClient { 30 | mock := &MockCloudClient{ctrl: ctrl} 31 | mock.recorder = &MockCloudClientMockRecorder{mock} 32 | return mock 33 | } 34 | 35 | // EXPECT returns an object that allows the caller to indicate expected use. 36 | func (m *MockCloudClient) EXPECT() *MockCloudClientMockRecorder { 37 | return m.recorder 38 | } 39 | 40 | // DeleteAdminAPIDNS mocks base method. 
41 | func (m *MockCloudClient) DeleteAdminAPIDNS(arg0 context.Context, arg1 client.Client, arg2 *v1alpha1.APIScheme, arg3 *v1.Service) error { 42 | m.ctrl.T.Helper() 43 | ret := m.ctrl.Call(m, "DeleteAdminAPIDNS", arg0, arg1, arg2, arg3) 44 | ret0, _ := ret[0].(error) 45 | return ret0 46 | } 47 | 48 | // DeleteAdminAPIDNS indicates an expected call of DeleteAdminAPIDNS. 49 | func (mr *MockCloudClientMockRecorder) DeleteAdminAPIDNS(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { 50 | mr.mock.ctrl.T.Helper() 51 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAdminAPIDNS", reflect.TypeOf((*MockCloudClient)(nil).DeleteAdminAPIDNS), arg0, arg1, arg2, arg3) 52 | } 53 | 54 | // EnsureAdminAPIDNS mocks base method. 55 | func (m *MockCloudClient) EnsureAdminAPIDNS(arg0 context.Context, arg1 client.Client, arg2 *v1alpha1.APIScheme, arg3 *v1.Service) error { 56 | m.ctrl.T.Helper() 57 | ret := m.ctrl.Call(m, "EnsureAdminAPIDNS", arg0, arg1, arg2, arg3) 58 | ret0, _ := ret[0].(error) 59 | return ret0 60 | } 61 | 62 | // EnsureAdminAPIDNS indicates an expected call of EnsureAdminAPIDNS. 63 | func (mr *MockCloudClientMockRecorder) EnsureAdminAPIDNS(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { 64 | mr.mock.ctrl.T.Helper() 65 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureAdminAPIDNS", reflect.TypeOf((*MockCloudClient)(nil).EnsureAdminAPIDNS), arg0, arg1, arg2, arg3) 66 | } 67 | 68 | // Healthcheck mocks base method. 69 | func (m *MockCloudClient) Healthcheck(arg0 context.Context, arg1 client.Client) error { 70 | m.ctrl.T.Helper() 71 | ret := m.ctrl.Call(m, "Healthcheck", arg0, arg1) 72 | ret0, _ := ret[0].(error) 73 | return ret0 74 | } 75 | 76 | // Healthcheck indicates an expected call of Healthcheck. 
77 | func (mr *MockCloudClientMockRecorder) Healthcheck(arg0, arg1 interface{}) *gomock.Call { 78 | mr.mock.ctrl.T.Helper() 79 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Healthcheck", reflect.TypeOf((*MockCloudClient)(nil).Healthcheck), arg0, arg1) 80 | } 81 | 82 | // SetDefaultAPIPrivate mocks base method. 83 | func (m *MockCloudClient) SetDefaultAPIPrivate(arg0 context.Context, arg1 client.Client, arg2 *v1alpha1.PublishingStrategy) error { 84 | m.ctrl.T.Helper() 85 | ret := m.ctrl.Call(m, "SetDefaultAPIPrivate", arg0, arg1, arg2) 86 | ret0, _ := ret[0].(error) 87 | return ret0 88 | } 89 | 90 | // SetDefaultAPIPrivate indicates an expected call of SetDefaultAPIPrivate. 91 | func (mr *MockCloudClientMockRecorder) SetDefaultAPIPrivate(arg0, arg1, arg2 interface{}) *gomock.Call { 92 | mr.mock.ctrl.T.Helper() 93 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDefaultAPIPrivate", reflect.TypeOf((*MockCloudClient)(nil).SetDefaultAPIPrivate), arg0, arg1, arg2) 94 | } 95 | 96 | // SetDefaultAPIPublic mocks base method. 97 | func (m *MockCloudClient) SetDefaultAPIPublic(arg0 context.Context, arg1 client.Client, arg2 *v1alpha1.PublishingStrategy) error { 98 | m.ctrl.T.Helper() 99 | ret := m.ctrl.Call(m, "SetDefaultAPIPublic", arg0, arg1, arg2) 100 | ret0, _ := ret[0].(error) 101 | return ret0 102 | } 103 | 104 | // SetDefaultAPIPublic indicates an expected call of SetDefaultAPIPublic. 
package utils

import (
	cloudingressv1alpha1 "github.com/openshift/cloud-ingress-operator/api/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// UpdateConditionCheck tests whether a condition should be updated from the
// old condition to the new condition. Returns true if the condition should
// be updated.
type UpdateConditionCheck func(oldReason, oldMessage, newReason, newMessage string) bool

// UpdateConditionAlways returns true. The condition will always be updated.
func UpdateConditionAlways(_, _, _, _ string) bool {
	return true
}

// UpdateConditionNever returns false. The condition will never be updated,
// unless there is a change in the status of the condition.
func UpdateConditionNever(_, _, _, _ string) bool {
	return false
}
// UpdateConditionIfReasonOrMessageChange returns true if there is a change
// in the reason or the message of the condition.
func UpdateConditionIfReasonOrMessageChange(oldReason, oldMessage, newReason, newMessage string) bool {
	return oldReason != newReason ||
		oldMessage != newMessage
}

// SetAPISchemeCondition sets a condition on an APIScheme resource's status.
// Note that it APPENDS a new condition entry (it never rewrites an existing
// one in place), so the condition history grows over time; callers read the
// latest state via GetLastAPISchemeCondition.
//
// Parameters:
//   - conditions: the current condition history from the APIScheme status
//   - conditionType: the type of the condition being recorded
//   - status: True/False/Unknown for the condition
//   - reason, message: machine- and human-readable explanations
//   - updateConditionCheck: policy deciding whether an unchanged-status
//     condition should still be appended (see UpdateConditionCheck)
//
// Returns the (possibly extended) condition slice.
func SetAPISchemeCondition(
	conditions []cloudingressv1alpha1.APISchemeCondition,
	conditionType cloudingressv1alpha1.APISchemeConditionType,
	status corev1.ConditionStatus,
	reason string,
	message string,
	updateConditionCheck UpdateConditionCheck,
) []cloudingressv1alpha1.APISchemeCondition {
	now := metav1.Now()
	existingCondition := GetLastAPISchemeCondition(conditions)
	if existingCondition == nil && status == corev1.ConditionFalse {
		// While the LB is being recreated the first time, we don't update
		// the status to avoid clogging it.
		return conditions
	}

	if existingCondition == nil || shouldUpdateCondition(
		existingCondition.Status, existingCondition.Reason, existingCondition.Message,
		status, reason, message,
		updateConditionCheck,
	) {
		// Both LastTransitionTime and LastProbeTime are stamped with the
		// same timestamp, since each append represents a fresh observation.
		conditions = append(
			conditions,
			cloudingressv1alpha1.APISchemeCondition{
				Type:               conditionType,
				Status:             status,
				Reason:             reason,
				Message:            message,
				LastTransitionTime: now,
				LastProbeTime:      now,
			},
		)
	}

	return conditions
}

// GetLastAPISchemeCondition returns the most recently appended condition,
// or nil when the history is empty.
func GetLastAPISchemeCondition(conditions []cloudingressv1alpha1.APISchemeCondition) *cloudingressv1alpha1.APISchemeCondition {
	if len(conditions) == 0 {
		return nil
	}
	return &conditions[len(conditions)-1]
}
conditionType { 80 | return &conditions[i] 81 | } 82 | } 83 | return nil 84 | } 85 | 86 | func shouldUpdateCondition( 87 | oldStatus corev1.ConditionStatus, oldReason, oldMessage string, 88 | newStatus corev1.ConditionStatus, newReason, newMessage string, 89 | updateConditionCheck UpdateConditionCheck, 90 | ) bool { 91 | if oldStatus != newStatus { 92 | return true 93 | } 94 | return updateConditionCheck(oldReason, oldMessage, newReason, newMessage) 95 | } 96 | -------------------------------------------------------------------------------- /pkg/controllerutils/utils.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | // Remove removes a string from a list of strings 4 | func Remove(list []string, s string) []string { 5 | for i, v := range list { 6 | if v == s { 7 | list = append(list[:i], list[i+1:]...) 8 | } 9 | } 10 | return list 11 | } 12 | 13 | // Contains returns true a list of strings includes a specific string 14 | func Contains(list []string, s string) bool { 15 | for _, v := range list { 16 | if v == s { 17 | return true 18 | } 19 | } 20 | return false 21 | } 22 | -------------------------------------------------------------------------------- /pkg/errors/errors.go: -------------------------------------------------------------------------------- 1 | package errors 2 | 3 | import ( 4 | "fmt" 5 | ) 6 | 7 | type LoadBalancerNotReadyError struct { 8 | e string 9 | } 10 | 11 | func (e *LoadBalancerNotReadyError) Error() string { return e.e } 12 | 13 | func NewLoadBalancerNotReadyError() error { 14 | return &LoadBalancerNotReadyError{ 15 | e: "Load balancer for service is not yet ready", 16 | } 17 | } 18 | 19 | type ForwardingRuleNotFoundError struct { 20 | e string 21 | } 22 | 23 | func (e *ForwardingRuleNotFoundError) Error() string { return e.e } 24 | 25 | // Only needed for GCP 26 | func ForwardingRuleNotFound(reason string) error { 27 | return &ForwardingRuleNotFoundError{ 28 | e: "forwarding rule 
package ingresscontroller

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/scheme"
)

// AddToSchemes may be used to add all resources defined in the project to a Scheme
var AddToSchemes runtime.SchemeBuilder

// AddToScheme adds all Resources to the Scheme
func AddToScheme(s *runtime.Scheme) error {
	return AddToSchemes.AddToScheme(s)
}

// init wires the IngressController types into the package-level scheme
// builder so callers of AddToScheme pick them up automatically.
func init() {
	ICSchemeBuilder.Register(&IngressController{}, &IngressControllerList{})
	// Register the types with the Scheme so the components can map objects to GroupVersionKinds and back
	AddToSchemes = append(AddToSchemes, ICSchemeBuilder.SchemeBuilder.AddToScheme)
}

var (
	// SchemeGroupVersion is group version used to register these objects
	SchemeGroupVersion = schema.GroupVersion{Group: "operator.openshift.io", Version: "v1"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
	ICSchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
)
MetricDefaultIngressController = prometheus.NewGauge(prometheus.GaugeOpts{ 9 | Name: "cloud_ingress_operator_default_ingress", 10 | Help: "Report if default ingress is on cluster", 11 | }) 12 | MetricAPISchemeConditionStatus = prometheus.NewGauge(prometheus.GaugeOpts{ 13 | Name: "cloud_ingress_operator_apischeme_status", 14 | Help: "Report the status of the APIScheme status", 15 | }) 16 | 17 | MetricsList = []prometheus.Collector{ 18 | MetricDefaultIngressController, 19 | MetricAPISchemeConditionStatus, 20 | } 21 | ) 22 | -------------------------------------------------------------------------------- /pkg/utils/clusterversion.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | "regexp" 8 | 9 | compare "github.com/hashicorp/go-version" 10 | configv1 "github.com/openshift/api/config/v1" 11 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 12 | "k8s.io/apimachinery/pkg/runtime" 13 | "k8s.io/apimachinery/pkg/runtime/schema" 14 | "k8s.io/apimachinery/pkg/types" 15 | "sigs.k8s.io/controller-runtime/pkg/client" 16 | ) 17 | 18 | // GetClusterVersionObject returns the canonical ClusterVersion object 19 | // To check current version: `output.Status.History[0].Version` 20 | // 21 | // `history contains a list of the most recent versions applied to the cluster. 22 | // This value may be empty during cluster startup, and then will be updated when a new update is being applied. 23 | // The newest update is first in the list and it is ordered by recency` 24 | // 25 | // Note: 26 | // This can be queried inside the controllers, caching doesn't apply if the scope is global rather than namespaced. 
27 | func GetClusterVersionObject(kclient client.Client) (*configv1.ClusterVersion, error) { 28 | u := &unstructured.Unstructured{} 29 | u.SetGroupVersionKind(schema.GroupVersionKind{ 30 | Group: "", 31 | Version: "config.openshift.io/v1", 32 | Kind: "ClusterVersion", 33 | }) 34 | ns := types.NamespacedName{ 35 | Namespace: "", 36 | Name: "version", 37 | } 38 | err := kclient.Get(context.TODO(), ns, u) 39 | if err != nil { 40 | return nil, err 41 | } 42 | 43 | uContent := u.UnstructuredContent() 44 | var cv *configv1.ClusterVersion 45 | err = runtime.DefaultUnstructuredConverter.FromUnstructured(uContent, &cv) 46 | if err != nil { 47 | return nil, err 48 | } 49 | 50 | return cv, nil 51 | } 52 | 53 | // SetClusterVersion sets the cluster version globally(to ENV as CLUSTER_VERSION) 54 | func SetClusterVersion(kclient client.Client) error { 55 | versionObject, err := GetClusterVersionObject(kclient) 56 | if err != nil { 57 | return err 58 | } 59 | 60 | // handle when there's no object defined || no version found on history 61 | if len(versionObject.Status.History) == 0 || versionObject == nil { 62 | return fmt.Errorf("version couldn't be grabbed from clusterversion: %+v", versionObject) // (%+v) adds field names 63 | } 64 | 65 | return os.Setenv("CLUSTER_VERSION", versionObject.Status.History[0].Version) 66 | } 67 | 68 | // IsVersionHigherThan checks whether the given version is higher than the cluster version 69 | // input is required to be a version such as: 4.10 or 4.10.1 70 | // Returns false(no action) if there's an exception. 
71 | func IsVersionHigherThan(input string) bool { 72 | version, ok := os.LookupEnv("CLUSTER_VERSION") 73 | if !ok { 74 | return false 75 | } 76 | // Handle the clusternames that have more than 4 chars(such as 4.10.0-rc.4) 77 | re := regexp.MustCompile("([0-9]+).([0-9]+)([0-9]?)") 78 | shortVersion := re.FindString(version) 79 | 80 | EnvVersion, err := compare.NewVersion(shortVersion) 81 | if err != nil { 82 | return false 83 | } 84 | 85 | inputVersion, err := compare.NewVersion(input) 86 | if err != nil { 87 | return false 88 | } 89 | 90 | if EnvVersion.LessThan(inputVersion) { 91 | return false 92 | } 93 | 94 | return true // input greater than env so action 95 | } 96 | -------------------------------------------------------------------------------- /pkg/utils/clusterversion_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | ) 7 | 8 | func TestIsVersionHigher(t *testing.T) { 9 | 10 | tests := []struct { 11 | Name string 12 | CompareExpected bool 13 | ErrorReason string 14 | clusterversion string 15 | }{ 16 | { 17 | Name: "Compare Cluster Version 4.10.44-rc", 18 | CompareExpected: true, 19 | ErrorReason: "Wrong Comparison, it should be higher than 4.10 ", 20 | clusterversion: "4.10.44-rc", 21 | }, 22 | { 23 | Name: "Compare Cluster Version 4.9.nightly.44-rc", 24 | CompareExpected: false, 25 | ErrorReason: "Wrong Comparison, it should be lower than 4.10", 26 | clusterversion: "4.9.nightly.44-rc", 27 | }, 28 | } 29 | for _, test := range tests { 30 | 31 | defer os.Unsetenv("CLUSTER_VERSION") 32 | 33 | os.Setenv("CLUSTER_VERSION", test.clusterversion) 34 | 35 | CompareResult := IsVersionHigherThan("4.10") 36 | if CompareResult != test.CompareExpected { 37 | t.Errorf("Test [%v] return mismatch: [%v]. 
Expect %t: Return %+v", test.Name, test.ErrorReason, test.CompareExpected, CompareResult) 38 | } 39 | 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /pkg/utils/healthcheck.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/openshift/cloud-ingress-operator/pkg/ingresscontroller" 7 | "k8s.io/apimachinery/pkg/types" 8 | "sigs.k8s.io/controller-runtime/pkg/client" 9 | ) 10 | 11 | // SAhealthcheck will perform a basic call to make sure ingresscontrollers is reachable 12 | // covers: https://github.com/openshift/cloud-ingress-operator/blob/32e50ef2aa8571f9bb60aaf53ed9d1262cc2c083/deploy/20_cloud-ingress-operator_openshift-ingress-operator.Role.yaml#L39-L50 13 | func SAhealthcheck(kclient client.Client) error { 14 | var op ingresscontroller.IngressController 15 | ns := types.NamespacedName{ 16 | Namespace: "openshift-ingress-operator", 17 | Name: "default", 18 | } 19 | return kclient.Get(context.TODO(), ns, &op) 20 | } 21 | -------------------------------------------------------------------------------- /pkg/utils/healthcheck_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/openshift/cloud-ingress-operator/pkg/ingresscontroller" 7 | "github.com/openshift/cloud-ingress-operator/pkg/testutils" 8 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | "k8s.io/apimachinery/pkg/runtime" 10 | ) 11 | 12 | func TestSAhealthcheck(t *testing.T) { 13 | ingressCO := &ingresscontroller.IngressController{ 14 | ObjectMeta: v1.ObjectMeta{ 15 | Name: "default", 16 | Namespace: "openshift-ingress-operator", 17 | }, 18 | } 19 | objs := []runtime.Object{ingressCO} 20 | mocks := testutils.NewTestMock(t, objs) 21 | if err := SAhealthcheck(mocks.FakeKubeClient); err != nil { 22 | t.Error("checking ingresscontroller failed:", err) 23 | } 24 | 
} 25 | -------------------------------------------------------------------------------- /pkg/utils/infrastructure.go: -------------------------------------------------------------------------------- 1 | // This package is for low-level utility functions used by both controllers 2 | // and CloudClient interface implementations. 3 | package utils 4 | 5 | import ( 6 | "context" 7 | "fmt" 8 | "net/url" 9 | 10 | configv1 "github.com/openshift/api/config/v1" 11 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 12 | "k8s.io/apimachinery/pkg/runtime" 13 | "k8s.io/apimachinery/pkg/runtime/schema" 14 | "k8s.io/apimachinery/pkg/types" 15 | "sigs.k8s.io/controller-runtime/pkg/client" 16 | ) 17 | 18 | // GetInfrastructureObject returns the canonical Infrastructure object 19 | func GetInfrastructureObject(kclient client.Client) (*configv1.Infrastructure, error) { 20 | u := &unstructured.Unstructured{} 21 | u.SetGroupVersionKind(schema.GroupVersionKind{ 22 | Group: "", 23 | Version: "config.openshift.io/v1", 24 | Kind: "infrastructure", 25 | }) 26 | ns := types.NamespacedName{ 27 | Namespace: "", 28 | Name: "cluster", 29 | } 30 | err := kclient.Get(context.TODO(), ns, u) 31 | if err != nil { 32 | return nil, err 33 | } 34 | 35 | uContent := u.UnstructuredContent() 36 | var infra *configv1.Infrastructure 37 | err = runtime.DefaultUnstructuredConverter.FromUnstructured(uContent, &infra) 38 | if err != nil { 39 | return nil, err 40 | } 41 | 42 | return infra, nil 43 | } 44 | 45 | // GetClusterBaseDomain returns the installed clsuter's base domain name 46 | func GetClusterBaseDomain(kclient client.Client) (string, error) { 47 | infra, err := GetInfrastructureObject(kclient) 48 | if err != nil { 49 | return "", err 50 | } 51 | serverURL, err := url.Parse(infra.Status.APIServerURL) 52 | if err != nil { 53 | return "", fmt.Errorf("couldn't parse the API server URL from %s: %s", infra.Status.APIServerURL, err) 54 | } 55 | // Trim the leading "api." from the hostname. 
56 | return serverURL.Hostname()[4:], nil 57 | } 58 | 59 | // GetClusterName returns the installed cluster's name (max 27 characters) 60 | func GetClusterName(kclient client.Client) (string, error) { 61 | infra, err := GetInfrastructureObject(kclient) 62 | if err != nil { 63 | return "", err 64 | } 65 | return infra.Status.InfrastructureName, nil 66 | } 67 | 68 | // GetPlatformType returns the cloud platform type for the cluster 69 | func GetPlatformType(kclient client.Client) (*configv1.PlatformType, error) { 70 | infra, err := GetInfrastructureObject(kclient) 71 | if err != nil { 72 | return nil, err 73 | } 74 | return &infra.Status.PlatformStatus.Type, nil 75 | } 76 | -------------------------------------------------------------------------------- /pkg/utils/infrastructure_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/openshift/cloud-ingress-operator/pkg/testutils" 8 | "k8s.io/apimachinery/pkg/runtime" 9 | ) 10 | 11 | func TestClusterBaseDomain(t *testing.T) { 12 | infraObj := testutils.CreateInfraObject("basename", testutils.DefaultAPIEndpoint, testutils.DefaultAPIEndpoint, testutils.DefaultRegionName) 13 | objs := []runtime.Object{infraObj} 14 | mocks := testutils.NewTestMock(t, objs) 15 | 16 | base, err := GetClusterBaseDomain(mocks.FakeKubeClient) 17 | if err != nil { 18 | t.Fatalf("Could not get cluster base domain name: %v", err) 19 | } 20 | if base != "unit.test" { 21 | t.Fatalf("Base domain mismatch. 
// TestGetClusterName verifies that GetClusterName returns the
// InfrastructureName set on the Infrastructure object.
func TestGetClusterName(t *testing.T) {
	clustername := "cluster-test-name"
	infraObj := testutils.CreateInfraObject(clustername, testutils.DefaultAPIEndpoint, testutils.DefaultAPIEndpoint, testutils.DefaultRegionName)
	objs := []runtime.Object{infraObj}
	mocks := testutils.NewTestMock(t, objs)

	name, err := GetClusterName(mocks.FakeKubeClient)
	if err != nil {
		t.Fatalf("Couldn't get cluster name %v", err)
	}
	if name != clustername {
		t.Fatalf("Expected cluster name to be %s, got %s instead", clustername, name)
	}
}

// None of these should ever occur, but if they did, it'd be nice to know they return an error
func TestNoInfraObj(t *testing.T) {
	// Seed the fake client with machines only — deliberately NO
	// Infrastructure object — so each getter below must fail.
	masterNames := make([]string, 3)
	for i := 0; i < 3; i++ {
		masterNames[i] = fmt.Sprintf("master-%d", i)
	}
	machineList, _ := testutils.CreateMachineObjectList(masterNames, "ids", "master", testutils.DefaultRegionName, testutils.DefaultAzName)
	objs := []runtime.Object{machineList}
	mocks := testutils.NewTestMock(t, objs)

	_, err := GetClusterBaseDomain(mocks.FakeKubeClient)
	if err == nil {
		t.Fatalf("Expected to get an error from not having an Infrastructure object")
	}
	_, err = GetClusterName(mocks.FakeKubeClient)
	if err == nil {
		t.Fatalf("Expected to get an error from not having an Infrastructure object")
	}
	_, err = GetPlatformType(mocks.FakeKubeClient)
	if err == nil {
		t.Fatalf("Expected to get an error from not having an Infrastructure object")
	}
}
3 | package utils 4 | 5 | import ( 6 | "context" 7 | "encoding/json" 8 | "fmt" 9 | 10 | machinev1 "github.com/openshift/api/machine/v1" 11 | machineapi "github.com/openshift/api/machine/v1beta1" 12 | "k8s.io/apimachinery/pkg/runtime" 13 | "sigs.k8s.io/controller-runtime/pkg/client" 14 | ) 15 | 16 | const ( 17 | masterMachineLabel string = "machine.openshift.io/cluster-api-machine-role" 18 | machineApiNamespace string = "openshift-machine-api" 19 | cpmsName string = "cluster" 20 | ) 21 | 22 | // GetMasterMachines returns a MachineList object whose .Items can be iterated 23 | // over to perform actions on/with information from each master machine object. 24 | func GetMasterMachines(kclient client.Client) (*machineapi.MachineList, error) { 25 | machineList := &machineapi.MachineList{} 26 | listOptions := []client.ListOption{ 27 | client.InNamespace("openshift-machine-api"), 28 | client.MatchingLabels{masterMachineLabel: "master"}, 29 | } 30 | err := kclient.List(context.TODO(), machineList, listOptions...) 31 | if err != nil { 32 | return nil, err 33 | } 34 | return machineList, nil 35 | } 36 | 37 | // GetControlPlaneMachineSet returns an OSD cluster's CPMS. 38 | func GetControlPlaneMachineSet(kclient client.Client) (*machinev1.ControlPlaneMachineSet, error) { 39 | cpms := &machinev1.ControlPlaneMachineSet{} 40 | key := client.ObjectKey{ 41 | Namespace: machineApiNamespace, 42 | Name: cpmsName, 43 | } 44 | err := kclient.Get(context.TODO(), key, cpms) 45 | if err != nil { 46 | return nil, fmt.Errorf("failed to get controlplanemachineset: %w", err) 47 | } 48 | return cpms, nil 49 | } 50 | 51 | // DeleteCPMS will remove the CPMS of the cluster - in OSD this will trigger the 52 | // CPMS to be recreated in an inactive state. 53 | func DeleteCPMS(ctx context.Context, kclient client.Client, cpms *machinev1.ControlPlaneMachineSet) error { 54 | return kclient.Delete(ctx, cpms) 55 | } 56 | 57 | // SetCPMSActive will set a CPMS back to active. 
58 | // This is required after calling DeleteCPMS, as it will recreate the CPMS in an inactive state. 59 | func SetCPMSActive(ctx context.Context, kclient client.Client, cpms *machinev1.ControlPlaneMachineSet) error { 60 | patch := client.MergeFrom(cpms.DeepCopy()) 61 | cpms.Spec.State = machinev1.ControlPlaneMachineSetStateActive 62 | return kclient.Patch(ctx, cpms, patch) 63 | } 64 | 65 | func ConvertFromRawExtension[T any](extension *runtime.RawExtension) (*T, error) { 66 | t := new(T) 67 | if extension == nil { 68 | return t, fmt.Errorf("can not convert nil to type") 69 | } 70 | if err := json.Unmarshal(extension.Raw, &t); err != nil { 71 | return t, fmt.Errorf("error unmarshalling providerSpec: %v", err) 72 | } 73 | return t, nil 74 | } 75 | 76 | func ConvertToRawBytes(t interface{}) ([]byte, error) { 77 | raw, err := json.Marshal(t) 78 | if err != nil { 79 | return nil, fmt.Errorf("could not marshal provided type: %v", err) 80 | } 81 | return raw, nil 82 | } 83 | -------------------------------------------------------------------------------- /resources/20_cloud-ingress-operator_kube-apiserver.Role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: cloud-ingress-operator 5 | namespace: openshift-kube-apiserver 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - services 11 | - services/finalizers 12 | verbs: 13 | - get 14 | - list 15 | - watch 16 | - create 17 | - update 18 | - delete 19 | - apiGroups: 20 | - apps 21 | resources: 22 | - deployments 23 | verbs: 24 | - get 25 | - list 26 | - watch 27 | - apiGroups: 28 | - cloudingress.managed.openshift.io 29 | resources: 30 | - '*' 31 | verbs: 32 | - create 33 | - delete 34 | - get 35 | - list 36 | - patch 37 | - update 38 | - watch 39 | 40 | -------------------------------------------------------------------------------- 
/resources/20_cloud-ingress-operator_kube-apiserver.RoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: cloud-ingress-operator 5 | namespace: openshift-kube-apiserver 6 | subjects: 7 | - kind: ServiceAccount 8 | name: cloud-ingress-operator 9 | namespace: openshift-cloud-ingress-operator 10 | roleRef: 11 | kind: Role 12 | name: cloud-ingress-operator 13 | namespace: openshift-kube-apiserver 14 | apiGroup: rbac.authorization.k8s.io -------------------------------------------------------------------------------- /resources/20_cloud-ingress-operator_machine.Role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: cloud-ingress-operator 5 | namespace: openshift-machine-api 6 | rules: 7 | - apiGroups: 8 | - machine.openshift.io 9 | resources: 10 | - machines 11 | - machinesets 12 | verbs: 13 | - get 14 | - list 15 | - watch 16 | - patch 17 | - update 18 | - apiGroups: 19 | - machine.openshift.io 20 | resources: 21 | - controlplanemachinesets 22 | verbs: 23 | - create 24 | - delete 25 | - get 26 | - list 27 | - patch 28 | - update 29 | - watch 30 | - apiGroups: 31 | - "" 32 | resources: 33 | - services 34 | verbs: 35 | - get 36 | - list 37 | - watch 38 | - apiGroups: 39 | - apps 40 | resources: 41 | - deployments 42 | verbs: 43 | - get 44 | - list 45 | - watch 46 | - apiGroups: 47 | - cloudingress.managed.openshift.io 48 | resources: 49 | - '*' 50 | verbs: 51 | - create 52 | - delete 53 | - get 54 | - list 55 | - patch 56 | - update 57 | - watch 58 | 59 | -------------------------------------------------------------------------------- /resources/20_cloud-ingress-operator_machine.RoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: 
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: cloud-ingress-operator
  namespace: openshift-machine-api
subjects:
- kind: ServiceAccount
  name: cloud-ingress-operator
  namespace: openshift-cloud-ingress-operator
roleRef:
  kind: Role
  name: cloud-ingress-operator
  # Note: RoleRef in rbac.authorization.k8s.io/v1 has no "namespace" field
  # (only apiGroup, kind, name); the referenced Role is resolved in this
  # RoleBinding's own namespace. The previous extra field fails strict
  # validation (e.g. "kubectl apply" client-side validation).
  apiGroup: rbac.authorization.k8s.io
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: cloud-ingress-operator
  namespace: openshift-ingress
subjects:
- kind: ServiceAccount
  name: cloud-ingress-operator
  namespace: openshift-cloud-ingress-operator
roleRef:
  kind: Role
  name: cloud-ingress-operator
  # Note: RoleRef in rbac.authorization.k8s.io/v1 has no "namespace" field
  # (only apiGroup, kind, name); the referenced Role is resolved in this
  # RoleBinding's own namespace. The previous extra field fails strict
  # validation (e.g. "kubectl apply" client-side validation). The
  # openshift-ingress-operator RoleBinding has the same defect and needs the
  # same fix.
  apiGroup: rbac.authorization.k8s.io
5 | RUN CGO_ENABLED=0 GOFLAGS="-mod=mod" go test ./test/e2e -v -c --tags=osde2e -o /e2e.test 6 | 7 | FROM registry.access.redhat.com/ubi8/ubi-minimal:latest 8 | COPY --from=builder ./e2e.test e2e.test 9 | ENTRYPOINT [ "/e2e.test" ] 10 | -------------------------------------------------------------------------------- /test/e2e/README.md: -------------------------------------------------------------------------------- 1 | ## Locally running e2e test suite 2 | When updating your operator it's beneficial to add e2e tests for new functionality AND ensure existing functionality is not breaking using e2e tests. 3 | To do this, following steps are recommended 4 | 5 | 1. Run "make e2e-binary-build" to make sure e2e tests build 6 | 2. Deploy your new version of operator in a test cluster 7 | 3. Run "go install github.com/onsi/ginkgo/ginkgo@latest" 8 | 4. Get kubeadmin credentials from your cluster using 9 | 10 | ocm get /api/clusters_mgmt/v1/clusters/(cluster-id)/credentials | jq -r .kubeconfig > /(path-to)/kubeconfig 11 | 12 | 5. Run test suite using 13 | 14 | DISABLE_JUNIT_REPORT=true KUBECONFIG=/(path-to)/kubeconfig ./(path-to)/bin/ginkgo --tags=osde2e -v test/e2e 15 | -------------------------------------------------------------------------------- /test/e2e/cloud_ingress_operator_runner_test.go: -------------------------------------------------------------------------------- 1 | // THIS FILE IS GENERATED BY BOILERPLATE. DO NOT EDIT. 2 | //go:build osde2e 3 | 4 | package osde2etests 5 | 6 | import ( 7 | "os" 8 | "path/filepath" 9 | "testing" 10 | 11 | . "github.com/onsi/ginkgo/v2" 12 | . "github.com/onsi/gomega" 13 | ) 14 | 15 | const ( 16 | testResultsDirectory = "/test-run-results" 17 | jUnitOutputFilename = "junit-cloud-ingress-operator.xml" 18 | ) 19 | 20 | // Test entrypoint. osde2e runs this as a test suite on test pod. 
21 | func TestCloudIngressOperator(t *testing.T) { 22 | RegisterFailHandler(Fail) 23 | suiteConfig, reporterConfig := GinkgoConfiguration() 24 | if _, ok := os.LookupEnv("DISABLE_JUNIT_REPORT"); !ok { 25 | reporterConfig.JUnitReport = filepath.Join(testResultsDirectory, jUnitOutputFilename) 26 | } 27 | RunSpecs(t, "Cloud Ingress Operator", suiteConfig, reporterConfig) 28 | } 29 | -------------------------------------------------------------------------------- /test/e2e/e2e-template.yml: -------------------------------------------------------------------------------- 1 | # THIS FILE IS GENERATED BY BOILERPLATE. DO NOT EDIT. 2 | apiVersion: template.openshift.io/v1 3 | kind: Template 4 | metadata: 5 | name: osde2e-focused-tests 6 | parameters: 7 | - name: OSDE2E_CONFIGS 8 | required: true 9 | - name: TEST_IMAGE 10 | required: true 11 | - name: OCM_CLIENT_ID 12 | required: false 13 | - name: OCM_CLIENT_SECRET 14 | required: false 15 | - name: OCM_CCS 16 | required: false 17 | - name: AWS_ACCESS_KEY_ID 18 | required: false 19 | - name: AWS_SECRET_ACCESS_KEY 20 | required: false 21 | - name: CLOUD_PROVIDER_REGION 22 | required: false 23 | - name: GCP_CREDS_JSON 24 | required: false 25 | - name: JOBID 26 | generate: expression 27 | from: "[0-9a-z]{7}" 28 | - name: IMAGE_TAG 29 | value: '' 30 | required: true 31 | - name: LOG_BUCKET 32 | value: 'osde2e-logs' 33 | - name: USE_EXISTING_CLUSTER 34 | value: 'TRUE' 35 | - name: CAD_PAGERDUTY_ROUTING_KEY 36 | required: false 37 | objects: 38 | - apiVersion: batch/v1 39 | kind: Job 40 | metadata: 41 | name: osde2e-cloud-ingress-operator-${IMAGE_TAG}-${JOBID} 42 | spec: 43 | backoffLimit: 0 44 | template: 45 | spec: 46 | restartPolicy: Never 47 | containers: 48 | - name: osde2e 49 | image: quay.io/redhat-services-prod/osde2e-cicada-tenant/osde2e:latest 50 | command: 51 | - /osde2e 52 | args: 53 | - test 54 | - --only-health-check-nodes 55 | - --skip-destroy-cluster 56 | - --skip-must-gather 57 | - --configs 58 | - 
${OSDE2E_CONFIGS} 59 | securityContext: 60 | runAsNonRoot: true 61 | allowPrivilegeEscalation: false 62 | capabilities: 63 | drop: ["ALL"] 64 | seccompProfile: 65 | type: RuntimeDefault 66 | env: 67 | - name: AD_HOC_TEST_IMAGES 68 | value: ${TEST_IMAGE}:${IMAGE_TAG} 69 | - name: OCM_CLIENT_ID 70 | value: ${OCM_CLIENT_ID} 71 | - name: OCM_CLIENT_SECRET 72 | value: ${OCM_CLIENT_SECRET} 73 | - name: OCM_CCS 74 | value: ${OCM_CCS} 75 | - name: AWS_ACCESS_KEY_ID 76 | value: ${AWS_ACCESS_KEY_ID} 77 | - name: AWS_SECRET_ACCESS_KEY 78 | value: ${AWS_SECRET_ACCESS_KEY} 79 | - name: CLOUD_PROVIDER_REGION 80 | value: ${CLOUD_PROVIDER_REGION} 81 | - name: GCP_CREDS_JSON 82 | value: ${GCP_CREDS_JSON} 83 | - name: LOG_BUCKET 84 | value: ${LOG_BUCKET} 85 | - name: USE_EXISTING_CLUSTER 86 | value: ${USE_EXISTING_CLUSTER} 87 | - name: CAD_PAGERDUTY_ROUTING_KEY 88 | value: ${CAD_PAGERDUTY_ROUTING_KEY} 89 | -------------------------------------------------------------------------------- /tools.go: -------------------------------------------------------------------------------- 1 | // +build tools 2 | 3 | // Place any runtime dependencies as imports in this file. 4 | // Go modules will be forced to download and install them. 5 | package tools 6 | --------------------------------------------------------------------------------