├── .azure
│   ├── build-pipeline.yaml
│   ├── cve-pipeline.yaml
│   ├── release-pipeline.yaml
│   ├── scripts
│   │   ├── install_cosign.sh
│   │   ├── install_syft.sh
│   │   ├── install_yq.sh
│   │   ├── push-to-nexus.sh
│   │   ├── release-artifacts.sh
│   │   ├── release_files_check.sh
│   │   ├── settings.xml
│   │   ├── setup-helm.sh
│   │   ├── setup-kind.sh
│   │   └── uncommitted-changes.sh
│   └── templates
│       ├── jobs
│       │   ├── build_container.yaml
│       │   ├── build_java.yaml
│       │   ├── deploy_java.yaml
│       │   ├── push_container.yaml
│       │   ├── push_helm_chart.yaml
│       │   └── run_systemtests.yaml
│       └── steps
│           ├── maven_cache.yaml
│           └── prerequisites
│               ├── install_cosign.yaml
│               ├── install_docker.yaml
│               ├── install_helm.yaml
│               ├── install_java.yaml
│               ├── install_kind.yaml
│               ├── install_syft.yaml
│               └── install_yq.yaml
├── .checkstyle
│   ├── checkstyle.xml
│   └── suppressions.xml
├── .checksums
├── .github
│   └── workflows
│       └── codeql-analysis.yml
├── .gitignore
├── .spotbugs
│   └── spotbugs-exclude.xml
├── CODE_OF_CONDUCT.md
├── Dockerfile
├── GOVERNANCE.md
├── LICENSE
├── MAINTAINERS.md
├── Makefile
├── Makefile.docker
├── Makefile.maven
├── Makefile.os
├── README.md
├── api
│   ├── pom.xml
│   └── src
│       ├── main
│       │   └── java
│       │       └── io
│       │           └── strimzi
│       │               └── kafka
│       │                   └── access
│       │                       ├── internal
│       │                       │   └── StatusUtils.java
│       │                       └── model
│       │                           ├── BindingStatus.java
│       │                           ├── KafkaAccess.java
│       │                           ├── KafkaAccessSpec.java
│       │                           ├── KafkaAccessStatus.java
│       │                           ├── KafkaReference.java
│       │                           └── KafkaUserReference.java
│       └── test
│           └── java
│               └── internal
│                   └── StatusUtilsTest.java
├── bin
│   └── access_operator_run.sh
├── development-docs
│   ├── DEV_GUIDE.md
│   └── TESTING.md
├── examples
│   ├── kafka-access-with-user.yaml
│   └── kafka-access.yaml
├── helm-charts
│   └── helm3
│       └── strimzi-access-operator
│           ├── .helmignore
│           ├── Chart.yaml
│           ├── README.md
│           ├── crds
│           │   └── 040-Crd-kafkaaccess.yaml
│           ├── templates
│           │   ├── 010-ServiceAccount.yaml
│           │   ├── 020-ClusterRole.yaml
│           │   ├── 030-ClusterRoleBinding.yaml
│           │   └── 050-Deployment.yaml
│           └── values.yaml
├── install
│   ├── 000-Namespace.yaml
│   ├── 010-ServiceAccount.yaml
│   ├── 020-ClusterRole.yaml
│   ├── 030-ClusterRoleBinding.yaml
│   ├── 040-Crd-kafkaaccess.yaml
│   └── 050-Deployment.yaml
├── operator
│   ├── pom.xml
│   └── src
│       ├── assembly
│       │   └── dist.xml
│       ├── main
│       │   ├── java
│       │   │   └── io
│       │   │       └── strimzi
│       │   │           └── kafka
│       │   │               └── access
│       │   │                   ├── KafkaAccessOperator.java
│       │   │                   ├── KafkaAccessReconciler.java
│       │   │                   ├── SecretDependentResource.java
│       │   │                   ├── internal
│       │   │                   │   ├── CustomResourceParseException.java
│       │   │                   │   ├── KafkaAccessMapper.java
│       │   │                   │   ├── KafkaListener.java
│       │   │                   │   ├── KafkaParser.java
│       │   │                   │   ├── KafkaUserData.java
│       │   │                   │   └── MissingKubernetesResourceException.java
│       │   │                   └── server
│       │   │                       └── HealthServlet.java
│       │   └── resources
│       │       └── log4j2.properties
│       └── test
│           └── java
│               └── io
│                   └── strimzi
│                       └── kafka
│                           └── access
│                               ├── Base64Encoder.java
│                               ├── KafkaAccessReconcilerTest.java
│                               ├── ResourceProvider.java
│                               ├── SecretDependentResourceTest.java
│                               └── internal
│                                   ├── KafkaAccessMapperTest.java
│                                   ├── KafkaListenerTest.java
│                                   ├── KafkaParserTest.java
│                                   ├── KafkaParserWithUserTest.java
│                                   └── KafkaUserDataTest.java
├── packaging
│   ├── examples
│   │   ├── kafka-access-with-user.yaml
│   │   └── kafka-access.yaml
│   ├── helm-charts
│   │   └── helm3
│   │       ├── Makefile
│   │       └── strimzi-access-operator
│   │           ├── .helmignore
│   │           ├── Chart.yaml
│   │           ├── README.md
│   │           ├── crds
│   │           │   └── 040-Crd-kafkaaccess.yaml
│   │           ├── templates
│   │           │   ├── 010-ServiceAccount.yaml
│   │           │   ├── 020-ClusterRole.yaml
│   │           │   ├── 030-ClusterRoleBinding.yaml
│   │           │   └── 050-Deployment.yaml
│   │           └── values.yaml
│   └── install
│       ├── 000-Namespace.yaml
│       ├── 010-ServiceAccount.yaml
│       ├── 020-ClusterRole.yaml
│       ├── 030-ClusterRoleBinding.yaml
│       ├── 040-Crd-kafkaaccess.yaml
│       └── 050-Deployment.yaml
├── pom.xml
├── release.version
└── systemtest
    ├── pom.xml
    └── src
        ├── main
        │   ├── java
        │   │   └── io
        │   │       └── strimzi
        │   │           └── kafka
        │   │               └── access
        │   │                   ├── Environment.java
        │   │                   ├── TestConstants.java
        │   │                   ├── TestStorage.java
        │   │                   ├── installation
        │   │                   │   ├── BundleInstallation.java
        │   │                   │   ├── HelmInstallation.java
        │   │                   │   └── SetupAccessOperator.java
        │   │                   ├── resources
        │   │                   │   └── KafkaAccessType.java
        │   │                   ├── templates
        │   │                   │   ├── KafkaAccessTemplates.java
        │   │                   │   ├── KafkaTemplates.java
        │   │                   │   ├── KafkaUserTemplates.java
        │   │                   │   ├── ListenerTemplates.java
        │   │                   │   └── SecretTemplates.java
        │   │                   └── utils
        │   │                       ├── Base64Utils.java
        │   │                       ├── KafkaAccessUtils.java
        │   │                       ├── ListenerUtils.java
        │   │                       └── SecretUtils.java
        │   └── resources
        │       └── log4j2.properties
        └── test
            └── java
                └── io
                    └── strimzi
                        └── kafka
                            └── access
                                ├── AbstractST.java
                                └── KafkaAccessOperatorST.java
/.azure/build-pipeline.yaml:
--------------------------------------------------------------------------------
1 | # Triggers
2 | trigger:
3 | branches:
4 | include:
5 | - 'main'
6 | - 'release-*'
7 | pr:
8 | autoCancel: true
9 | branches:
10 | include:
11 | - '*'
12 |
13 | # Stages
14 | stages:
15 | - stage: java_build
16 | displayName: Java build
17 | jobs:
18 | - template: 'templates/jobs/build_java.yaml'
19 | - stage: container_build
20 | displayName: Prepare Container
21 | dependsOn:
22 | - java_build
23 | jobs:
24 | - template: 'templates/jobs/build_container.yaml'
25 | parameters:
26 | artifactSource: 'current'
27 | artifactProject: 'strimzi'
28 | artifactPipeline: ''
29 | artifactRunVersion: ''
30 | artifactRunId: ''
31 | architectures: ['amd64', 'arm64', 's390x', 'ppc64le']
32 | - stage: run_systemtests
33 | displayName: Run System-tests
34 | dependsOn:
35 | - container_build
36 | condition: succeeded()
37 | variables:
38 | docker_org: strimzi
39 | docker_registry: localhost:5000
40 | jobs:
41 | - template: 'templates/jobs/run_systemtests.yaml'
42 | parameters:
43 |         # The system tests currently run only on amd64 on Azure because we have no environments
44 |         # for other platforms. When adding support for a new platform, do not add it here unless
45 |         # you also add system test support for it.
46 | architectures: ['amd64']
47 | - stage: container_publish
48 | displayName: Publish Container
49 | dependsOn:
50 | - run_systemtests
51 | condition: and(succeeded(), eq(variables['build.sourceBranch'], 'refs/heads/main'))
52 | jobs:
53 | - template: 'templates/jobs/push_container.yaml'
54 | parameters:
55 | dockerTag: 'latest'
56 | artifactSource: 'current'
57 | artifactProject: 'strimzi'
58 | artifactPipeline: ''
59 | artifactRunVersion: ''
60 | artifactRunId: ''
61 | architectures: ['amd64', 'arm64', 's390x', 'ppc64le']
62 | - stage: java_deploy
63 | displayName: Deploy Java
64 | dependsOn:
65 | - run_systemtests
66 | condition: and(succeeded(), or(eq(variables['build.sourceBranch'], 'refs/heads/main'), startsWith(variables['build.sourceBranch'], 'refs/heads/release-')))
67 | jobs:
68 | - template: 'templates/jobs/deploy_java.yaml'
69 | parameters:
70 | dockerTag: 'latest'
71 | artifactSource: 'current'
72 | artifactProject: 'strimzi'
73 | artifactPipeline: ''
74 | artifactRunVersion: ''
75 | artifactRunId: ''
76 |
--------------------------------------------------------------------------------
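The container_build stage above reduces to the docker_build and docker_save Make targets. As a hedged sketch, the stage can be approximated locally for a single architecture, assuming `make java_install` (or an unpacked target.tar) has already populated operator/target:

    # Reproduce the container_build stage for one architecture (env names as in the template).
    export DOCKER_BUILDKIT=1
    export DOCKER_REGISTRY="quay.io" DOCKER_ORG="strimzi" DOCKER_ARCHITECTURE="amd64"
    make docker_build docker_save   # emits access-operator-container-amd64.tar.gz
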
/.azure/cve-pipeline.yaml:
--------------------------------------------------------------------------------
1 | # Triggers
2 | trigger: none
3 | pr: none
4 |
5 | # Parameters
6 | parameters:
7 | - name: releaseVersion
8 | displayName: Release Version
9 | type: string
10 | - name: useSuffix
11 | displayName: Build suffixed images
12 | type: boolean
13 | default: true
14 | - name: releaseSuffix
15 | displayName: Release Suffix
16 | type: number
17 | - name: sourcePipelineId
18 | displayName: Pipeline ID of the source build
19 | type: number
20 | default: 51
21 | values:
22 | - 51
23 | - name: sourceBuildId
24 | displayName: Build ID of the source build
25 | type: number
26 |
27 | # Stages
28 | stages:
29 | - stage: container_build
30 | displayName: Prepare Container
31 | jobs:
32 | - template: 'templates/jobs/build_container.yaml'
33 | parameters:
34 | artifactSource: 'specific'
35 | artifactProject: 'strimzi'
36 | artifactPipeline: '${{ parameters.sourcePipelineId }}'
37 | artifactRunVersion: 'specific'
38 | artifactRunId: '${{ parameters.sourceBuildId }}'
39 | architectures: ['amd64', 'arm64', 's390x', 'ppc64le']
40 | - stage: containers_publish_with_suffix
41 | displayName: Publish Containers for ${{ parameters.releaseVersion }}-${{ parameters.releaseSuffix }}
42 | dependsOn:
43 | - container_build
44 | condition: and(succeeded(), startsWith(variables['build.sourceBranch'], 'refs/heads/release-'))
45 | jobs:
46 | - template: 'templates/jobs/push_container.yaml'
47 | parameters:
48 | dockerTag: '${{ parameters.releaseVersion }}-${{ parameters.releaseSuffix }}'
49 | artifactSource: 'current'
50 | artifactProject: 'strimzi'
51 | artifactPipeline: ''
52 | artifactRunVersion: ''
53 | artifactRunId: ''
54 | architectures: ['amd64', 'arm64', 's390x', 'ppc64le']
55 | - stage: manual_validation
56 | displayName: Validate container before pushing container as ${{ parameters.releaseVersion }}
57 | dependsOn:
58 | - containers_publish_with_suffix
59 | condition: and(succeeded(), startsWith(variables['build.sourceBranch'], 'refs/heads/release-'))
60 | jobs:
61 | - job: waitForValidation
62 | displayName: Wait for container image validation
63 | pool: server
64 | timeoutInMinutes: 4320 # job times out in 3 days
65 | steps:
66 | - task: ManualValidation@0
 67 |               timeoutInMinutes: 4310 # task times out just before the job (~3 days)
68 | inputs:
69 | notifyUsers: |
70 | github@scholzj.com
71 | xstejs24@gmail.com
72 | instructions: 'Please validate the container image'
73 | onTimeout: 'reject'
74 | - stage: containers_publish
75 | displayName: Publish Containers for ${{ parameters.releaseVersion }}
76 | dependsOn:
77 | - manual_validation
78 | - containers_publish_with_suffix
79 | condition: and(succeeded(), startsWith(variables['build.sourceBranch'], 'refs/heads/release-'))
80 | jobs:
81 | - template: 'templates/jobs/push_container.yaml'
82 | parameters:
83 | dockerTag: '${{ parameters.releaseVersion }}'
84 | artifactSource: 'current'
85 | artifactProject: 'strimzi'
86 | artifactPipeline: ''
87 | artifactRunVersion: ''
88 | artifactRunId: ''
89 | architectures: ['amd64', 'arm64', 's390x', 'ppc64le']
--------------------------------------------------------------------------------
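The CVE rebuild flow publishes twice: a suffixed tag first, then the bare release tag once the ManualValidation gate passes. A sketch of the tag handling with hypothetical parameter values (the pipeline supplies these as runtime parameters):

    # Hypothetical values for illustration only.
    RELEASE_VERSION="0.1.2"; RELEASE_SUFFIX="3"
    # containers_publish_with_suffix pushes the suffixed tag for validation:
    DOCKER_TAG="${RELEASE_VERSION}-${RELEASE_SUFFIX}" make docker_load docker_tag docker_push docker_amend_manifest
    # After manual validation, containers_publish re-publishes the bare release tag:
    DOCKER_TAG="${RELEASE_VERSION}" make docker_load docker_tag docker_push docker_amend_manifest
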
/.azure/release-pipeline.yaml:
--------------------------------------------------------------------------------
1 | # Triggers
2 | trigger: none
3 | pr: none
4 |
5 | # Parameters
6 | parameters:
7 | - name: releaseVersion
8 | displayName: Release Version
9 | type: string
10 | - name: useSuffix
11 | displayName: Build suffixed images
12 | type: boolean
13 | default: true
14 | - name: releaseSuffix
15 | displayName: Release Suffix
16 | type: number
17 | - name: sourcePipelineId
18 | displayName: Pipeline ID of the source build
19 | type: number
20 | default: 51
21 | values:
22 | - 51
23 | - name: sourceBuildId
24 | displayName: Build ID of the source build
25 | type: number
26 |
27 | # Stages
28 | stages:
29 | - stage: release_artifacts
30 | displayName: Release artifacts for ${{ parameters.releaseVersion }}
31 | condition: startsWith(variables['build.sourceBranch'], 'refs/heads/release-')
32 | jobs:
33 | - job: 'release_artifacts'
34 | displayName: 'Release Artifacts'
35 | strategy:
36 | matrix:
37 | 'java-17':
38 | image: 'Ubuntu-22.04'
39 | jdk_version: '17'
40 | # Set timeout for jobs
41 | timeoutInMinutes: 60
42 | # Base system
43 | pool:
44 | vmImage: 'Ubuntu-22.04'
45 | # Pipeline steps
46 | steps:
47 | - template: 'templates/steps/prerequisites/install_java.yaml'
48 | parameters:
49 | JDK_VERSION: $(jdk_version)
50 | - template: "templates/steps/prerequisites/install_helm.yaml"
51 | - bash: ".azure/scripts/release-artifacts.sh"
52 | env:
53 | BUILD_REASON: $(Build.Reason)
54 | BRANCH: $(Build.SourceBranch)
55 | RELEASE_VERSION: '${{ parameters.releaseVersion }}'
56 | MVN_ARGS: '-B'
57 | displayName: "Prepare release artifacts"
58 | - publish: $(System.DefaultWorkingDirectory)/strimzi-access-operator-${{ parameters.releaseVersion }}.tar.gz
59 | artifact: ReleaseTarGzArchive
60 | - publish: $(System.DefaultWorkingDirectory)/strimzi-access-operator-${{ parameters.releaseVersion }}.zip
61 | artifact: ReleaseZipArchive
62 | - publish: $(System.DefaultWorkingDirectory)/strimzi-access-operator-helm-3-chart-${{ parameters.releaseVersion }}.tgz
63 | artifact: HelmChartArchive
64 | - stage: containers_publish_with_suffix
65 | displayName: Publish Containers for ${{ parameters.releaseVersion }}-${{ parameters.releaseSuffix }}
66 | dependsOn:
67 | - release_artifacts
68 | condition: and(succeeded(), startsWith(variables['build.sourceBranch'], 'refs/heads/release-'), eq('${{ parameters.useSuffix }}', 'true'))
69 | jobs:
70 | - template: 'templates/jobs/push_container.yaml'
71 | parameters:
72 | dockerTag: '${{ parameters.releaseVersion }}-${{ parameters.releaseSuffix }}'
73 | artifactSource: 'specific'
74 | artifactProject: 'strimzi'
75 | artifactPipeline: '${{ parameters.sourcePipelineId }}'
76 | artifactRunVersion: 'specific'
77 | artifactRunId: '${{ parameters.sourceBuildId }}'
78 | architectures: ['amd64', 'arm64', 's390x', 'ppc64le']
79 | - stage: containers_publish
80 | displayName: Publish Containers for ${{ parameters.releaseVersion }}
81 | dependsOn:
82 | - release_artifacts
83 | - containers_publish_with_suffix
84 | condition: and(in(dependencies.containers_publish_with_suffix.result, 'Succeeded', 'SucceededWithIssues', 'Skipped'), startsWith(variables['build.sourceBranch'], 'refs/heads/release-'))
85 | jobs:
86 | - template: 'templates/jobs/push_container.yaml'
87 | parameters:
88 | dockerTag: '${{ parameters.releaseVersion }}'
89 | artifactSource: 'specific'
90 | artifactProject: 'strimzi'
91 | artifactPipeline: '${{ parameters.sourcePipelineId }}'
92 | artifactRunVersion: 'specific'
93 | artifactRunId: '${{ parameters.sourceBuildId }}'
94 | architectures: ['amd64', 'arm64', 's390x', 'ppc64le']
95 | # Publishes the Helm Chart as an OCI artifact to Quay.io
96 | - stage: helm_as_oci_publish
97 | displayName: Publish Helm Chart as OCI artifact
98 | dependsOn:
99 | - containers_publish
100 | condition: and(in(dependencies.containers_publish.result, 'Succeeded', 'SucceededWithIssues'), startsWith(variables['build.sourceBranch'], 'refs/heads/release-'))
101 | jobs:
102 | - template: 'templates/jobs/push_helm_chart.yaml'
103 | parameters:
104 | releaseVersion: '${{ parameters.releaseVersion }}'
105 | artifactSource: 'current'
106 | artifactProject: 'strimzi'
107 | artifactPipeline: ''
108 | artifactRunVersion: ''
109 | artifactRunId: ''
--------------------------------------------------------------------------------
/.azure/scripts/install_cosign.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
 2 | set -e
3 | readonly VERSION="2.2.0"
4 |
5 | ARCH=$1
6 | if [ -z "$ARCH" ]; then
7 | ARCH="amd64"
8 | fi
9 |
10 | curl -L https://github.com/sigstore/cosign/releases/download/v${VERSION}/cosign-linux-${ARCH} > cosign && chmod +x cosign
11 | sudo mv cosign /usr/bin/
12 |
--------------------------------------------------------------------------------
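Usage note: the script takes an optional architecture argument and defaults to amd64. For example:

    # Install the arm64 cosign binary, then sanity-check the installation.
    ./.azure/scripts/install_cosign.sh arm64
    cosign version
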
/.azure/scripts/install_syft.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -e
3 |
4 | readonly VERSION="0.90.0"
5 |
6 | ARCH=$1
7 | if [ -z "$ARCH" ]; then
8 | ARCH="amd64"
9 | fi
10 |
11 | wget https://github.com/anchore/syft/releases/download/v${VERSION}/syft_${VERSION}_linux_${ARCH}.tar.gz -O syft.tar.gz
12 | tar xf syft.tar.gz -C /tmp
13 | chmod +x /tmp/syft
14 | sudo mv /tmp/syft /usr/bin
15 |
--------------------------------------------------------------------------------
/.azure/scripts/install_yq.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
 2 | set -e
3 | ARCH=$1
4 | if [ -z "$ARCH" ]; then
5 | ARCH="amd64"
6 | fi
7 |
8 | curl -L https://github.com/mikefarah/yq/releases/download/v4.6.3/yq_linux_${ARCH} > yq && chmod +x yq
9 | sudo cp yq /usr/bin/
10 |
--------------------------------------------------------------------------------
/.azure/scripts/push-to-nexus.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -e
4 |
5 | echo "Build reason: ${BUILD_REASON}"
6 | echo "Source branch: ${BRANCH}"
7 |
8 | function cleanup() {
9 | rm -rf signing.gpg
10 | gpg --delete-keys
11 | gpg --delete-secret-keys
12 | }
13 |
14 | # Run the cleanup on failure / exit
15 | trap cleanup EXIT
16 |
17 | export GPG_TTY=$(tty)
18 | echo $GPG_SIGNING_KEY | base64 -d > signing.gpg
19 | gpg --batch --import signing.gpg
20 |
21 | GPG_EXECUTABLE=gpg mvn $MVN_ARGS -DskipTests -s ./.azure/scripts/settings.xml -pl ./,api -P ossrh deploy
22 |
23 | cleanup
--------------------------------------------------------------------------------
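The script expects GPG_SIGNING_KEY to contain a base64-encoded private key (it pipes the value through `base64 -d`). One way to produce such a value from a local keyring, with a hypothetical key ID:

    # Export an ASCII-armored secret key and base64-encode it onto a single line.
    gpg --export-secret-keys --armor 0xDEADBEEF | base64 -w0
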
/.azure/scripts/release-artifacts.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -e
3 |
4 | echo "Build reason: ${BUILD_REASON}"
5 | echo "Source branch: ${BRANCH}"
6 |
7 | echo "Releasing artifacts for ${RELEASE_VERSION}"
8 |
9 | make release
10 |
--------------------------------------------------------------------------------
/.azure/scripts/release_files_check.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -eu
4 |
5 | source ./.checksums
6 | SHA1SUM=sha1sum
7 |
8 | RETURN_CODE=0
9 |
10 | # Arrays holding the relevant information for each directory
11 | ITEMS=("install" "examples" "helm-charts")
12 | CHECKSUM_VARS=("INSTALL_CHECKSUM" "EXAMPLES_CHECKSUM" "HELM_CHART_CHECKSUM")
13 | MAKE_TARGETS=("checksum_install" "checksum_examples" "checksum_helm")
14 | DIRECTORIES=("./install" "./examples" "./helm-charts")
15 | PACKAGING_DIRS=("./packaging/install" "./packaging/examples" "./packaging/helm-charts")
16 |
17 | for i in "${!ITEMS[@]}"; do
18 | NAME="${ITEMS[$i]}"
19 | CHECKSUM_VAR="${CHECKSUM_VARS[$i]}"
20 | MAKE_TARGET="${MAKE_TARGETS[$i]}"
21 | DIRECTORY="${DIRECTORIES[$i]}"
22 | PACKAGING_DIR="${PACKAGING_DIRS[$i]}"
23 |
24 | CHECKSUM="$(make --no-print-directory $MAKE_TARGET)"
25 | EXPECTED_CHECKSUM="${!CHECKSUM_VAR}"
26 |
27 | if [ "$CHECKSUM" != "$EXPECTED_CHECKSUM" ]; then
28 | echo "ERROR: Checksums of $DIRECTORY do not match."
29 | echo " Expected: ${EXPECTED_CHECKSUM}"
30 | echo " Actual: ${CHECKSUM}"
31 | echo "If your changes to $DIRECTORY are related to a new release, please update the checksums. Otherwise, please change only the files in the $PACKAGING_DIR directory. "
32 | RETURN_CODE=$((RETURN_CODE+1))
33 | else
34 | echo "Checksums of $DIRECTORY match => OK"
35 | fi
36 | done
37 |
38 | exit $RETURN_CODE
--------------------------------------------------------------------------------
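The loop leans on Bash indirect expansion, ${!CHECKSUM_VAR}, to look up the expected value sourced from ./.checksums. The mechanism in miniature:

    # ${!name} expands to the value of the variable whose *name* is stored in name.
    INSTALL_CHECKSUM="285387a2e673598b6624805ac200d1e14d17f79d -"
    CHECKSUM_VAR="INSTALL_CHECKSUM"
    echo "${!CHECKSUM_VAR}"   # prints: 285387a2e673598b6624805ac200d1e14d17f79d -
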
/.azure/scripts/settings.xml:
--------------------------------------------------------------------------------
 1 | <settings>
 2 |     <servers>
 3 |         <server>
 4 |             <id>ossrh</id>
 5 |             <username>${env.NEXUS_USERNAME}</username>
 6 |             <password>${env.NEXUS_PASSWORD}</password>
 7 |         </server>
 8 |     </servers>
 9 | </settings>
--------------------------------------------------------------------------------
/.azure/scripts/setup-helm.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -x
3 |
4 | TEST_HELM3_VERSION=${TEST_HELM3_VERSION:-'v3.16.2'}
5 |
6 | function install_helm3 {
7 | export HELM_INSTALL_DIR=/usr/bin
8 | curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get > get_helm.sh
9 | # we need to modify the script with a different path because on the Azure pipelines the HELM_INSTALL_DIR env var is not honoured
10 | sed -i 's#/usr/local/bin#/usr/bin#g' get_helm.sh
11 | chmod 700 get_helm.sh
12 |
13 | echo "Installing helm 3..."
14 | sudo ./get_helm.sh --version "${TEST_HELM3_VERSION}"
15 |
16 | echo "Verifying the installation of helm binary..."
17 | # run a proper helm command instead of, for example, "which helm", to verify that we can call the binary
18 | helm --help
19 | helmCommandOutput=$?
20 |
21 | if [ $helmCommandOutput != 0 ]; then
22 | echo "helm binary hasn't been installed properly - exiting..."
23 | exit 1
24 | fi
25 | }
26 |
27 | install_helm3
28 |
--------------------------------------------------------------------------------
/.azure/scripts/uncommitted-changes.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -e
3 |
4 | echo "Build reason: ${BUILD_REASON}"
5 | echo "Source branch: ${BRANCH}"
6 |
7 | make crd_install
8 | make helm_install
9 |
10 | CHANGED_DERIVED=$(git diff --name-status -- packaging/install/ packaging/helm-charts/)
11 | GENERATED_FILES=$(git ls-files --other --exclude-standard -- packaging/install/ packaging/helm-charts/)
12 | if [ -n "$CHANGED_DERIVED" ] || [ -n "$GENERATED_FILES" ] ; then
13 | if [ -n "$CHANGED_DERIVED" ] ; then
14 | echo "ERROR: Uncommitted changes in derived resources:"
15 | echo "$CHANGED_DERIVED"
16 | fi
17 |
18 | if [ -n "$GENERATED_FILES" ] ; then
19 | echo "ERROR: Uncommitted changes in generated resources:"
20 | echo "$GENERATED_FILES"
21 | fi
22 |
23 | echo "Run the following to add up-to-date resources:"
24 | echo " make crd_install \\"
25 | echo " && make helm_install \\"
26 | echo " && git add packaging/ \\"
27 | echo " && git commit -s -m 'Update derived resources'"
28 | exit 1
29 | fi
30 |
--------------------------------------------------------------------------------
/.azure/templates/jobs/build_container.yaml:
--------------------------------------------------------------------------------
1 | jobs:
2 | - job: 'build_container'
3 | displayName: 'Build'
4 | # Strategy for the job
5 | strategy:
6 | matrix:
7 | ${{ each arch in parameters.architectures }}:
8 | ${{ arch }}:
9 | arch: ${{ arch }}
10 | # Set timeout for jobs
11 | timeoutInMinutes: 60
12 | # Base system
13 | pool:
14 | vmImage: 'Ubuntu-22.04'
15 | # Pipeline steps
16 | steps:
17 | - template: "../steps/prerequisites/install_docker.yaml"
18 | - task: DownloadPipelineArtifact@2
19 | inputs:
20 | source: '${{ parameters.artifactSource }}'
21 | artifact: Binary
22 | path: $(System.DefaultWorkingDirectory)/
23 | project: '${{ parameters.artifactProject }}'
24 | pipeline: '${{ parameters.artifactPipeline }}'
25 | runVersion: '${{ parameters.artifactRunVersion }}'
26 | runId: '${{ parameters.artifactRunId }}'
27 | - bash: tar -xvf target.tar
28 | displayName: "Untar the target directory"
29 | - bash: "make docker_build docker_save"
30 | env:
31 | DOCKER_BUILDKIT: 1
32 | BUILD_REASON: $(Build.Reason)
33 | BRANCH: $(Build.SourceBranch)
34 | DOCKER_REGISTRY: "quay.io"
35 | DOCKER_ORG: "strimzi"
36 | DOCKER_ARCHITECTURE: $(arch)
37 | displayName: "Build container - $(arch)"
38 | - publish: $(System.DefaultWorkingDirectory)/access-operator-container-$(arch).tar.gz
39 | artifact: Container-$(arch)
40 |
--------------------------------------------------------------------------------
/.azure/templates/jobs/build_java.yaml:
--------------------------------------------------------------------------------
1 | jobs:
2 | - job: 'build_and_test_java'
3 | displayName: 'Build & Test'
4 | # Strategy for the job
5 | strategy:
6 | matrix:
7 | 'java-17':
8 | image: 'Ubuntu-22.04'
9 | jdk_version: '17'
10 | # Set timeout for jobs
11 | timeoutInMinutes: 60
12 | # Base system
13 | pool:
14 | vmImage: $(image)
15 | # Variables
16 | variables:
17 | MVN_CACHE_FOLDER: $(HOME)/.m2/repository
18 | MVN_ARGS: '-e -V -B'
19 | # Pipeline steps
20 | steps:
21 | # Get cached Maven repository
22 | - template: "../steps/maven_cache.yaml"
23 | - template: "../steps/prerequisites/install_yq.yaml"
24 | - template: '../steps/prerequisites/install_java.yaml'
25 | parameters:
26 | JDK_VERSION: $(jdk_version)
27 | # Build the Java code without tests
28 | - bash: "make java_install"
29 | displayName: "Build Java code"
30 | env:
31 | MVN_ARGS: "-DskipTests -e -V -B"
32 | - bash: "make spotbugs"
33 | displayName: "Run Spotbugs"
34 | env:
35 | MVN_ARGS: "-e -V -B"
36 | - bash: "make java_verify"
37 | displayName: "Build & Test Java"
38 | env:
39 | BUILD_REASON: $(Build.Reason)
40 | BRANCH: $(Build.SourceBranch)
41 | MVN_ARGS: "-e -V -B"
42 | - bash: "make release_files_check"
43 | displayName: "Check released files"
44 | - bash: ".azure/scripts/uncommitted-changes.sh"
45 | displayName: "Check for uncommitted files"
46 | # We have to TAR the target directory to maintain the permissions of
47 | # the files which would otherwise change when downloading the artifact
48 | - bash: tar -cvpf target.tar ./operator/target
49 | displayName: "Tar the target directory"
50 | - publish: $(System.DefaultWorkingDirectory)/target.tar
51 | artifact: Binary
52 | - task: PublishTestResults@2
53 | inputs:
54 | testResultsFormat: JUnit
55 | testResultsFiles: '**/TEST-*.xml'
56 | testRunTitle: "Unit & Integration tests"
57 | condition: always()
--------------------------------------------------------------------------------
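The tar step exists because pipeline artifacts do not preserve file permissions, so the target directory is archived with -p and unpacked again by the downstream build_container and deploy_java jobs. The round trip in isolation:

    # -c create, -p preserve permissions, -f archive name; -x extracts with the same bits.
    tar -cvpf target.tar ./operator/target
    tar -xvf target.tar
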
/.azure/templates/jobs/deploy_java.yaml:
--------------------------------------------------------------------------------
1 | jobs:
2 | - job: 'deploy_java'
3 | displayName: 'Deploy Java'
4 | # Set timeout for jobs
5 | timeoutInMinutes: 60
6 | # Strategy for the job
7 | strategy:
8 | matrix:
9 | 'java-17':
10 | image: 'Ubuntu-22.04'
11 | jdk_version: '17'
12 | # Base system
13 | pool:
14 | vmImage: $(image)
15 | # Pipeline steps
16 | steps:
17 | - template: "../steps/maven_cache.yaml"
18 | - template: '../steps/prerequisites/install_java.yaml'
19 | parameters:
20 | JDK_VERSION: $(jdk_version)
21 | - task: DownloadPipelineArtifact@2
22 | inputs:
23 | source: '${{ parameters.artifactSource }}'
24 | artifact: Binary
25 | path: $(System.DefaultWorkingDirectory)/
26 | project: '${{ parameters.artifactProject }}'
27 | pipeline: '${{ parameters.artifactPipeline }}'
28 | runVersion: '${{ parameters.artifactRunVersion }}'
29 | runId: '${{ parameters.artifactRunId }}'
30 | - bash: tar -xvf target.tar
31 | displayName: "Untar the target directory"
32 | - bash: "./.azure/scripts/push-to-nexus.sh"
33 | env:
34 | MVN_ARGS: "-e -V -B"
35 | BUILD_REASON: $(Build.Reason)
36 | BRANCH: $(Build.SourceBranch)
37 | GPG_PASSPHRASE: $(GPG_PASSPHRASE)
38 | GPG_SIGNING_KEY: $(GPG_SIGNING_KEY)
39 | NEXUS_USERNAME: $(NEXUS_USERNAME)
40 | NEXUS_PASSWORD: $(NEXUS_PASSWORD)
41 | displayName: "Deploy Java artifacts"
--------------------------------------------------------------------------------
/.azure/templates/jobs/push_container.yaml:
--------------------------------------------------------------------------------
1 | jobs:
2 | - job: 'push_container'
3 | displayName: 'Tag & Push'
4 | # Set timeout for jobs
5 | timeoutInMinutes: 60
6 | # Base system
7 | pool:
8 | vmImage: 'Ubuntu-22.04'
9 | # Pipeline steps
10 | steps:
11 | - template: "../steps/prerequisites/install_docker.yaml"
12 | - template: "../steps/prerequisites/install_cosign.yaml"
13 | - template: "../steps/prerequisites/install_syft.yaml"
14 | - ${{ each arch in parameters.architectures }}:
15 | - task: DownloadPipelineArtifact@2
16 | inputs:
17 | source: '${{ parameters.artifactSource }}'
18 | artifact: Container-${{ arch }}
19 | path: $(System.DefaultWorkingDirectory)
20 | project: '${{ parameters.artifactProject }}'
21 | pipeline: '${{ parameters.artifactPipeline }}'
22 | runVersion: '${{ parameters.artifactRunVersion }}'
23 | runId: '${{ parameters.artifactRunId }}'
24 | # Push containers and their manifests
25 | - bash: "docker login -u $DOCKER_USER -p $DOCKER_PASS $DOCKER_REGISTRY"
26 | displayName: "Login to container registry"
27 | env:
28 | BUILD_REASON: $(Build.Reason)
29 | BRANCH: $(Build.SourceBranch)
30 | DOCKER_USER: $(QUAY_USER)
31 | DOCKER_PASS: $(QUAY_PASS)
32 | DOCKER_REGISTRY: "quay.io"
33 | - bash: "make docker_delete_manifest"
34 | displayName: "Delete existing container manifest"
35 | env:
36 | BUILD_REASON: $(Build.Reason)
37 | BRANCH: $(Build.SourceBranch)
38 | DOCKER_REGISTRY: "quay.io"
39 | DOCKER_ORG: "strimzi"
40 | DOCKER_TAG: '${{ parameters.dockerTag }}'
41 | - ${{ each arch in parameters.architectures }}:
42 | - bash: make docker_load docker_tag docker_push docker_amend_manifest docker_delete_archive
43 | displayName: "Push the ${{ arch }} containers and create manifest"
44 | env:
45 | BUILD_REASON: $(Build.Reason)
46 | BRANCH: $(Build.SourceBranch)
47 | DOCKER_REGISTRY: "quay.io"
48 | DOCKER_ORG: "strimzi"
49 | DOCKER_TAG: '${{ parameters.dockerTag }}'
50 | DOCKER_ARCHITECTURE: ${{ arch }}
51 | - bash: "make docker_push_manifest"
52 | displayName: "Push container manifest"
53 | env:
54 | BUILD_REASON: $(Build.Reason)
55 | BRANCH: $(Build.SourceBranch)
56 | DOCKER_REGISTRY: "quay.io"
57 | DOCKER_ORG: "strimzi"
58 | DOCKER_TAG: '${{ parameters.dockerTag }}'
59 | - bash: "make docker_sign_manifest"
60 | displayName: "Sign container manifest"
61 | env:
62 | BUILD_REASON: $(Build.Reason)
63 | BRANCH: $(Build.SourceBranch)
64 | BUILD_ID: $(Build.BuildId)
65 | BUILD_COMMIT: $(Build.SourceVersion)
66 | DOCKER_REGISTRY: "quay.io"
67 | DOCKER_ORG: "strimzi"
68 | DOCKER_TAG: '${{ parameters.dockerTag }}'
69 | COSIGN_PASSWORD: $(COSIGN_PASSWORD)
70 | COSIGN_PRIVATE_KEY: $(COSIGN_PRIVATE_KEY)
71 | # SBOMs generation, packaging, and signing
72 | - ${{ each arch in parameters.architectures }}:
73 | - bash: make docker_sbom
74 | displayName: "Generate SBOMs for ${{ arch }} container"
75 | env:
76 | BUILD_REASON: $(Build.Reason)
77 | BRANCH: $(Build.SourceBranch)
78 | DOCKER_REGISTRY: "quay.io"
79 | DOCKER_ORG: "strimzi"
80 | DOCKER_TAG: '${{ parameters.dockerTag }}'
81 | DOCKER_ARCHITECTURE: ${{ arch }}
82 | COSIGN_PASSWORD: $(COSIGN_PASSWORD)
83 | COSIGN_PRIVATE_KEY: $(COSIGN_PRIVATE_KEY)
84 | - bash: tar -z -C ./sbom/ -cvpf sbom.tar.gz ./
85 | displayName: "Tar the SBOM files"
86 | - publish: $(System.DefaultWorkingDirectory)/sbom.tar.gz
87 | artifact: SBOMs-${{ parameters.dockerTag }}
88 | displayName: "Publish the SBOM files"
89 | # push the SBOMs to container registry only for releases
90 | - ${{ each arch in parameters.architectures }}:
91 | - bash: make docker_push_sbom
92 | displayName: "Push SBOMs for ${{ arch }} container"
93 | condition: startsWith(variables['build.sourceBranch'], 'refs/heads/release-')
94 | env:
95 | BUILD_REASON: $(Build.Reason)
96 | BRANCH: $(Build.SourceBranch)
97 | DOCKER_REGISTRY: "quay.io"
98 | DOCKER_ORG: "strimzi"
99 | DOCKER_TAG: '${{ parameters.dockerTag }}'
100 | DOCKER_ARCHITECTURE: ${{ arch }}
101 | COSIGN_PASSWORD: $(COSIGN_PASSWORD)
102 | COSIGN_PRIVATE_KEY: $(COSIGN_PRIVATE_KEY)
103 |
--------------------------------------------------------------------------------
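Because signing runs with --tlog-upload=false, verification has to skip the transparency log as well. A hedged verification sketch, assuming the public half of COSIGN_PRIVATE_KEY has been exported as cosign.pub and using an illustrative tag:

    # Verify the signed multi-arch manifest without consulting the Rekor transparency log.
    cosign verify --insecure-ignore-tlog=true --key cosign.pub \
      quay.io/strimzi/access-operator:latest
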
/.azure/templates/jobs/push_helm_chart.yaml:
--------------------------------------------------------------------------------
1 | jobs:
2 | - job: 'push_helm_chart_oci'
3 | displayName: 'Push Helm Chart as OCI artifact'
4 | # Set timeout for jobs
5 | timeoutInMinutes: 60
6 | # Base system
7 | pool:
8 | vmImage: 'Ubuntu-22.04'
9 | # Pipeline steps
10 | steps:
11 | # Install Prerequisites
12 | - template: "../steps/prerequisites/install_helm.yaml"
13 |
14 | # Unpack the release artifacts
15 | - task: DownloadPipelineArtifact@2
16 | inputs:
17 | source: '${{ parameters.artifactSource }}'
18 | artifact: HelmChartArchive
19 | path: $(System.DefaultWorkingDirectory)/
20 | project: '${{ parameters.artifactProject }}'
21 | pipeline: '${{ parameters.artifactPipeline }}'
22 | runVersion: '${{ parameters.artifactRunVersion }}'
23 | runId: '${{ parameters.artifactRunId }}'
24 |
25 | # Login Helm to the OCI Registry
26 | - bash: "helm registry login -u $DOCKER_USER -p $DOCKER_PASS $DOCKER_REGISTRY"
27 | displayName: "Login to OCI registry"
28 | env:
29 | DOCKER_USER: $(QUAY_HELM_USER)
30 | DOCKER_PASS: $(QUAY_HELM_PASS)
31 | DOCKER_REGISTRY: "quay.io"
32 |
33 | # Push the Helm Chart to the OCI Registry
34 | - bash: "helm push strimzi-access-operator-helm-3-chart-${{ parameters.releaseVersion }}.tgz oci://$DOCKER_REGISTRY/$DOCKER_ORG"
35 | displayName: "Push Helm Chart OCI artifact"
36 | env:
37 | DOCKER_REGISTRY: "quay.io"
38 | DOCKER_ORG: "strimzi-helm"
--------------------------------------------------------------------------------
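Once pushed, the chart can be consumed directly from the OCI registry. A sketch, assuming the chart name in Chart.yaml is strimzi-access-operator and using an illustrative version:

    # Pull or install the chart as an OCI artifact.
    helm pull oci://quay.io/strimzi-helm/strimzi-access-operator --version 0.1.0
    helm install access oci://quay.io/strimzi-helm/strimzi-access-operator --version 0.1.0
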
/.azure/templates/jobs/run_systemtests.yaml:
--------------------------------------------------------------------------------
1 | jobs:
2 | - job: 'run_systemtest'
3 | displayName: "Run systemtests"
4 | strategy:
5 | matrix:
6 | ${{ each arch in parameters.architectures }}:
7 | ${{ arch }}:
8 | arch: ${{ arch }}
9 | image: 'Ubuntu-22.04'
10 | jdk_version: '17'
11 | pool:
12 | vmImage: $(image)
13 | timeoutInMinutes: 30
14 | steps:
15 | - template: '../steps/prerequisites/install_java.yaml'
16 | parameters:
17 | JDK_VERSION: $(jdk_version)
18 | - template: "../steps/prerequisites/install_docker.yaml"
19 | - template: "../steps/prerequisites/install_kind.yaml"
20 | - task: DownloadPipelineArtifact@2
21 | inputs:
22 | source: current
23 | artifact: Container-$(arch)
24 | path: $(System.DefaultWorkingDirectory)
25 | - script: |
26 | echo "##vso[task.setvariable variable=docker_registry]$(hostname --ip-address | grep -oE '\b([0-9]{1,3}\.){3}[0-9]{1,3}\b' | awk '$1 != "127.0.0.1" { print $1 }' | head -1):5001"
27 | displayName: "Set docker_registry to local registry"
28 | condition: eq(variables['docker_registry'], 'localhost:5000')
29 | - bash: |
30 | eval $(minikube docker-env)
31 | DOCKER_ARCHITECTURE=$(arch) make docker_load
32 | make docker_tag
33 | make docker_push
34 | env:
35 | BUILD_TAG: latest-$(arch)
36 | displayName: 'Docker load & tag & push to local registries - $(arch)'
37 | - task: Maven@4
38 | inputs:
39 | mavenPOMFile: 'pom.xml'
40 | goals: 'install'
41 | options: '-B -Dmaven.javadoc.skip=true -DskipTests -am -pl systemtest'
42 | displayName: 'Build systemtest module'
43 | - task: Maven@4
44 | inputs:
45 | mavenPOMFile: 'systemtest/pom.xml'
46 | publishJUnitResults: true
47 | testResultsFiles: '**/failsafe-reports/TEST-*.xml'
48 | goals: 'verify'
49 | options: '-B -Dmaven.javadoc.skip=true -Pall'
50 | env:
51 | DOCKER_REGISTRY: registry.minikube
52 | DOCKER_ORG: strimzi
53 | DOCKER_TAG: latest
54 | displayName: 'Run systemtests - $(arch) - Bundle installation'
55 | - task: Maven@4
56 | inputs:
57 | mavenPOMFile: 'systemtest/pom.xml'
58 | publishJUnitResults: true
59 | testResultsFiles: '**/failsafe-reports/TEST-*.xml'
60 | goals: 'verify'
61 | options: '-B -Dmaven.javadoc.skip=true -Pall'
62 | env:
63 | DOCKER_REGISTRY: registry.minikube
64 | DOCKER_ORG: strimzi
65 | DOCKER_TAG: latest
66 | INSTALL_TYPE: Helm
67 | displayName: 'Run systemtests - $(arch) - Helm installation'
--------------------------------------------------------------------------------
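The two Maven@4 tasks map onto plain mvn invocations. A rough local equivalent, assuming a Kind cluster and registry prepared as in .azure/scripts/setup-kind.sh:

    # Build the systemtest module and its dependencies, then run the Bundle-installation tests.
    mvn -B install -DskipTests -Dmaven.javadoc.skip=true -am -pl systemtest
    DOCKER_REGISTRY=registry.minikube DOCKER_ORG=strimzi DOCKER_TAG=latest \
      mvn -B verify -f systemtest/pom.xml -Dmaven.javadoc.skip=true -Pall
    # Set INSTALL_TYPE=Helm as well to exercise the Helm installation path.
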
/.azure/templates/steps/maven_cache.yaml:
--------------------------------------------------------------------------------
1 | steps:
2 | - task: Cache@2
3 | inputs:
4 | key: 'maven-cache | $(System.JobName) | **/pom.xml'
5 | restoreKeys: |
6 | maven-cache | $(System.JobName)
7 | maven-cache
8 | path: $(HOME)/.m2/repository
9 | displayName: Maven cache
10 |
--------------------------------------------------------------------------------
/.azure/templates/steps/prerequisites/install_cosign.yaml:
--------------------------------------------------------------------------------
1 | steps:
2 | - bash: ".azure/scripts/install_cosign.sh"
3 | displayName: "Install cosign"
--------------------------------------------------------------------------------
/.azure/templates/steps/prerequisites/install_docker.yaml:
--------------------------------------------------------------------------------
1 | # Steps needed for local Docker installation
2 | steps:
3 | - task: DockerInstaller@0
4 | displayName: Install Docker
5 | inputs:
6 | dockerVersion: 20.10.8
7 | releaseType: stable
8 | - bash: |
9 | docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
10 | displayName: 'Register QEMU binary'
--------------------------------------------------------------------------------
/.azure/templates/steps/prerequisites/install_helm.yaml:
--------------------------------------------------------------------------------
1 | steps:
2 | - bash: ".azure/scripts/setup-helm.sh"
3 | displayName: "Install Helm"
4 | env:
5 | TEST_HELM3_VERSION: 'v3.16.2'
--------------------------------------------------------------------------------
/.azure/templates/steps/prerequisites/install_java.yaml:
--------------------------------------------------------------------------------
1 | # Step to configure JAVA on the agent
2 | parameters:
3 | - name: JDK_VERSION
4 | default: '17'
5 | steps:
6 | - task: JavaToolInstaller@0
7 | inputs:
8 | versionSpec: $(JDK_VERSION)
9 | jdkArchitectureOption: 'x64'
10 | jdkSourceOption: 'PreInstalled'
11 | displayName: 'Configure Java'
12 |
--------------------------------------------------------------------------------
/.azure/templates/steps/prerequisites/install_kind.yaml:
--------------------------------------------------------------------------------
1 | steps:
2 | - bash: ".azure/scripts/setup-kind.sh"
3 | displayName: "Setup Kind cluster"
4 | env:
5 | TEST_KUBECTL_VERSION: latest
6 | KIND_VERSION: v0.25.0
--------------------------------------------------------------------------------
/.azure/templates/steps/prerequisites/install_syft.yaml:
--------------------------------------------------------------------------------
1 | steps:
2 | - bash: ".azure/scripts/install_syft.sh"
3 | displayName: "Install Syft"
--------------------------------------------------------------------------------
/.azure/templates/steps/prerequisites/install_yq.yaml:
--------------------------------------------------------------------------------
1 | steps:
2 | - bash: ".azure/scripts/install_yq.sh"
3 | displayName: "Install yq"
--------------------------------------------------------------------------------
/.checkstyle/checkstyle.xml:
--------------------------------------------------------------------------------
[checkstyle.xml: the Checkstyle rule configuration was stripped of its XML markup during extraction and is not recoverable]
--------------------------------------------------------------------------------
/.checkstyle/suppressions.xml:
--------------------------------------------------------------------------------
[suppressions.xml: the Checkstyle suppression rules were stripped of their XML markup during extraction and are not recoverable]
--------------------------------------------------------------------------------
/.checksums:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ### IMPORTANT ###
4 | # if the below line has changed, this means the ./helm-charts directory has changed
5 | # the checksum and ./helm-charts directory should only be modified on official releases as part of a release
6 | # if this checksum has changed as part of any non-release specific changes, please apply your changes to the
7 | # development version of the helm charts in ./packaging/helm-charts
8 | ### IMPORTANT ###
9 | HELM_CHART_CHECKSUM="23edfc4a6e4bdf8363209448ab754258f2929958 -"
10 |
11 | ### IMPORTANT ###
12 | # if the below line has changed, this means the ./install directory has changed
13 | # the checksum and ./install directory should only be modified on official releases as part of a release
14 | # if this checksum has changed as part of any non-release specific changes, please apply your changes to the
 15 | # development version of the installation files in ./packaging/install
16 | ### IMPORTANT ###
17 | INSTALL_CHECKSUM="285387a2e673598b6624805ac200d1e14d17f79d -"
18 |
19 | ### IMPORTANT ###
20 | # if the below line has changed, this means the ./examples directory has changed
21 | # the checksum and ./examples directory should only be modified on official releases as part of a release
22 | # if this checksum has changed as part of any non-release specific changes, please apply your changes to the
 23 | # development version of the examples in ./packaging/examples
24 | ### IMPORTANT ###
25 | EXAMPLES_CHECKSUM="5329eddeedb33d52e207946b684862db93ebcd84 -"
26 |
--------------------------------------------------------------------------------
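Each value is a checksum-of-checksums over a directory, produced by the Makefile's checksum_* targets. Recomputing one by hand:

    # Mirrors `make checksum_install`: stable file ordering, per-file sha1, then sha1 of the list.
    find ./install/ -type f -print0 | LC_ALL=C sort -z | xargs -0 sha1sum | sha1sum
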
/.github/workflows/codeql-analysis.yml:
--------------------------------------------------------------------------------
1 | # For most projects, this workflow file will not need changing; you simply need
2 | # to commit it to your repository.
3 | #
4 | # You may wish to alter this file to override the set of languages analyzed,
5 | # or to provide custom queries or build logic.
6 | #
7 | # ******** NOTE ********
8 | # We have attempted to detect the languages in your repository. Please check
9 | # the `language` matrix defined below to confirm you have the correct set of
10 | # supported CodeQL languages.
11 | #
12 | name: "CodeQL"
13 |
14 | on:
15 | push:
16 | branches: [ main, release* ]
17 | pull_request:
18 | # The branches below must be a subset of the branches above
19 | branches: [ main ]
20 | schedule:
21 | - cron: '23 17 * * 3'
22 |
23 | jobs:
24 | analyze:
25 | name: Analyze
26 | runs-on: ubuntu-latest
27 |
28 | strategy:
29 | fail-fast: false
30 | matrix:
31 | language: [ 'java' ]
32 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
33 | # Learn more:
34 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
35 |
36 | steps:
37 | - name: Checkout repository
38 | uses: actions/checkout@v4
39 |
40 | # Setup OpenJDK
41 | - name: Setup java
42 | uses: actions/setup-java@v4
43 | with:
44 | java-version: 17
45 | distribution: 'temurin'
46 | cache: 'maven'
47 |
48 | # Initializes the CodeQL tools for scanning.
49 | - name: Initialize CodeQL
50 | uses: github/codeql-action/init@v3
51 | with:
52 | languages: ${{ matrix.language }}
53 | # If you wish to specify custom queries, you can do so here or in a config file.
54 | # By default, queries listed here will override any specified in a config file.
55 | # Prefix the list here with "+" to use these queries and those in the config file.
56 | # queries: ./path/to/local/query, your-org/your-repo/queries@main
57 |
58 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
59 | # If this step fails, then you should remove it and run the build manually (see below)
60 | - name: Autobuild
61 | uses: github/codeql-action/autobuild@v3
62 |
63 | # ℹ️ Command-line programs to run using the OS shell.
64 | # 📚 https://git.io/JvXDl
65 |
66 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
67 | # and modify them (or add more) to build your code if your project
68 | # uses a compiled language
69 |
70 | #- run: |
71 | # make bootstrap
72 | # make release
73 |
74 | - name: Perform CodeQL Analysis
75 | uses: github/codeql-action/analyze@v3
76 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Maven build targets
2 | target/
3 |
4 | # Eclipse specific
5 | .project
6 | .settings/
7 | .prefs
8 | .classpath
9 | .apt_generated/
10 | .apt_generated_tests/
11 |
12 | # IntelliJ IDEA specific
13 | .idea/
14 | *.iml
15 |
16 | # VS Code
17 | .factorypath
18 | .vscode
19 |
20 | # MacOS Desktop Services Store files
21 | **/.DS_Store
22 |
23 | # Generated files for docker image build
24 | .access-operator.tmp
25 | docker-image/tmp/**
26 |
--------------------------------------------------------------------------------
/.spotbugs/spotbugs-exclude.xml:
--------------------------------------------------------------------------------
[spotbugs-exclude.xml: the SpotBugs exclusion filters were stripped of their XML markup during extraction and are not recoverable]
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Strimzi Community Code of Conduct
2 |
3 | Strimzi Community Code of Conduct is defined in the [governance repository](https://github.com/strimzi/governance/blob/master/CODE_OF_CONDUCT.md).
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM registry.access.redhat.com/ubi9/ubi-minimal:latest
2 | ARG JAVA_VERSION=17
3 | ARG TARGETPLATFORM
4 |
5 | USER root
6 |
7 | RUN microdnf update -y \
8 | && microdnf --setopt=install_weak_deps=0 --setopt=tsflags=nodocs install -y java-${JAVA_VERSION}-openjdk-headless openssl shadow-utils \
9 | && microdnf clean all -y
10 |
11 | # Set JAVA_HOME env var
12 | ENV JAVA_HOME=/usr/lib/jvm/jre-17
13 |
14 | # Add strimzi user with UID 1001
15 | # The user is in the group 0 to have access to the mounted volumes and storage
16 | RUN useradd -r -m -u 1001 -g 0 strimzi
17 |
18 | ARG access_operator_version=1.0-SNAPSHOT
19 | ENV ACCESS_OPERATOR_VERSION=${access_operator_version}
20 | ENV STRIMZI_HOME=/opt/strimzi
21 | RUN mkdir -p ${STRIMZI_HOME}
22 | WORKDIR ${STRIMZI_HOME}
23 |
24 | #####
25 | # Add Kafka Access Operator
26 | #####
27 | COPY operator/target/operator-${access_operator_version} ./
28 |
29 | #####
30 | # Add Tini
31 | #####
32 | ENV TINI_VERSION=v0.19.0
33 | ENV TINI_SHA256_AMD64=93dcc18adc78c65a028a84799ecf8ad40c936fdfc5f2a57b1acda5a8117fa82c
34 | ENV TINI_SHA256_ARM64=07952557df20bfd2a95f9bef198b445e006171969499a1d361bd9e6f8e5e0e81
35 | ENV TINI_SHA256_PPC64LE=3f658420974768e40810001a038c29d003728c5fe86da211cff5059e48cfdfde
36 | ENV TINI_SHA256_S390X=931b70a182af879ca249ae9de87ef68423121b38d235c78997fafc680ceab32d
37 |
38 | RUN set -ex; \
39 | if [[ ${TARGETPLATFORM} = "linux/ppc64le" ]]; then \
40 | curl -s -L https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-ppc64le -o /usr/bin/tini; \
41 | echo "${TINI_SHA256_PPC64LE} */usr/bin/tini" | sha256sum -c; \
42 | chmod +x /usr/bin/tini; \
43 | elif [[ ${TARGETPLATFORM} = "linux/arm64" ]]; then \
44 | curl -s -L https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-arm64 -o /usr/bin/tini; \
45 | echo "${TINI_SHA256_ARM64} */usr/bin/tini" | sha256sum -c; \
46 | chmod +x /usr/bin/tini; \
47 | elif [[ ${TARGETPLATFORM} = "linux/s390x" ]]; then \
48 | curl -s -L https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-s390x -o /usr/bin/tini; \
49 | echo "${TINI_SHA256_S390X} */usr/bin/tini" | sha256sum -c; \
50 | chmod +x /usr/bin/tini; \
51 | else \
52 | curl -s -L https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini -o /usr/bin/tini; \
53 | echo "${TINI_SHA256_AMD64} */usr/bin/tini" | sha256sum -c; \
54 | chmod +x /usr/bin/tini; \
55 | fi
56 |
57 | USER 1001
58 |
59 | CMD ["/opt/strimzi/bin/access_operator_run.sh"]
--------------------------------------------------------------------------------
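TARGETPLATFORM is populated by BuildKit, which is why the CI exports DOCKER_BUILDKIT=1. A hedged example of cross-building this image for another architecture (tag name illustrative):

    # Cross-build for arm64; the matching tini branch is selected via TARGETPLATFORM.
    docker buildx build --platform linux/arm64 \
      --build-arg access_operator_version=1.0-SNAPSHOT \
      -t strimzi/access-operator:latest-arm64 .
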
/GOVERNANCE.md:
--------------------------------------------------------------------------------
1 | # Strimzi Governance
2 |
3 | Strimzi Governance is defined in the [governance repository](https://github.com/strimzi/governance/blob/master/GOVERNANCE.md).
--------------------------------------------------------------------------------
/MAINTAINERS.md:
--------------------------------------------------------------------------------
1 | # Strimzi Maintainers list
2 |
3 | Strimzi Maintainers list is defined in the [governance repository](https://github.com/strimzi/governance/blob/master/MAINTAINERS).
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | include ./Makefile.os
2 | include ./Makefile.docker
3 | include ./Makefile.maven
4 |
5 | PROJECT_NAME ?= access-operator
6 | GITHUB_VERSION ?= main
7 | RELEASE_VERSION ?= latest
8 |
9 | ifneq ($(RELEASE_VERSION),latest)
10 | GITHUB_VERSION = $(RELEASE_VERSION)
11 | endif
12 |
13 | .PHONY: release
14 | release: release_prepare release_maven release_version release_pkg
15 |
16 | release_prepare:
17 | rm -rf ./strimzi-access-operator-$(RELEASE_VERSION)
18 | rm -f ./strimzi-access-operator-$(RELEASE_VERSION).tar.gz
19 | rm -f ./strimzi-access-operator-$(RELEASE_VERSION).zip
20 | rm -f ./strimzi-access-operator-helm-3-chart-$(RELEASE_VERSION).zip
21 | mkdir ./strimzi-access-operator-$(RELEASE_VERSION)
22 |
23 | release_version:
24 | echo "Update release.version to $(RELEASE_VERSION)"
25 | echo $(shell echo $(RELEASE_VERSION) | tr a-z A-Z) > release.version
26 | echo "Changing Docker image tags in install to :$(RELEASE_VERSION)"
27 | $(FIND) ./packaging/install -name '*.yaml' -type f -exec $(SED) -i '/image: "\?quay.io\/strimzi\/[a-zA-Z0-9_.-]\+:[a-zA-Z0-9_.-]\+"\?/s/:[a-zA-Z0-9_.-]\+/:$(RELEASE_VERSION)/g' {} \;
28 | echo "Changing Docker image tags in Helm Chart to :$(RELEASE_VERSION)"
29 | CHART_PATH=./packaging/helm-charts/helm3/strimzi-access-operator; \
30 | $(SED) -i 's/\(tag: \).*/\1$(RELEASE_VERSION)/g' $$CHART_PATH/values.yaml; \
31 | $(SED) -i 's/\(image.tag[^\n]*| \)`.*`/\1`$(RELEASE_VERSION)`/g' $$CHART_PATH/README.md
32 |
33 | release_maven:
34 | echo "Update pom versions to $(RELEASE_VERSION)"
35 | mvn $(MVN_ARGS) versions:set -DnewVersion=$(shell echo $(RELEASE_VERSION) | tr a-z A-Z)
36 | mvn $(MVN_ARGS) versions:commit
37 |
38 | release_pkg: helm_pkg
39 | $(CP) -r ./packaging/install ./
40 | $(CP) -r ./packaging/install ./strimzi-access-operator-$(RELEASE_VERSION)/
41 | $(CP) -r ./packaging/examples ./
42 | $(CP) -r ./packaging/examples ./strimzi-access-operator-$(RELEASE_VERSION)/
43 | tar -z -cf ./strimzi-access-operator-$(RELEASE_VERSION).tar.gz strimzi-access-operator-$(RELEASE_VERSION)/
44 | zip -r ./strimzi-access-operator-$(RELEASE_VERSION).zip strimzi-access-operator-$(RELEASE_VERSION)/
45 | rm -rf ./strimzi-access-operator-$(RELEASE_VERSION)
46 | rm -rfv ./examples
47 | $(CP) -rv ./packaging/examples ./examples
48 | rm -rfv ./install
49 | mkdir ./install
50 | $(FIND) ./packaging/install/ -mindepth 1 -maxdepth 1 ! -name Makefile -type f,d -exec $(CP) -rv {} ./install/ \;
51 | rm -rfv ./helm-charts/helm3/strimzi-access-operator
52 | mkdir -p ./helm-charts/helm3/
53 | $(CP) -rv ./packaging/helm-charts/helm3/strimzi-access-operator ./helm-charts/helm3/strimzi-access-operator
54 |
55 | helm_pkg:
 56 | 	# Package the Helm Chart and rename the archive for the release
57 | mkdir -p strimzi-access-operator-$(RELEASE_VERSION)/helm3-chart/
58 | helm package --version $(RELEASE_VERSION) --app-version $(RELEASE_VERSION) --destination ./ ./packaging/helm-charts/helm3/strimzi-access-operator/
59 | $(CP) strimzi-access-operator-$(RELEASE_VERSION).tgz strimzi-access-operator-helm-3-chart-$(RELEASE_VERSION).tgz
60 | rm -rf strimzi-access-operator-$(RELEASE_VERSION)/helm3-chart/
61 | rm strimzi-access-operator-$(RELEASE_VERSION).tgz
62 |
63 | .PHONY: crd_install
64 | crd_install:
65 | $(CP) ./api/target/classes/META-INF/fabric8/kafkaaccesses.access.strimzi.io-v1.yml ./packaging/install/040-Crd-kafkaaccess.yaml
66 | yq eval -i '.metadata.labels."servicebinding.io/provisioned-service"="true"' ./packaging/install/040-Crd-kafkaaccess.yaml
67 | $(CP) ./api/target/classes/META-INF/fabric8/kafkaaccesses.access.strimzi.io-v1.yml ./packaging/helm-charts/helm3/strimzi-access-operator/crds/040-Crd-kafkaaccess.yaml
68 | yq eval -i '.metadata.labels."servicebinding.io/provisioned-service"="true"' ./packaging/helm-charts/helm3/strimzi-access-operator/crds/040-Crd-kafkaaccess.yaml
69 |
70 | .PHONY: helm_install
71 | helm_install: packaging/helm-charts/helm3
72 | $(MAKE) -C packaging/helm-charts/helm3 $(MAKECMDGOALS)
73 |
74 | .PHONY: next_version
75 | next_version:
76 | echo $(shell echo $(NEXT_VERSION) | tr a-z A-Z) > release.version
77 | mvn versions:set -DnewVersion=$(shell echo $(NEXT_VERSION) | tr a-z A-Z)
78 | mvn versions:commit
79 |
80 | release_files_check:
81 | ./.azure/scripts/release_files_check.sh
82 |
83 | checksum_examples:
84 | @$(FIND) ./examples/ -type f -print0 | LC_ALL=C $(SORT) -z | $(XARGS) -0 $(SHA1SUM) | $(SHA1SUM)
85 |
86 | checksum_install:
87 | @$(FIND) ./install/ -type f -print0 | LC_ALL=C $(SORT) -z | $(XARGS) -0 $(SHA1SUM) | $(SHA1SUM)
88 |
89 | checksum_helm:
90 | @$(FIND) ./helm-charts/ -type f -print0 | LC_ALL=C $(SORT) -z | $(XARGS) -0 $(SHA1SUM) | $(SHA1SUM)
91 |
92 | .PHONY: all
93 | all: java_package docker_build docker_push crd_install helm_install
94 |
95 | .PHONY: build
96 | build: java_verify crd_install docker_build
97 |
98 | .PHONY: clean
99 | clean: java_clean
100 |
--------------------------------------------------------------------------------
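The release target chains the prepare/maven/version/pkg steps above; .azure/scripts/release-artifacts.sh just exports RELEASE_VERSION and runs it. The local equivalent, with an illustrative version:

    # Requires mvn, helm and yq on the PATH (see the install scripts under .azure/scripts/).
    RELEASE_VERSION=0.1.0 make release
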
/Makefile.docker:
--------------------------------------------------------------------------------
1 | # Makefile.docker contains the shared tasks for building, tagging and pushing Docker images.
2 | # This file is included into the Makefile files which contain the Dockerfile files.
3 | #
 4 | # The DOCKER_ORG (default is the name of the current user) and DOCKER_TAG (default
 5 | # latest) variables are used to name the Docker image. DOCKER_REGISTRY identifies
 6 | # the registry where the image will be pushed (default is quay.io).
7 | DOCKERFILE_DIR ?= ./
8 | DOCKER_CMD ?= docker
9 | DOCKER_REGISTRY ?= quay.io
10 | DOCKER_ORG ?= $(USER)
11 | DOCKER_TAG ?= latest
12 | BUILD_TAG ?= latest
13 | TOPDIR ?= $(dir $(lastword $(MAKEFILE_LIST)))
14 | SBOM_DIR = $(TOPDIR)sbom
15 | RELEASE_VERSION ?= $(shell cat $(TOPDIR)/release.version)
16 |
17 | ifdef DOCKER_ARCHITECTURE
18 | DOCKER_PLATFORM = --platform linux/$(DOCKER_ARCHITECTURE)
19 | DOCKER_PLATFORM_TAG_SUFFIX = -$(DOCKER_ARCHITECTURE)
20 | endif
21 |
22 | .PHONY: docker_build
23 | docker_build:
24 | # Build Docker image ...
25 | $(DOCKER_CMD) $(DOCKER_BUILDX) build $(DOCKER_PLATFORM) $(DOCKER_BUILD_ARGS) --build-arg access_operator_version=$(RELEASE_VERSION) -t strimzi/$(PROJECT_NAME):latest $(DOCKERFILE_DIR)
26 | # The Dockerfiles all use FROM ...:latest, so it is necessary to tag images with latest (-t above)
 27 | 	# But the multi-arch pipeline also needs an architecture-specific tag, so we additionally
 28 | 	# tag the image with $(BUILD_TAG) and the platform suffix.
 29 | 	# This BUILD_TAG-based tag is the one consumed by the docker_tag target.
30 | $(DOCKER_CMD) tag strimzi/$(PROJECT_NAME):latest strimzi/$(PROJECT_NAME):$(BUILD_TAG)$(DOCKER_PLATFORM_TAG_SUFFIX)
31 |
32 | .PHONY: docker_save
33 | docker_save:
 34 | 	# Saves the container image as a TGZ file
35 | $(DOCKER_CMD) save strimzi/$(PROJECT_NAME):$(BUILD_TAG)$(DOCKER_PLATFORM_TAG_SUFFIX) | gzip > access-operator-container$(DOCKER_PLATFORM_TAG_SUFFIX).tar.gz
36 |
37 | .PHONY: docker_load
38 | docker_load:
 39 | 	# Loads the container image from the TGZ file
40 | $(DOCKER_CMD) load < access-operator-container$(DOCKER_PLATFORM_TAG_SUFFIX).tar.gz
41 |
42 | .PHONY: docker_tag
43 | docker_tag:
44 | # Tag the $(BUILD_TAG) image we built with the given $(DOCKER_TAG) tag
45 | $(DOCKER_CMD) tag strimzi/$(PROJECT_NAME):$(BUILD_TAG)$(DOCKER_PLATFORM_TAG_SUFFIX) $(DOCKER_REGISTRY)/$(DOCKER_ORG)/$(PROJECT_NAME):$(DOCKER_TAG)$(DOCKER_PLATFORM_TAG_SUFFIX)
46 |
47 | .PHONY: docker_push
48 | docker_push: docker_tag
49 | # Push the $(DOCKER_TAG)-tagged image to the registry
50 | $(DOCKER_CMD) push $(DOCKER_REGISTRY)/$(DOCKER_ORG)/$(PROJECT_NAME):$(DOCKER_TAG)$(DOCKER_PLATFORM_TAG_SUFFIX)
51 |
52 | .PHONY: docker_delete_archive
53 | docker_delete_archive:
54 | # Deletes the archive
55 | rm access-operator-container$(DOCKER_PLATFORM_TAG_SUFFIX).tar.gz
56 |
57 | .PHONY: docker_amend_manifest
58 | docker_amend_manifest:
59 | # Create / Amend the manifest
60 | $(DOCKER_CMD) manifest create $(DOCKER_REGISTRY)/$(DOCKER_ORG)/$(PROJECT_NAME):$(DOCKER_TAG) --amend $(DOCKER_REGISTRY)/$(DOCKER_ORG)/$(PROJECT_NAME):$(DOCKER_TAG)$(DOCKER_PLATFORM_TAG_SUFFIX)
61 |
62 | .PHONY: docker_push_manifest
63 | docker_push_manifest:
64 | # Push the manifest to the registry
65 | $(DOCKER_CMD) manifest push $(DOCKER_REGISTRY)/$(DOCKER_ORG)/$(PROJECT_NAME):$(DOCKER_TAG)
66 |
67 | .PHONY: docker_delete_manifest
68 | docker_delete_manifest:
 69 | 	# Delete the local manifest list, ignoring the error if it doesn't exist
70 | $(DOCKER_CMD) manifest rm $(DOCKER_REGISTRY)/$(DOCKER_ORG)/$(PROJECT_NAME):$(DOCKER_TAG) || true
71 |
72 | .PHONY: docker_sign_manifest
73 | docker_sign_manifest:
74 | # Signs the manifest and its images
75 | @echo $$COSIGN_PRIVATE_KEY | base64 -d > cosign.key
76 | MANIFEST_DIGEST=$(shell docker buildx imagetools inspect $(DOCKER_REGISTRY)/$(DOCKER_ORG)/$(PROJECT_NAME):$(DOCKER_TAG) --format '{{ json . }}' | jq -r .manifest.digest); \
77 | cosign sign --recursive --tlog-upload=false -a author=StrimziCI -a BuildID=$(BUILD_ID) -a Commit=$(BUILD_COMMIT) --key cosign.key $(DOCKER_REGISTRY)/$(DOCKER_ORG)/$(PROJECT_NAME)@$$MANIFEST_DIGEST
78 | @rm cosign.key
79 |
80 | .PHONY: docker_sbom
81 | docker_sbom:
82 | # Saves the SBOM of the image
83 | test -d $(SBOM_DIR) || mkdir -p $(SBOM_DIR)
84 | # Generate the text format
85 | MANIFEST_DIGEST=$(shell docker buildx imagetools inspect $(DOCKER_REGISTRY)/$(DOCKER_ORG)/$(PROJECT_NAME):$(DOCKER_TAG)$(DOCKER_PLATFORM_TAG_SUFFIX) --format '{{ json . }}' | jq -r .manifest.digest); \
86 | syft packages $(DOCKER_REGISTRY)/$(DOCKER_ORG)/$(PROJECT_NAME)@$$MANIFEST_DIGEST --output syft-table --file $(SBOM_DIR)/$(DOCKER_REGISTRY)/$(DOCKER_ORG)/$(PROJECT_NAME)/$(DOCKER_TAG)/$$MANIFEST_DIGEST.txt
87 | # Generate the SPDX JSON format for machine processing
88 | MANIFEST_DIGEST=$(shell docker buildx imagetools inspect $(DOCKER_REGISTRY)/$(DOCKER_ORG)/$(PROJECT_NAME):$(DOCKER_TAG)$(DOCKER_PLATFORM_TAG_SUFFIX) --format '{{ json . }}' | jq -r .manifest.digest); \
89 | syft packages $(DOCKER_REGISTRY)/$(DOCKER_ORG)/$(PROJECT_NAME)@$$MANIFEST_DIGEST --output spdx-json --file $(SBOM_DIR)/$(DOCKER_REGISTRY)/$(DOCKER_ORG)/$(PROJECT_NAME)/$(DOCKER_TAG)/$$MANIFEST_DIGEST.json
90 | # Sign the TXT and SPDX-JSON SBOM
91 | @echo $$COSIGN_PRIVATE_KEY | base64 -d > cosign.key
92 | MANIFEST_DIGEST=$(shell docker buildx imagetools inspect $(DOCKER_REGISTRY)/$(DOCKER_ORG)/$(PROJECT_NAME):$(DOCKER_TAG)$(DOCKER_PLATFORM_TAG_SUFFIX) --format '{{ json . }}' | jq -r .manifest.digest); \
93 | cosign sign-blob --tlog-upload=false --key cosign.key --bundle $(SBOM_DIR)/$(DOCKER_REGISTRY)/$(DOCKER_ORG)/$(PROJECT_NAME)/$(DOCKER_TAG)/$$MANIFEST_DIGEST.txt.bundle $(SBOM_DIR)/$(DOCKER_REGISTRY)/$(DOCKER_ORG)/$(PROJECT_NAME)/$(DOCKER_TAG)/$$MANIFEST_DIGEST.txt
94 | MANIFEST_DIGEST=$(shell docker buildx imagetools inspect $(DOCKER_REGISTRY)/$(DOCKER_ORG)/$(PROJECT_NAME):$(DOCKER_TAG)$(DOCKER_PLATFORM_TAG_SUFFIX) --format '{{ json . }}' | jq -r .manifest.digest); \
95 | cosign sign-blob --tlog-upload=false --key cosign.key --bundle $(SBOM_DIR)/$(DOCKER_REGISTRY)/$(DOCKER_ORG)/$(PROJECT_NAME)/$(DOCKER_TAG)/$$MANIFEST_DIGEST.json.bundle $(SBOM_DIR)/$(DOCKER_REGISTRY)/$(DOCKER_ORG)/$(PROJECT_NAME)/$(DOCKER_TAG)/$$MANIFEST_DIGEST.json
96 | @rm cosign.key
97 |
98 | .PHONY: docker_push_sbom
99 | docker_push_sbom:
100 | # Push the SBOM to the container registry and sign it
101 | @echo $$COSIGN_PRIVATE_KEY | base64 -d > cosign.key
102 | MANIFEST_DIGEST=$(shell docker buildx imagetools inspect $(DOCKER_REGISTRY)/$(DOCKER_ORG)/$(PROJECT_NAME):$(DOCKER_TAG)$(DOCKER_PLATFORM_TAG_SUFFIX) --format '{{ json . }}' | jq -r .manifest.digest); \
103 | cosign attach sbom --sbom $(SBOM_DIR)/$(DOCKER_REGISTRY)/$(DOCKER_ORG)/$(PROJECT_NAME)/$(DOCKER_TAG)/$$MANIFEST_DIGEST.json $(DOCKER_REGISTRY)/$(DOCKER_ORG)/$(PROJECT_NAME):$(DOCKER_TAG)$(DOCKER_PLATFORM_TAG_SUFFIX)
104 | MANIFEST_DIGEST=$(shell docker buildx imagetools inspect $(DOCKER_REGISTRY)/$(DOCKER_ORG)/$(PROJECT_NAME):$(DOCKER_TAG)$(DOCKER_PLATFORM_TAG_SUFFIX) --format '{{ json . }}' | jq -r .manifest.digest); \
105 | cosign sign --tlog-upload=false -a author=StrimziCI -a BuildID=$(BUILD_ID) -a Commit=$(BUILD_COMMIT) --key cosign.key --attachment sbom $(DOCKER_REGISTRY)/$(DOCKER_ORG)/$(PROJECT_NAME)@$$MANIFEST_DIGEST
106 | @rm cosign.key
--------------------------------------------------------------------------------
/Makefile.maven:
--------------------------------------------------------------------------------
1 | # Makefile.maven contains the shared tasks for building Java applications. This file is
2 | # included into the Makefiles of the modules that contain Java sources which should be built.
3 |
4 | .PHONY: java_compile
5 | java_compile:
6 | echo "Building JAR file ..."
7 | mvn $(MVN_ARGS) compile
8 |
9 | .PHONY: java_verify
10 | java_verify:
11 | echo "Building JAR file ..."
12 | mvn $(MVN_ARGS) verify
13 |
14 | .PHONY: java_package
15 | java_package:
16 | echo "Packaging project ..."
17 | mvn $(MVN_ARGS) package
18 |
19 | .PHONY: java_install
20 | java_install:
21 | echo "Installing JAR files ..."
22 | mvn $(MVN_ARGS) install
23 |
24 | .PHONY: java_clean
25 | java_clean:
26 | echo "Cleaning Maven build ..."
27 | mvn clean
28 |
29 | .PHONY: spotbugs
30 | spotbugs: java_compile
31 | mvn $(MVN_ARGS) spotbugs:check
32 |
--------------------------------------------------------------------------------
/Makefile.os:
--------------------------------------------------------------------------------
1 | FIND = find
2 | SED = sed
3 | GREP = grep
4 | CP = cp
5 | SORT = sort
6 | SHA1SUM = sha1sum
7 | XARGS = xargs
8 |
9 | UNAME_S := $(shell uname -s)
10 | ifeq ($(UNAME_S),Darwin)
11 | FIND = gfind
12 | SED = gsed
13 | GREP = ggrep
14 | CP = gcp
15 | SORT = gsort
16 | SHA1SUM = gsha1sum
17 | XARGS = gxargs
18 | endif
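# On macOS, the g-prefixed GNU variants of these tools are typically installed with Homebrew,
# e.g. `brew install coreutils findutils gnu-sed grep` (assumes Homebrew; package names may vary).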
--------------------------------------------------------------------------------
/api/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0"
3 |          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
5 |     <modelVersion>4.0.0</modelVersion>
6 | 
7 |     <parent>
8 |         <groupId>io.strimzi.access-operator</groupId>
9 |         <artifactId>kafka-access-operator</artifactId>
10 |         <version>0.2.0-SNAPSHOT</version>
11 |     </parent>
12 | 
13 |     <artifactId>api</artifactId>
14 | 
15 |     <properties>
16 |         <maven.compiler.source>17</maven.compiler.source>
17 |         <maven.compiler.target>17</maven.compiler.target>
18 |         <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
19 |         <!-- (element name lost) false -->
20 |     </properties>
21 | 
22 |     <dependencies>
23 |         <dependency>
24 |             <groupId>io.javaoperatorsdk</groupId>
25 |             <artifactId>operator-framework-core</artifactId>
26 |             <exclusions>
27 |                 <exclusion>
28 |                     <groupId>io.fabric8</groupId>
29 |                     <artifactId>kubernetes-httpclient-vertx</artifactId>
30 |                 </exclusion>
31 |             </exclusions>
32 |         </dependency>
33 |         <dependency>
34 |             <groupId>io.fabric8</groupId>
35 |             <artifactId>kubernetes-httpclient-jdk</artifactId>
36 |         </dependency>
37 |         <dependency>
38 |             <groupId>io.fabric8</groupId>
39 |             <artifactId>kubernetes-client-api</artifactId>
40 |         </dependency>
41 |         <dependency>
42 |             <groupId>io.fabric8</groupId>
43 |             <artifactId>kubernetes-model-core</artifactId>
44 |         </dependency>
45 |         <dependency>
46 |             <groupId>io.fabric8</groupId>
47 |             <artifactId>kubernetes-model-common</artifactId>
48 |         </dependency>
49 |         <dependency>
50 |             <groupId>io.fabric8</groupId>
51 |             <artifactId>crd-generator-api-v2</artifactId>
52 |             <scope>provided</scope>
53 |         </dependency>
54 |         <dependency>
55 |             <groupId>io.sundr</groupId>
56 |             <artifactId>builder-annotations</artifactId>
57 |             <scope>compile</scope>
58 |         </dependency>
59 |         <dependency>
60 |             <groupId>io.strimzi</groupId>
61 |             <artifactId>api</artifactId>
62 |             <exclusions>
63 |                 <exclusion>
64 |                     <groupId>io.fabric8</groupId>
65 |                     <artifactId>kubernetes-client</artifactId>
66 |                 </exclusion>
67 |             </exclusions>
68 |         </dependency>
69 |         <dependency>
70 |             <groupId>org.apache.logging.log4j</groupId>
71 |             <artifactId>log4j-slf4j-impl</artifactId>
72 |         </dependency>
73 |         <dependency>
74 |             <groupId>org.junit.jupiter</groupId>
75 |             <artifactId>junit-jupiter-api</artifactId>
76 |             <scope>test</scope>
77 |         </dependency>
78 |         <dependency>
79 |             <groupId>org.assertj</groupId>
80 |             <artifactId>assertj-core</artifactId>
81 |             <scope>test</scope>
82 |         </dependency>
83 |     </dependencies>
84 | 
85 |     <build>
86 |         <pluginManagement>
87 |             <plugins>
88 |                 <plugin>
89 |                     <groupId>io.fabric8</groupId>
90 |                     <artifactId>crd-generator-maven-plugin</artifactId>
91 |                     <version>${fabric8.version}</version>
92 |                 </plugin>
93 |             </plugins>
94 |         </pluginManagement>
95 |         <plugins>
96 |             <plugin>
97 |                 <groupId>io.fabric8</groupId>
98 |                 <artifactId>crd-generator-maven-plugin</artifactId>
99 |                 <executions>
100 |                     <execution>
101 |                         <goals>
102 |                             <goal>generate</goal>
103 |                         </goals>
104 |                         <configuration>
105 |                             <customResourceClasses>
106 |                                 <customResourceClass>io.strimzi.kafka.access.model.KafkaAccess</customResourceClass>
107 |                             </customResourceClasses>
108 |                             <!-- (element name lost) true -->
109 |                             <!-- (element name lost) true -->
110 |                         </configuration>
111 |                     </execution>
112 |                 </executions>
113 |             </plugin>
114 |         </plugins>
115 |     </build>
116 | </project>
--------------------------------------------------------------------------------
/api/src/main/java/io/strimzi/kafka/access/internal/StatusUtils.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access.internal;
6 |
7 | import java.time.ZoneOffset;
8 | import java.time.ZonedDateTime;
9 | import java.time.format.DateTimeFormatter;
10 | import java.util.Comparator;
11 | import java.util.List;
12 | import java.util.Objects;
13 |
14 | import io.strimzi.api.kafka.model.common.Condition;
15 | import io.strimzi.api.kafka.model.common.ConditionBuilder;
16 |
17 | /**
18 | * Utility methods for working with status sections of custom resources
19 | */
20 | public class StatusUtils {
21 | /**
22 | * Type of a ready status condition.
23 | */
24 | public static final String CONDITION_TYPE_READY = "Ready";
25 |
26 | /**
27 | * Indicates that a condition holds (status "True")
28 | */
29 | public static final String CONDITION_STATUS_TRUE = "True";
30 |
31 | /**
32 | * Indicates that a condition does not hold (status "False")
33 | */
34 | public static final String CONDITION_STATUS_FALSE = "False";
35 |
36 | /**
37 | * Returns the current timestamp in ISO 8601 format, for example "2019-07-23T09:08:12.356Z".
38 | *
39 | * @return the current timestamp in ISO 8601 format, for example "2019-07-23T09:08:12.356Z".
40 | */
41 | public static String iso8601Now() {
42 | return ZonedDateTime.now(ZoneOffset.UTC).format(DateTimeFormatter.ISO_INSTANT);
43 | }
44 |
45 | /**
46 | * Replaces a condition in the status
47 | *
48 | * @param conditions The current condition list
49 | * @param condition The condition
50 | */
51 | public static void setCondition(final List<Condition> conditions, final Condition condition) {
52 | boolean found = false;
53 |
54 | for (final Condition c : conditions) {
55 | if (Objects.equals(c.getType(), condition.getType())) {
56 |
57 | found = true;
58 |
59 | if (!Objects.equals(c.getStatus(), condition.getStatus())) {
60 | c.setLastTransitionTime(StatusUtils.iso8601Now());
61 | c.setStatus(condition.getStatus());
62 | }
63 | if (!Objects.equals(c.getReason(), condition.getReason())) {
64 | c.setReason(condition.getReason());
65 | }
66 | if (!Objects.equals(c.getMessage(), condition.getMessage())) {
67 | c.setMessage(condition.getMessage());
68 | }
69 | if (c.getLastTransitionTime() == null) {
70 | c.setLastTransitionTime(StatusUtils.iso8601Now());
71 | }
72 | }
73 | }
74 |
75 | if (!found) {
76 | if (condition.getLastTransitionTime() == null) {
77 | condition.setLastTransitionTime(StatusUtils.iso8601Now());
78 | }
79 |
80 | conditions.add(condition);
81 | }
82 |
83 | // keep the conditions sorted by type to avoid resource-changed events caused only by ordering differences
84 | conditions.sort(Comparator.comparing(Condition::getType));
85 | }
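    // Illustrative usage sketch ("status" is an assumed variable, not part of this class):
    //   List<Condition> conditions = status.getConditions();
    //   StatusUtils.setCondition(conditions, StatusUtils.buildReadyCondition(true, null, null));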
86 |
87 |
88 | /**
89 | * Creates a new ready type condition
90 | *
91 | * @param ready Whether the resource is ready
92 | * @param reason Reason for the condition
93 | * @param message Message of the condition
94 | *
95 | * @return New ready type condition
96 | */
97 | public static Condition buildReadyCondition(boolean ready, String reason, String message) {
98 | return new ConditionBuilder()
99 | .withType(CONDITION_TYPE_READY)
100 | .withStatus(ready ? CONDITION_STATUS_TRUE : CONDITION_STATUS_FALSE)
101 | .withReason(reason)
102 | .withMessage(message)
103 | .build();
104 | }
105 | }
106 |
--------------------------------------------------------------------------------
/api/src/main/java/io/strimzi/kafka/access/model/BindingStatus.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access.model;
6 |
7 | import io.strimzi.api.kafka.model.common.Constants;
8 | import io.sundr.builder.annotations.Buildable;
9 |
10 | /**
11 | * The status class for keeping the state of service binding status
12 | */
13 | @Buildable(
14 | editableEnabled = false,
15 | builderPackage = Constants.FABRIC8_KUBERNETES_API
16 | )
17 | public class BindingStatus {
18 |
19 | private String name;
20 |
21 | /**
22 | * Default constructor
23 | */
24 | public BindingStatus() {
25 | }
26 |
27 | /**
28 | * Constructor
29 | *
30 | * @param name The Kubernetes secret name
31 | */
32 | public BindingStatus(final String name) {
33 | this.setName(name);
34 | }
35 |
36 | /**
37 | * Gets the name of the BindingStatus instance
38 | *
39 | * @return A name for the BindingStatus instance
40 | */
41 | public String getName() {
42 | return name;
43 | }
44 |
45 | /**
46 | * Sets the name of the BindingStatus instance
47 | *
48 | * @param name The name of the BindingStatus instance
49 | */
50 | public void setName(final String name) {
51 | this.name = name;
52 | }
53 |
54 | }
55 |
--------------------------------------------------------------------------------
/api/src/main/java/io/strimzi/kafka/access/model/KafkaAccess.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access.model;
6 |
7 | import io.fabric8.kubernetes.api.model.Namespaced;
8 | import io.fabric8.kubernetes.client.CustomResource;
9 | import io.fabric8.kubernetes.model.annotation.Group;
10 | import io.fabric8.kubernetes.model.annotation.ShortNames;
11 | import io.fabric8.kubernetes.model.annotation.Version;
12 | import io.strimzi.api.kafka.model.common.Constants;
13 | import io.sundr.builder.annotations.Buildable;
14 | import io.sundr.builder.annotations.BuildableReference;
15 |
16 | import java.io.Serial;
17 |
18 | /**
19 | * The KafkaAccess custom resource model
20 | */
21 | @Group("access.strimzi.io")
22 | @Version("v1alpha1")
23 | @ShortNames("ka")
24 | @Buildable(
25 | editableEnabled = false,
26 | builderPackage = Constants.FABRIC8_KUBERNETES_API,
27 | refs = {@BuildableReference(CustomResource.class), @BuildableReference(io.fabric8.kubernetes.api.model.ObjectMeta.class)}
28 | )
29 | public class KafkaAccess extends CustomResource<KafkaAccessSpec, KafkaAccessStatus> implements Namespaced {
30 | @Serial
31 | private static final long serialVersionUID = 1L;
32 |
33 | /**
34 | * The `kind` definition of the KafkaAccess custom resource
35 | */
36 | public static final String KIND = "KafkaAccess";
37 |
38 | @Override
39 | protected KafkaAccessStatus initStatus() {
40 | return new KafkaAccessStatus();
41 | }
42 | }
--------------------------------------------------------------------------------
/api/src/main/java/io/strimzi/kafka/access/model/KafkaAccessSpec.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access.model;
6 |
7 | import io.fabric8.generator.annotation.Required;
8 | import io.strimzi.api.kafka.model.common.Constants;
9 | import io.sundr.builder.annotations.Buildable;
10 |
11 | /**
12 | * The spec model of the KafkaAccess resource
13 | */
14 | @Buildable(
15 | editableEnabled = false,
16 | builderPackage = Constants.FABRIC8_KUBERNETES_API
17 | )
18 | public class KafkaAccessSpec {
19 |
20 | @Required
21 | private KafkaReference kafka;
22 | private KafkaUserReference user;
23 |
24 | /**
25 | * Gets the KafkaReference instance
26 | *
27 | * @return A KafkaReference instance
28 | */
29 | public KafkaReference getKafka() {
30 | return kafka;
31 | }
32 |
33 | /**
34 | * Sets the KafkaReference instance
35 | *
36 | * @param kafka The KafkaReference model
37 | */
38 | public void setKafka(final KafkaReference kafka) {
39 | this.kafka = kafka;
40 | }
41 |
42 | /**
43 | * Gets the KafkaUserReference instance
44 | *
45 | * @return A KafkaUserReference instance
46 | */
47 | public KafkaUserReference getUser() {
48 | return user;
49 | }
50 |
51 | /**
52 | * Sets the KafkaUserReference instance
53 | *
54 | * @param kafkaUser The KafkaUserReference model
55 | */
56 | public void setUser(final KafkaUserReference kafkaUser) {
57 | this.user = kafkaUser;
58 | }
59 |
60 | }
61 |
--------------------------------------------------------------------------------
/api/src/main/java/io/strimzi/kafka/access/model/KafkaAccessStatus.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access.model;
6 |
7 | import io.javaoperatorsdk.operator.api.ObservedGenerationAwareStatus;
8 | import io.strimzi.api.kafka.model.common.Constants;
9 | import io.strimzi.api.kafka.model.common.Condition;
10 | import io.strimzi.kafka.access.internal.StatusUtils;
11 | import io.sundr.builder.annotations.Buildable;
12 |
13 | import java.util.ArrayList;
14 | import java.util.List;
15 |
16 | /**
17 | * The status model of the KafkaAccess resource
18 | */
19 | @Buildable(
20 | editableEnabled = false,
21 | builderPackage = Constants.FABRIC8_KUBERNETES_API
22 | )
23 | public class KafkaAccessStatus extends ObservedGenerationAwareStatus {
24 |
25 | private BindingStatus binding;
26 | private final List<Condition> conditions = new ArrayList<>();
27 |
28 | /**
29 | * Gets the BindingStatus instance
30 | *
31 | * @return A BindingStatus instance
32 | */
33 | public BindingStatus getBinding() {
34 | return binding;
35 | }
36 |
37 | /**
38 | * Sets the BindingStatus instance
39 | *
40 | * @param bindingStatus The BindingStatus model
41 | */
42 | public void setBinding(final BindingStatus bindingStatus) {
43 | this.binding = bindingStatus;
44 | }
45 |
46 | /**
47 | * Gets the status conditions
48 | *
49 | * @return The status conditions
50 | */
51 | public List<Condition> getConditions() {
52 | return conditions;
53 | }
54 |
55 | /**
56 | * Replaces the Ready condition in the status
57 | *
58 | * @param ready Whether the resource is ready
59 | */
60 | public void setReadyCondition(final boolean ready) {
61 | StatusUtils.setCondition(
62 | this.conditions,
63 | StatusUtils.buildReadyCondition(ready, null, null)
64 | );
65 | }
66 |
67 | /**
68 | * Replaces the Ready condition in the status
69 | *
70 | * @param ready Whether the resource is ready
71 | * @param message The message for the status condition
72 | * @param reason The reason for the status condition
73 | */
74 | public void setReadyCondition(final boolean ready, final String message, final String reason) {
75 | StatusUtils.setCondition(
76 | this.conditions,
77 | StatusUtils.buildReadyCondition(ready, reason, message)
78 | );
79 | }
80 | }
81 |
--------------------------------------------------------------------------------
/api/src/main/java/io/strimzi/kafka/access/model/KafkaReference.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access.model;
6 |
7 | import io.fabric8.generator.annotation.Required;
8 | import io.strimzi.api.kafka.model.common.Constants;
9 | import io.sundr.builder.annotations.Buildable;
10 |
11 | /**
12 | * The Kafka reference. Keeps state for a Kafka resource of Strimzi Kafka Operator
13 | */
14 | @Buildable(
15 | editableEnabled = false,
16 | builderPackage = Constants.FABRIC8_KUBERNETES_API
17 | )
18 | public class KafkaReference {
19 |
20 | @Required
21 | @io.fabric8.crd.generator.annotation.PrinterColumn(name = "Cluster")
22 | private String name;
23 | private String namespace;
24 | @io.fabric8.crd.generator.annotation.PrinterColumn(name = "Listener")
25 | private String listener;
26 |
27 | /**
28 | * Gets the name of the Kafka reference
29 | *
30 | * @return The name of the Kafka reference
31 | */
32 | public String getName() {
33 | return name;
34 | }
35 |
36 | /**
37 | * Sets the name of the Kafka reference
38 | *
39 | * @param name The name of the Kafka reference
40 | */
41 | public void setName(final String name) {
42 | this.name = name;
43 | }
44 |
45 | /**
46 | * Gets the namespace of the Kafka reference
47 | *
48 | * @return A namespace definition for the Kafka reference
49 | */
50 | public String getNamespace() {
51 | return namespace;
52 | }
53 |
54 | /**
55 | * Sets the namespace of the Kafka reference
56 | *
57 | * @param namespace The namespace of the Kafka reference
58 | */
59 | public void setNamespace(final String namespace) {
60 | this.namespace = namespace;
61 | }
62 |
63 | /**
64 | * Gets the listener of the Kafka reference
65 | *
66 | * @return A listener definition for the Kafka reference
67 | */
68 | public String getListener() {
69 | return listener;
70 | }
71 |
72 | /**
73 | * Sets the listener of the Kafka reference
74 | *
75 | * @param listener The listener of the Kafka reference
76 | */
77 | public void setListener(final String listener) {
78 | this.listener = listener;
79 | }
80 |
81 | }
82 |
--------------------------------------------------------------------------------
/api/src/main/java/io/strimzi/kafka/access/model/KafkaUserReference.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access.model;
6 |
7 | import io.fabric8.generator.annotation.Required;
8 | import io.strimzi.api.kafka.model.common.Constants;
9 | import io.sundr.builder.annotations.Buildable;
10 |
11 | /**
12 | * The Kafka user reference, which keeps state for a KafkaUser resource of Strimzi Kafka Operator
13 | */
14 | @Buildable(
15 | editableEnabled = false,
16 | builderPackage = Constants.FABRIC8_KUBERNETES_API
17 | )
18 | public class KafkaUserReference {
19 |
20 | @Required
21 | private String kind;
22 | @Required
23 | private String apiGroup;
24 | @Required
25 | @io.fabric8.crd.generator.annotation.PrinterColumn(name = "User")
26 | private String name;
27 | private String namespace;
28 |
29 | /**
30 | * Gets the name of the Kafka user reference
31 | *
32 | * @return A name for the Kafka user reference
33 | */
34 | public String getName() {
35 | return name;
36 | }
37 |
38 | /**
39 | * Sets the name of the Kafka user reference
40 | *
41 | * @param name The name of the Kafka user reference
42 | */
43 | public void setName(final String name) {
44 | this.name = name;
45 | }
46 |
47 | /**
48 | * Gets the namespace of the Kafka user reference
49 | *
50 | * @return A namespace definition for the Kafka user reference
51 | */
52 | public String getNamespace() {
53 | return namespace;
54 | }
55 |
56 | /**
57 | * Sets the namespace of the Kafka user reference
58 | *
59 | * @param namespace The namespace of the Kafka user reference
60 | */
61 | public void setNamespace(final String namespace) {
62 | this.namespace = namespace;
63 | }
64 |
65 |
66 | /**
67 | * Gets the resource kind of the Kafka user reference
68 | *
69 | * @return A resource `kind` definition for the Kafka user reference
70 | */
71 | public String getKind() {
72 | return kind;
73 | }
74 |
75 | /**
76 | * Sets the resource kind of the Kafka user reference
77 | *
78 | * @param kind The resource kind of the Kafka user reference
79 | */
80 | public void setKind(final String kind) {
81 | this.kind = kind;
82 | }
83 |
84 | /**
85 | * Gets the resource API group of the Kafka user reference
86 | *
87 | * @return A resource API group definition for the Kafka user reference
88 | */
89 | public String getApiGroup() {
90 | return apiGroup;
91 | }
92 |
93 | /**
94 | * Sets the resource API group of the Kafka user reference
95 | *
96 | * @param apiGroup The resource API group of the Kafka user reference
97 | */
98 | public void setApiGroup(final String apiGroup) {
99 | this.apiGroup = apiGroup;
100 | }
101 |
102 |
103 | /**
104 | * Returns the serialized string of the KafkaUserReference object
105 | *
106 | * @return A serialized string of the KafkaUserReference object
107 | */
108 | @Override
109 | public String toString() {
110 | return "KafkaUserReference{" +
111 | "kind='" + kind + '\'' +
112 | ", apiGroup='" + apiGroup + '\'' +
113 | ", name='" + name + '\'' +
114 | ", namespace='" + namespace + '\'' +
115 | '}';
116 | }
117 | }
118 |
--------------------------------------------------------------------------------
/api/src/test/java/internal/StatusUtilsTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package internal;
6 |
7 | import java.util.ArrayList;
8 | import java.util.List;
9 |
10 | import io.strimzi.api.kafka.model.common.Condition;
11 | import io.strimzi.api.kafka.model.common.ConditionBuilder;
12 | import io.strimzi.kafka.access.internal.StatusUtils;
13 | import org.junit.jupiter.api.DisplayName;
14 | import org.junit.jupiter.api.Test;
15 |
16 | import static org.assertj.core.api.Assertions.assertThat;
17 |
18 | public class StatusUtilsTest {
19 | @Test
20 | @DisplayName("When a condition is added, the LastTransitionTime should be set if empty")
21 | void testLastTransitionTimeSetIfEmpty() {
22 | final List<Condition> conditions = new ArrayList<>();
23 |
24 | var c1 = new ConditionBuilder()
25 | .withType(StatusUtils.CONDITION_TYPE_READY)
26 | .withStatus(StatusUtils.CONDITION_STATUS_FALSE)
27 | .withMessage("foo-1")
28 | .withReason("bar")
29 | .build();
30 |
31 | StatusUtils.setCondition(conditions, c1);
32 |
33 | assertThat(conditions).first().satisfies(c -> {
34 | assertThat(c.getLastTransitionTime()).isNotNull();
35 | });
36 | }
37 |
38 | @Test
39 | @DisplayName("When a condition is amended, the LastTransitionTime should be set if empty")
40 | void testLastTransitionTimeSetIfEmptyWhenAmend() {
41 | final List<Condition> conditions = new ArrayList<>();
42 |
43 | var c1 = new ConditionBuilder()
44 | .withType(StatusUtils.CONDITION_TYPE_READY)
45 | .withStatus(StatusUtils.CONDITION_STATUS_FALSE)
46 | .withMessage("foo-1")
47 | .withReason("bar")
48 | .build();
49 | var c2 = new ConditionBuilder()
50 | .withType(StatusUtils.CONDITION_TYPE_READY)
51 | .withStatus(StatusUtils.CONDITION_STATUS_FALSE)
52 | .withMessage("foo-2")
53 | .withReason("bar")
54 | .build();
55 |
56 | conditions.add(c1);
57 |
58 | StatusUtils.setCondition(conditions, c2);
59 |
60 | assertThat(conditions).first().satisfies(c -> {
61 | assertThat(c.getLastTransitionTime()).isNotNull();
62 | assertThat(c.getMessage()).isEqualTo(c2.getMessage());
63 | });
64 | }
65 |
66 | @Test
67 | @DisplayName("When a condition is amended and its status changes, the LastTransitionTime should be updated accordingly")
68 | void testLastTransitionTimeUpdatedOnStatusChange() {
69 | final List<Condition> conditions = new ArrayList<>();
70 |
71 | var c1 = new ConditionBuilder()
72 | .withType(StatusUtils.CONDITION_TYPE_READY)
73 | .withStatus(StatusUtils.CONDITION_STATUS_FALSE)
74 | .withMessage("foo-1")
75 | .withReason("bar")
76 | .build();
77 | var c2 = new ConditionBuilder()
78 | .withType(StatusUtils.CONDITION_TYPE_READY)
79 | .withStatus(StatusUtils.CONDITION_STATUS_TRUE)
80 | .withMessage("foo-2")
81 | .withReason("bar")
82 | .build();
83 |
84 | // add a condition, LastTransitionTime should be set
85 | StatusUtils.setCondition(conditions, c1);
86 |
87 | assertThat(conditions).first().satisfies(c -> {
88 | assertThat(c.getLastTransitionTime()).isNotNull();
89 | });
90 |
91 | var ltt1 = conditions.get(0).getLastTransitionTime();
92 |
93 | // amend the condition status, LastTransitionTime should be updated
94 | StatusUtils.setCondition(conditions, c2);
95 |
96 | assertThat(conditions).first().satisfies(c -> {
97 | assertThat(c.getLastTransitionTime()).isNotNull();
98 | assertThat(c.getLastTransitionTime()).isNotSameAs(ltt1);
99 | assertThat(c.getMessage()).isEqualTo(c2.getMessage());
100 | });
101 | }
102 |
103 | @Test
104 | @DisplayName("When a condition is amended and its status does not change, the LastTransitionTime should not be updated")
105 | void testLastTransitionTimeNotUpdated() {
106 | final List<Condition> conditions = new ArrayList<>();
107 |
108 | var c1 = new ConditionBuilder()
109 | .withType(StatusUtils.CONDITION_TYPE_READY)
110 | .withStatus(StatusUtils.CONDITION_STATUS_FALSE)
111 | .withMessage("foo-1")
112 | .withReason("bar")
113 | .build();
114 | var c2 = new ConditionBuilder()
115 | .withType(StatusUtils.CONDITION_TYPE_READY)
116 | .withStatus(StatusUtils.CONDITION_STATUS_FALSE)
117 | .withMessage("foo-2")
118 | .withReason("bar")
119 | .build();
120 |
121 | // add a condition, LastTransitionTime should be set
122 | StatusUtils.setCondition(conditions, c1);
123 |
124 | assertThat(conditions).first().satisfies(c -> {
125 | assertThat(c.getLastTransitionTime()).isNotNull();
126 | });
127 |
128 | var ltt1 = conditions.get(0).getLastTransitionTime();
129 |
130 | // amend the condition but not its status, LastTransitionTime should not be updated
131 | StatusUtils.setCondition(conditions, c2);
132 |
133 | assertThat(conditions).first().satisfies(c -> {
134 | assertThat(c.getLastTransitionTime()).isNotNull();
135 | assertThat(c.getLastTransitionTime()).isSameAs(ltt1);
136 | assertThat(c.getMessage()).isEqualTo(c2.getMessage());
137 | });
138 | }
139 | }
140 |
--------------------------------------------------------------------------------
/bin/access_operator_run.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -e
3 | set +x
4 |
5 | # Exit when we run out of heap memory
6 | JAVA_OPTS="${JAVA_OPTS} -XX:+ExitOnOutOfMemoryError"
7 |
8 | # Disable FIPS if needed
9 | if [ "$FIPS_MODE" = "disabled" ]; then
10 | export JAVA_OPTS="${JAVA_OPTS} -Dcom.redhat.fips=false"
11 | fi
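# Example (illustrative): run with FIPS disabled and an explicit heap limit
#   FIPS_MODE=disabled JAVA_OPTS="-Xmx256m" ./access_operator_run.sh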
12 |
13 | # Start Access operator
14 | exec /usr/bin/tini -w -e 143 -- java $JAVA_OPTS -classpath "libs/*" "io.strimzi.kafka.access.KafkaAccessOperator" "$@"
--------------------------------------------------------------------------------
/development-docs/TESTING.md:
--------------------------------------------------------------------------------
1 | # Running the system-tests
2 |
3 | This document describes how to run the system-tests in the Kafka Access Operator repository.
4 |
5 | ## Prerequisites
6 |
7 | To run the system-tests, you need a Kubernetes environment.
8 | If you want to run the tests on your local machine, you can use [minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/), as we do in the Azure pipelines.
9 | Alternatively, you can log in to a remote cluster (for example OpenShift or any other cluster that is available to you) and
10 | the test automation will deploy everything and run the test cases there.
11 |
12 | You also need to build the `api` module, which the `systemtest` module depends on.
13 | You can achieve that by running the
14 |
15 | ```bash
16 | mvn clean install -DskipTests
17 | ```
18 |
19 | command in the root of the repository, or alternatively run the command for the `systemtest` module with the `-am` flag:
20 |
21 | ```bash
22 | mvn clean install -pl systemtest -DskipTests -am
23 | ```
24 |
25 | The `api` module is needed because the tests use classes such as `KafkaAccessBuilder`.
26 |
27 | ## Environment variables
28 |
29 | The system-tests allow you to set multiple environment variables that configure how the tests
30 | should be executed.
31 | You can configure the install type (Yaml or Helm), as well as the Docker registry, organisation, and tag.
32 | The following table shows the full list of environment variables that you can configure:
33 |
34 | | Name | Description | Default |
35 | |:----------------|:------------------------------------------------------------------------|:--------|
36 | | DOCKER_REGISTRY | Specifies the Docker registry where the images are located | None |
37 | | DOCKER_ORG | Specifies the organization/repository containing the image | None |
38 | | DOCKER_TAG | Specifies the image tag | None |
39 | | INSTALL_TYPE | Specifies the method how the KAO should be installed - `Yaml` or `Helm` | Yaml |
40 |
41 | The default KAO image comes from [050-Deployment.yaml](../packaging/install/050-Deployment.yaml), so if you
42 | don't specify one of `DOCKER_REGISTRY`, `DOCKER_ORG`, or `DOCKER_TAG`, the default from that file is used.
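
For example, to make the tests deploy your own operator image and install it via Helm (the registry, organisation, and tag values here are illustrative), export the variables before running the tests:

```bash
export DOCKER_REGISTRY=quay.io
export DOCKER_ORG=my-org
export DOCKER_TAG=0.2.0
export INSTALL_TYPE=Helm
```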
43 |
44 | ## Running the tests
45 |
46 | To run the tests, you can use your IDE, which should show you the option to run the tests (in IntelliJ IDEA it's the "play" button)
47 | or you can run the following Maven command:
48 |
49 | ```bash
50 | mvn verify -pl systemtest -Pall
51 | ```
52 |
53 | With the `-pl` flag, you specify that you want to run the tests inside the `systemtest` module.
54 | The `-Pall` profile is needed because the `skipTests` property is set to `true` by default in this repository - so the system-tests
55 | are not executed with every build of the project.
56 | The `all` profile sets `skipTests` to `false`, so the tests will be executed.
57 | You can alternatively run the command with `-DskipTests=false` instead of `-Pall`:
58 |
59 | ```bash
60 | mvn verify -pl systemtest -DskipTests=false
61 | ```
62 |
63 | These two commands execute all of the system-tests present in the module.
64 | To run just one specific test, use the following command:
65 |
66 | ```bash
67 | mvn verify -pl systemtest -Pall -Dit.test=CLASS_NAME#TEST_CASE_NAME
68 | ```
69 |
70 | for example:
71 | ```bash
72 | mvn verify -pl systemtest -Pall -Dit.test=KafkaAccessOperatorST#testAccessToUnspecifiedMultipleListenersWithMultipleInternal
73 | ```
74 |
75 | You can specify multiple test cases in the same way; just separate them with commas.
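
For example, to run two test cases in one command (the test case names here are illustrative):

```bash
mvn verify -pl systemtest -Pall -Dit.test=KafkaAccessOperatorST#testCaseOne,KafkaAccessOperatorST#testCaseTwo
```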
--------------------------------------------------------------------------------
/examples/kafka-access-with-user.yaml:
--------------------------------------------------------------------------------
1 | # The operator will look up the Kafka instance specified and
2 | # create a secret with the details to connect to the listener specified.
3 | # It will also look up the KafkaUser specified and check it has the correct
4 | # authentication mechanism to connect to the listener. If so it will add the
5 | # user credentials to the secret it creates.
6 | # If no listener is specified it will choose one based on the user authentication.
7 | apiVersion: access.strimzi.io/v1alpha1
8 | kind: KafkaAccess
9 | metadata:
10 | name: my-kafka-access
11 | spec:
12 | kafka:
13 | name: my-cluster
14 | namespace: kafka
15 | listener: tls
16 | user:
17 | kind: KafkaUser
18 | apiGroup: kafka.strimzi.io
19 | name: my-user
20 | namespace: kafka
21 |
--------------------------------------------------------------------------------
/examples/kafka-access.yaml:
--------------------------------------------------------------------------------
1 | # The operator will look up the Kafka instance specified and
2 | # create a secret with the details to connect to the listener specified.
3 | # If no listener is specified it will choose one, preferring an internal listener.
4 | apiVersion: access.strimzi.io/v1alpha1
5 | kind: KafkaAccess
6 | metadata:
7 | name: my-kafka-access
8 | spec:
9 | kafka:
10 | name: my-cluster
11 | namespace: kafka
12 | listener: plain
13 |
--------------------------------------------------------------------------------
/helm-charts/helm3/strimzi-access-operator/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *~
18 | # Various IDEs
19 | .project
20 | .idea/
21 | *.tmproj
22 |
--------------------------------------------------------------------------------
/helm-charts/helm3/strimzi-access-operator/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | # TODO
3 | appVersion: "0.1.0"
4 | description: "Strimzi Kafka Access Operator"
5 | name: strimzi-access-operator
6 | version: 0.1.0
7 | icon: https://raw.githubusercontent.com/strimzi/strimzi-kafka-operator/main/documentation/logo/strimzi_logo.png
8 | keywords:
9 | - kafka
10 | - queue
11 | - stream
12 | - event
13 | - messaging
14 | - datastore
15 | - topic
16 | home: https://strimzi.io/
17 | sources:
18 | - https://github.com/strimzi/kafka-access-operator
19 |
--------------------------------------------------------------------------------
/helm-charts/helm3/strimzi-access-operator/README.md:
--------------------------------------------------------------------------------
1 | # Strimzi Access Operator
2 |
3 | Strimzi Access Operator provides a Kubernetes operator to help applications bind to an [Apache Kafka®](https://kafka.apache.org) cluster that is managed by the [Strimzi](https://strimzi.io) cluster operator.
4 |
5 | The operator creates a single Kubernetes `Secret` resource containing all the connection details for the Kafka cluster.
6 | This removes the need for applications to query multiple Kubernetes resources to get connection information.
7 | The `Secret` follows the conventions laid out in the [Service Binding Specification for Kubernetes v1.0.0](https://servicebinding.io/spec/core/1.0.0/).
8 |
9 | The operator is built using the [Java Operator SDK](https://github.com/java-operator-sdk/java-operator-sdk).
10 |
11 | ## Running the Access Operator
12 |
13 | For the operator to start successfully you need the Strimzi `Kafka` and `KafkaUser` custom resource definitions installed in your Kubernetes cluster.
14 | You can get these from the Strimzi [GitHub repository](https://github.com/strimzi/strimzi-kafka-operator/tree/main/install/cluster-operator),
15 | or use the [Strimzi quickstart guide](https://strimzi.io/quickstarts/) to also deploy the Strimzi cluster operator and a Kafka instance at the same time.
16 |
17 | ### Installing the Chart
18 |
19 | To install the chart with the release name `my-strimzi-access-operator`:
20 |
21 | ```bash
22 | $ helm install my-strimzi-access-operator oci://quay.io/strimzi-helm/strimzi-access-operator
23 | ```
24 |
25 | The command deploys the Strimzi Access Operator on the Kubernetes cluster with the default configuration.
26 |
27 | ### Uninstalling the Chart
28 |
29 | To uninstall/delete the `my-strimzi-access-operator` deployment:
30 |
31 | ```bash
32 | $ helm delete my-strimzi-access-operator
33 | ```
34 |
35 | The command removes all the Kubernetes components associated with the Strimzi Access Operator and deletes the release.
36 |
37 | ### Configuration
38 |
39 | The following table lists some available configurable parameters of the Strimzi chart and their default values.
40 | For a full list of supported options, check the [`values.yaml` file](./values.yaml).
41 |
42 | | Parameter | Description | Default |
43 | |--------------------------------------|-----------------------------------------------------------|----------|
44 | | `image.tag`                          | Override default Access Operator image tag                 | `0.1.1`  |
45 | | `image.pullPolicy`                   | Image pull policy for all pods deployed by Access Operator | `nil`    |
46 | | `resources.limits.cpu`               | Configures the CPU limit for the Access Operator Pod       | `500m`   |
47 | | `resources.limits.memory`            | Configures the memory limit for the Access Operator Pod    | `256Mi`  |
48 | | `resources.requests.cpu`             | Configures the CPU request for the Access Operator Pod     | `100m`   |
49 | | `resources.requests.memory`          | Configures the memory request for the Access Operator Pod  | `256Mi`  |
50 | | `livenessProbe.initialDelaySeconds` | Liveness probe initial delay (in seconds) | `10` |
51 | | `livenessProbe.periodSeconds` | Liveness probe period (in seconds) | `30` |
52 | | `readinessProbe.initialDelaySeconds` | Readiness probe initial delay (in seconds) | `10` |
53 | | `readinessProbe.periodSeconds` | Readiness probe period (in seconds) | `30` |
54 |
55 | Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
56 |
57 | ```bash
58 | $ helm install my-strimzi-access-operator --set image.tag=latest oci://quay.io/strimzi-helm/strimzi-access-operator
59 | ```
60 |
61 | ## Using the Access Operator
62 |
63 | To make use of the Access Operator, create a `KafkaAccess` custom resource (CR).
64 | You must specify the name of the `Kafka` CR you want to connect to.
65 | You can optionally also specify the name of the listener in the `Kafka` CR and a `KafkaUser`.
66 | See the [examples folder](https://github.com/strimzi/kafka-access-operator/tree/main/examples) for some valid `KafkaAccess` specifications.
67 |
68 | If you do not specify which listener you want to connect to, the operator uses the following rules to choose a listener:
69 | 1. If there is only one listener configured in the `Kafka` CR, that listener is chosen.
70 | 2. If there are multiple listeners listed in the `Kafka` CR, the operator filters the list by comparing the `tls` and `authentication` properties in the `Kafka` and `KafkaUser` CRs to select a listener with the appropriate security.
71 | 3. If there are multiple listeners with appropriate security, the operator chooses the one that is of type `internal`.
72 | 4. If there are multiple internal listeners with appropriate security, the operator sorts the listeners alphabetically by name, and chooses the first one.
73 |
74 | Once the Access Operator has created the binding `Secret`, it updates the `KafkaAccess` custom resource to put the name of the secret in the status, for example:
75 |
76 | ```yaml
77 | ...
78 | status:
79 | binding:
80 | name: kafka-binding
81 | ```
82 |
83 | The `Secret` created by the Access Operator has the following structure:
84 |
85 | ```yaml
86 | apiVersion: v1
87 | kind: Secret
88 | metadata:
89 | name: kafka-binding
90 | type: servicebinding.io/kafka
91 | data:
92 | type: kafka
93 | provider: strimzi
94 |
95 | bootstrap.servers: # comma separated list of host:port for Kafka
96 | bootstrap-servers: # comma separated list of host:port for Kafka
97 | bootstrapServers: # comma separated list of host:port for Kafka
98 |
99 | security.protocol: # one of PLAINTEXT, SASL_PLAINTEXT, SASL_SSL or SSL
100 | securityProtocol: # one of PLAINTEXT, SASL_PLAINTEXT, SASL_SSL or SSL
101 |
102 | # Provided if TLS enabled:
103 | ssl.truststore.crt: # Strimzi cluster CA certificate
104 |
105 | # Provided if selected user is SCRAM auth:
106 | username: # SCRAM username
107 | password: # SCRAM password
108 | sasl.jaas.config: # sasl jaas config string for use by Java applications
109 | sasl.mechanism: SCRAM-SHA-512
110 | saslMechanism: SCRAM-SHA-512
111 |
112 | # Provided if selected user is mTLS:
113 | ssl.keystore.crt: # certificate for the consuming client signed by the clients' CA
114 | ssl.keystore.key: # private key for the consuming client
115 | ```
116 |
117 | Developers can make this `Secret` available to their applications themselves, or use an operator that implements the [Service Binding specification](https://servicebinding.io/spec/core/1.0.0/) to do it.
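
For example, a minimal sketch of mounting the binding `Secret` into a workload yourself (the Pod and image names are illustrative):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: my-kafka-app               # illustrative name
spec:
  containers:
    - name: app
      image: my-kafka-app:latest   # illustrative image
      volumeMounts:
        - name: kafka-binding
          mountPath: /bindings/kafka-binding
          readOnly: true
  volumes:
    - name: kafka-binding
      secret:
        secretName: kafka-binding  # the name reported in the KafkaAccess status
```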
118 |
119 | ## Getting help
120 |
121 | If you encounter any issues while using the Access Operator, you can get help through the following methods:
122 |
123 | - [Strimzi Users mailing list](https://lists.cncf.io/g/cncf-strimzi-users/topics)
124 | - [#strimzi channel on CNCF Slack](https://slack.cncf.io/)
125 | - [GitHub Discussions](https://github.com/orgs/strimzi/discussions)
126 |
127 | ## Contributing
128 |
129 | You can contribute by:
130 | - Raising any issues you find using the Access Operator
131 | - Fixing issues by opening Pull Requests
132 | - Improving documentation
133 | - Talking about the Strimzi Access Operator
134 |
135 | All bugs, tasks or enhancements are tracked as [GitHub issues](https://github.com/strimzi/kafka-access-operator/issues).
136 |
137 | The [dev guide](https://github.com/strimzi/kafka-access-operator/blob/main/development-docs/DEV_GUIDE.md) describes how to build the operator and how to test your changes before submitting a patch or opening a PR.
138 |
139 | If you want to get in touch with us first before contributing, you can use:
140 |
141 | - [Strimzi Dev mailing list](https://lists.cncf.io/g/cncf-strimzi-dev/topics)
142 | - [#strimzi channel on CNCF Slack](https://slack.cncf.io/)
143 |
144 | Learn more on how you can contribute on our [Join Us](https://strimzi.io/join-us/) page.
145 |
146 | ## License
147 |
148 | Strimzi Access Operator is licensed under the [Apache License](./LICENSE), Version 2.0
149 |
--------------------------------------------------------------------------------
/helm-charts/helm3/strimzi-access-operator/crds/040-Crd-kafkaaccess.yaml:
--------------------------------------------------------------------------------
1 | # Generated by Fabric8 CRDGenerator, manual edits might get overwritten!
2 | apiVersion: apiextensions.k8s.io/v1
3 | kind: CustomResourceDefinition
4 | metadata:
5 | name: kafkaaccesses.access.strimzi.io
6 | labels:
7 | servicebinding.io/provisioned-service: "true"
8 | spec:
9 | group: access.strimzi.io
10 | names:
11 | kind: KafkaAccess
12 | plural: kafkaaccesses
13 | shortNames:
14 | - ka
15 | singular: kafkaaccess
16 | scope: Namespaced
17 | versions:
18 | - additionalPrinterColumns:
19 | - jsonPath: .spec.kafka.listener
20 | name: Listener
21 | priority: 0
22 | type: string
23 | - jsonPath: .spec.kafka.name
24 | name: Cluster
25 | priority: 0
26 | type: string
27 | - jsonPath: .spec.user.name
28 | name: User
29 | priority: 0
30 | type: string
31 | name: v1alpha1
32 | schema:
33 | openAPIV3Schema:
34 | properties:
35 | spec:
36 | properties:
37 | kafka:
38 | properties:
39 | listener:
40 | type: string
41 | name:
42 | type: string
43 | namespace:
44 | type: string
45 | required:
46 | - name
47 | type: object
48 | user:
49 | properties:
50 | apiGroup:
51 | type: string
52 | kind:
53 | type: string
54 | name:
55 | type: string
56 | namespace:
57 | type: string
58 | required:
59 | - apiGroup
60 | - kind
61 | - name
62 | type: object
63 | required:
64 | - kafka
65 | type: object
66 | status:
67 | properties:
68 | binding:
69 | properties:
70 | name:
71 | type: string
72 | type: object
73 | conditions:
74 | items:
75 | properties:
76 | additionalProperties:
77 | additionalProperties:
78 | type: object
79 | type: object
80 | lastTransitionTime:
81 | type: string
82 | message:
83 | type: string
84 | reason:
85 | type: string
86 | status:
87 | type: string
88 | type:
89 | type: string
90 | type: object
91 | type: array
92 | observedGeneration:
93 | type: integer
94 | type: object
95 | type: object
96 | served: true
97 | storage: true
98 | subresources:
99 | status: {}
100 |
--------------------------------------------------------------------------------
/helm-charts/helm3/strimzi-access-operator/templates/010-ServiceAccount.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: strimzi-access-operator
6 | labels:
7 | app: strimzi-access-operator
8 | namespace: {{ .Release.Namespace }}
--------------------------------------------------------------------------------
/helm-charts/helm3/strimzi-access-operator/templates/020-ClusterRole.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | name: strimzi-access-operator
6 | labels:
7 | app: strimzi-access-operator
8 | rules:
9 | - apiGroups:
10 | - "access.strimzi.io"
11 | resources:
12 | - kafkaaccesses
13 | - kafkaaccesses/status
14 | verbs:
15 | - get
16 | - list
17 | - watch
18 | - create
19 | - delete
20 | - patch
21 | - update
22 | - apiGroups:
23 | - "kafka.strimzi.io"
24 | resources:
25 | - kafkas
26 | - kafkausers
27 | verbs:
28 | - get
29 | - list
30 | - watch
31 | - apiGroups:
32 | - ""
33 | resources:
34 | - secrets
35 | verbs:
36 | - get
37 | - list
38 | - watch
39 | - create
40 | - delete
41 | - patch
42 | - update
43 |
--------------------------------------------------------------------------------
/helm-charts/helm3/strimzi-access-operator/templates/030-ClusterRoleBinding.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRoleBinding
4 | metadata:
5 | name: strimzi-access-operator
6 | labels:
7 | app: strimzi-access-operator
8 | subjects:
9 | - kind: ServiceAccount
10 | name: strimzi-access-operator
11 | namespace: {{ .Release.Namespace }}
12 | roleRef:
13 | kind: ClusterRole
14 | name: strimzi-access-operator
15 | apiGroup: rbac.authorization.k8s.io
16 |
--------------------------------------------------------------------------------
/helm-charts/helm3/strimzi-access-operator/templates/050-Deployment.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: strimzi-access-operator
6 | labels:
7 | app: strimzi-access-operator
8 | namespace: {{ .Release.Namespace }}
9 | spec:
10 | replicas: 1
11 | selector:
12 | matchLabels:
13 | app: strimzi-access-operator
14 | strimzi.io/kind: access-operator
15 | strategy:
16 | type: Recreate
17 | template:
18 | metadata:
19 | labels:
20 | app: strimzi-access-operator
21 | strimzi.io/kind: access-operator
22 | spec:
23 | serviceAccountName: strimzi-access-operator
24 | volumes:
25 | - name: strimzi-tmp
26 | emptyDir:
27 | medium: Memory
28 | sizeLimit: 1Mi
29 | containers:
30 | - name: access-operator
31 | image: quay.io/strimzi/access-operator:{{ .Values.image.tag }}
32 | {{- if .Values.image.pullPolicy }}
33 | imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
34 | {{- end }}
35 | ports:
36 | - containerPort: 8080
37 | name: http
38 | args:
39 | - /opt/strimzi/bin/access_operator_run.sh
40 | volumeMounts:
41 | - name: strimzi-tmp
42 | mountPath: /tmp
43 | resources:
44 | {{ toYaml .Values.resources | indent 12 }}
45 | livenessProbe:
46 | httpGet:
47 | path: /healthy
48 | port: http
49 | initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
50 | periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
51 | readinessProbe:
52 | httpGet:
53 | path: /ready
54 | port: http
55 | initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
56 | periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
57 |
--------------------------------------------------------------------------------
/helm-charts/helm3/strimzi-access-operator/values.yaml:
--------------------------------------------------------------------------------
1 | image:
2 | tag: 0.1.1
3 |
4 | resources:
5 | limits:
6 | memory: 256Mi
7 | cpu: 500m
8 | requests:
9 | memory: 256Mi
10 | cpu: 100m
11 | livenessProbe:
12 | initialDelaySeconds: 10
13 | periodSeconds: 30
14 | readinessProbe:
15 | initialDelaySeconds: 10
16 | periodSeconds: 30
17 |
--------------------------------------------------------------------------------
/install/000-Namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: strimzi-access-operator
5 | labels:
6 | app: strimzi-access-operator
7 |
--------------------------------------------------------------------------------
/install/010-ServiceAccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: strimzi-access-operator
5 | labels:
6 | app: strimzi-access-operator
7 | namespace: strimzi-access-operator
8 |
--------------------------------------------------------------------------------
/install/020-ClusterRole.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: strimzi-access-operator
5 | labels:
6 | app: strimzi-access-operator
7 | rules:
8 | - apiGroups:
9 | - "access.strimzi.io"
10 | resources:
11 | - kafkaaccesses
12 | - kafkaaccesses/status
13 | verbs:
14 | - get
15 | - list
16 | - watch
17 | - create
18 | - delete
19 | - patch
20 | - update
21 | - apiGroups:
22 | - "kafka.strimzi.io"
23 | resources:
24 | - kafkas
25 | - kafkausers
26 | verbs:
27 | - get
28 | - list
29 | - watch
30 | - apiGroups:
31 | - ""
32 | resources:
33 | - secrets
34 | verbs:
35 | - get
36 | - list
37 | - watch
38 | - create
39 | - delete
40 | - patch
41 | - update
42 |
--------------------------------------------------------------------------------
/install/030-ClusterRoleBinding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | name: strimzi-access-operator
5 | labels:
6 | app: strimzi-access-operator
7 | subjects:
8 | - kind: ServiceAccount
9 | name: strimzi-access-operator
10 | namespace: strimzi-access-operator
11 | roleRef:
12 | kind: ClusterRole
13 | name: strimzi-access-operator
14 | apiGroup: rbac.authorization.k8s.io
15 |
--------------------------------------------------------------------------------
/install/040-Crd-kafkaaccess.yaml:
--------------------------------------------------------------------------------
1 | # Generated by Fabric8 CRDGenerator, manual edits might get overwritten!
2 | apiVersion: apiextensions.k8s.io/v1
3 | kind: CustomResourceDefinition
4 | metadata:
5 | name: kafkaaccesses.access.strimzi.io
6 | labels:
7 | servicebinding.io/provisioned-service: "true"
8 | spec:
9 | group: access.strimzi.io
10 | names:
11 | kind: KafkaAccess
12 | plural: kafkaaccesses
13 | shortNames:
14 | - ka
15 | singular: kafkaaccess
16 | scope: Namespaced
17 | versions:
18 | - additionalPrinterColumns:
19 | - jsonPath: .spec.kafka.listener
20 | name: Listener
21 | priority: 0
22 | type: string
23 | - jsonPath: .spec.kafka.name
24 | name: Cluster
25 | priority: 0
26 | type: string
27 | - jsonPath: .spec.user.name
28 | name: User
29 | priority: 0
30 | type: string
31 | name: v1alpha1
32 | schema:
33 | openAPIV3Schema:
34 | properties:
35 | spec:
36 | properties:
37 | kafka:
38 | properties:
39 | listener:
40 | type: string
41 | name:
42 | type: string
43 | namespace:
44 | type: string
45 | required:
46 | - name
47 | type: object
48 | user:
49 | properties:
50 | apiGroup:
51 | type: string
52 | kind:
53 | type: string
54 | name:
55 | type: string
56 | namespace:
57 | type: string
58 | required:
59 | - apiGroup
60 | - kind
61 | - name
62 | type: object
63 | required:
64 | - kafka
65 | type: object
66 | status:
67 | properties:
68 | binding:
69 | properties:
70 | name:
71 | type: string
72 | type: object
73 | conditions:
74 | items:
75 | properties:
76 | additionalProperties:
77 | additionalProperties:
78 | type: object
79 | type: object
80 | lastTransitionTime:
81 | type: string
82 | message:
83 | type: string
84 | reason:
85 | type: string
86 | status:
87 | type: string
88 | type:
89 | type: string
90 | type: object
91 | type: array
92 | observedGeneration:
93 | type: integer
94 | type: object
95 | type: object
96 | served: true
97 | storage: true
98 | subresources:
99 | status: {}
100 |
--------------------------------------------------------------------------------
/install/050-Deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: strimzi-access-operator
5 | labels:
6 | app: strimzi-access-operator
7 | namespace: strimzi-access-operator
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: strimzi-access-operator
13 | strimzi.io/kind: access-operator
14 | strategy:
15 | type: Recreate
16 | template:
17 | metadata:
18 | labels:
19 | app: strimzi-access-operator
20 | strimzi.io/kind: access-operator
21 | spec:
22 | serviceAccountName: strimzi-access-operator
23 | volumes:
24 | - name: strimzi-tmp
25 | emptyDir:
26 | medium: Memory
27 | sizeLimit: 1Mi
28 | containers:
29 | - name: access-operator
30 | image: quay.io/strimzi/access-operator:0.1.1
31 | ports:
32 | - containerPort: 8080
33 | name: http
34 | args:
35 | - /opt/strimzi/bin/access_operator_run.sh
36 | volumeMounts:
37 | - name: strimzi-tmp
38 | mountPath: /tmp
39 | resources:
40 | limits:
41 | cpu: 500m
42 | memory: 256Mi
43 | requests:
44 | cpu: 100m
45 | memory: 256Mi
46 | livenessProbe:
47 | httpGet:
48 | path: /healthy
49 | port: http
50 | initialDelaySeconds: 10
51 | periodSeconds: 30
52 | readinessProbe:
53 | httpGet:
54 | path: /ready
55 | port: http
56 | initialDelaySeconds: 10
57 | periodSeconds: 30
58 |
--------------------------------------------------------------------------------
/operator/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0"
3 |          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
5 |     <modelVersion>4.0.0</modelVersion>
6 |     <parent>
7 |         <groupId>io.strimzi.access-operator</groupId>
8 |         <artifactId>kafka-access-operator</artifactId>
9 |         <version>0.2.0-SNAPSHOT</version>
10 |     </parent>
11 | 
12 |     <artifactId>operator</artifactId>
13 | 
14 |     <properties>
15 |         <maven.compiler.source>17</maven.compiler.source>
16 |         <maven.compiler.target>17</maven.compiler.target>
17 |         <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
18 |     </properties>
19 | 
20 |     <dependencies>
21 |         <dependency>
22 |             <groupId>io.javaoperatorsdk</groupId>
23 |             <artifactId>operator-framework-core</artifactId>
24 |             <exclusions>
25 |                 <exclusion>
26 |                     <groupId>io.fabric8</groupId>
27 |                     <artifactId>kubernetes-httpclient-okhttp</artifactId>
28 |                 </exclusion>
29 |             </exclusions>
30 |         </dependency>
31 |         <dependency>
32 |             <groupId>io.fabric8</groupId>
33 |             <artifactId>kubernetes-httpclient-jdk</artifactId>
34 |         </dependency>
35 |         <dependency>
36 |             <groupId>io.fabric8</groupId>
37 |             <artifactId>kubernetes-client-api</artifactId>
38 |         </dependency>
39 |         <dependency>
40 |             <groupId>io.fabric8</groupId>
41 |             <artifactId>kubernetes-model-core</artifactId>
42 |         </dependency>
43 |         <dependency>
44 |             <groupId>io.fabric8</groupId>
45 |             <artifactId>crd-generator-api-v2</artifactId>
46 |             <scope>provided</scope>
47 |         </dependency>
48 |         <dependency>
49 |             <groupId>io.strimzi</groupId>
50 |             <artifactId>api</artifactId>
51 |             <exclusions>
52 |                 <exclusion>
53 |                     <groupId>io.fabric8</groupId>
54 |                     <artifactId>kubernetes-client</artifactId>
55 |                 </exclusion>
56 |             </exclusions>
57 |         </dependency>
58 |         <dependency>
59 |             <groupId>org.apache.kafka</groupId>
60 |             <artifactId>kafka-clients</artifactId>
61 |         </dependency>
62 |         <dependency>
63 |             <groupId>org.slf4j</groupId>
64 |             <artifactId>slf4j-api</artifactId>
65 |         </dependency>
66 |         <dependency>
67 |             <groupId>org.apache.logging.log4j</groupId>
68 |             <artifactId>log4j-slf4j-impl</artifactId>
69 |         </dependency>
70 |         <dependency>
71 |             <groupId>org.eclipse.jetty</groupId>
72 |             <artifactId>jetty-server</artifactId>
73 |         </dependency>
74 |         <dependency>
75 |             <groupId>org.eclipse.jetty</groupId>
76 |             <artifactId>jetty-servlet</artifactId>
77 |         </dependency>
78 |         <dependency>
79 |             <groupId>org.eclipse.jetty.toolchain</groupId>
80 |             <artifactId>jetty-jakarta-servlet-api</artifactId>
81 |         </dependency>
82 |         <dependency>
83 |             <groupId>org.junit.jupiter</groupId>
84 |             <artifactId>junit-jupiter-api</artifactId>
85 |             <scope>test</scope>
86 |         </dependency>
87 |         <dependency>
88 |             <groupId>org.junit.jupiter</groupId>
89 |             <artifactId>junit-jupiter-params</artifactId>
90 |             <scope>test</scope>
91 |         </dependency>
92 |         <dependency>
93 |             <groupId>org.assertj</groupId>
94 |             <artifactId>assertj-core</artifactId>
95 |             <scope>test</scope>
96 |         </dependency>
97 |         <dependency>
98 |             <groupId>io.fabric8</groupId>
99 |             <artifactId>kubernetes-server-mock</artifactId>
100 |             <scope>test</scope>
101 |         </dependency>
102 |         <dependency>
103 |             <groupId>org.mockito</groupId>
104 |             <artifactId>mockito-core</artifactId>
105 |             <scope>test</scope>
106 |         </dependency>
107 |         <dependency>
108 |             <groupId>io.strimzi.access-operator</groupId>
109 |             <artifactId>api</artifactId>
110 |         </dependency>
111 |     </dependencies>
112 | 
113 |     <build>
114 |         <plugins>
115 |             <plugin>
116 |                 <groupId>org.apache.maven.plugins</groupId>
117 |                 <artifactId>maven-assembly-plugin</artifactId>
118 |                 <version>${maven.assembly.version}</version>
119 |                 <executions>
120 |                     <execution>
121 |                         <phase>package</phase>
122 |                         <goals>
123 |                             <goal>single</goal>
124 |                         </goals>
125 |                         <configuration>
126 |                             <!-- (element name lost) false -->
127 |                             <descriptors>
128 |                                 <descriptor>${project.basedir}/src/assembly/dist.xml</descriptor>
129 |                             </descriptors>
130 |                         </configuration>
131 |                     </execution>
132 |                 </executions>
133 |             </plugin>
134 |         </plugins>
135 |     </build>
136 | </project>
--------------------------------------------------------------------------------
/operator/src/assembly/dist.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <assembly xmlns="http://maven.apache.org/ASSEMBLY/2.1.0"
3 | xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 | xsi:schemaLocation="http://maven.apache.org/ASSEMBLY/2.1.0 http://maven.apache.org/xsd/assembly-2.1.0.xsd">
5 | <id>dist</id>
6 | <includeBaseDirectory>false</includeBaseDirectory>
7 | <formats>
8 | <format>tar.gz</format>
9 | <format>zip</format>
10 | <format>dir</format>
11 | </formats>
12 | <fileSets>
13 | <fileSet>
14 | <directory>${project.parent.basedir}/bin</directory>
15 | <outputDirectory>/bin</outputDirectory>
16 | <fileMode>0755</fileMode>
17 | </fileSet>
18 | <fileSet>
19 | <directory>${project.parent.basedir}</directory>
20 | <includes>
21 | <include>README*</include>
22 | <include>LICENSE*</include>
23 | </includes>
24 | <outputDirectory>/</outputDirectory>
25 | <fileMode>0644</fileMode>
26 | </fileSet>
27 | </fileSets>
28 | <dependencySets>
29 | <dependencySet>
30 | <scope>runtime</scope>
31 | <outputDirectory>/libs</outputDirectory>
32 | <fileMode>0644</fileMode>
33 | <outputFileNameMapping>${artifact.groupId}.${artifact.artifactId}-${artifact.version}${dashClassifier?}.${artifact.extension}</outputFileNameMapping>
34 | </dependencySet>
35 | </dependencySets>
36 | </assembly>
37 | 
--------------------------------------------------------------------------------
/operator/src/main/java/io/strimzi/kafka/access/KafkaAccessOperator.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access;
6 |
7 | import io.javaoperatorsdk.operator.Operator;
8 | import io.strimzi.kafka.access.server.HealthServlet;
9 | import org.eclipse.jetty.server.Server;
10 | import org.eclipse.jetty.servlet.ServletHandler;
11 | import org.slf4j.Logger;
12 | import org.slf4j.LoggerFactory;
13 |
14 | /**
15 | * The main operator class for Strimzi Access Operator
16 | */
17 | public class KafkaAccessOperator {
18 |
19 | private static final Logger LOGGER = LoggerFactory.getLogger(KafkaAccessOperator.class);
20 | private static final int HEALTH_CHECK_PORT = 8080;
21 |
22 | /**
23 | * Initializes the operator and runs a servlet for health checking
24 | *
25 | * @param args Main method arguments
26 | */
27 | public static void main(final String[] args) {
28 | LOGGER.info("Kafka Access operator starting");
29 | final Operator operator = new Operator();
30 | operator.register(new KafkaAccessReconciler(operator.getKubernetesClient()));
31 | operator.start();
32 | Server server = new Server(HEALTH_CHECK_PORT);
33 | ServletHandler handler = new ServletHandler();
34 | server.setHandler(handler);
35 | handler.addServletWithMapping(HealthServlet.class, "/healthy");
36 | handler.addServletWithMapping(HealthServlet.class, "/ready");
37 | try {
38 | server.start();
39 | LOGGER.info("Kafka Access operator is now ready (health server listening)");
40 | server.join();
41 | } catch (Exception e) {
42 | LOGGER.error("Failed to start health server", e);
43 | }
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/operator/src/main/java/io/strimzi/kafka/access/SecretDependentResource.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access;
6 |
7 | import io.fabric8.kubernetes.api.model.Secret;
8 | import io.javaoperatorsdk.operator.api.reconciler.Context;
9 | import io.javaoperatorsdk.operator.processing.event.ResourceID;
10 | import io.javaoperatorsdk.operator.processing.event.source.informer.InformerEventSource;
11 | import io.strimzi.api.kafka.model.kafka.Kafka;
12 | import io.strimzi.api.kafka.model.kafka.KafkaResources;
13 | import io.strimzi.api.kafka.model.user.KafkaUser;
14 | import io.strimzi.api.kafka.model.user.KafkaUserAuthentication;
15 | import io.strimzi.api.kafka.model.user.KafkaUserSpec;
16 | import io.strimzi.api.kafka.model.user.KafkaUserStatus;
17 | import io.strimzi.kafka.access.internal.CustomResourceParseException;
18 | import io.strimzi.kafka.access.internal.KafkaListener;
19 | import io.strimzi.kafka.access.internal.KafkaParser;
20 | import io.strimzi.kafka.access.internal.KafkaUserData;
21 | import io.strimzi.kafka.access.internal.MissingKubernetesResourceException;
22 | import io.strimzi.kafka.access.model.KafkaAccess;
23 | import io.strimzi.kafka.access.model.KafkaAccessSpec;
24 | import io.strimzi.kafka.access.model.KafkaReference;
25 | import io.strimzi.kafka.access.model.KafkaUserReference;
26 | import org.slf4j.Logger;
27 | import org.slf4j.LoggerFactory;
28 |
29 | import java.nio.charset.StandardCharsets;
30 | import java.util.Base64;
31 | import java.util.HashMap;
32 | import java.util.Map;
33 | import java.util.Optional;
34 | import java.util.function.Supplier;
35 |
36 | /**
37 | * Class to represent the data in the Secret created by the operator
38 | * In future updates this could be changed to implement the
39 | * Java Operator SDK DependentResource class
40 | */
41 | public class SecretDependentResource {
42 |
43 | private static final String TYPE_SECRET_KEY = "type";
44 | private static final String TYPE_SECRET_VALUE = "kafka";
45 | private static final String PROVIDER_SECRET_KEY = "provider";
46 | private static final String PROVIDER_SECRET_VALUE = "strimzi";
47 | private final Map<String, String> commonSecretData = new HashMap<>();
48 |
49 | private static final Logger LOGGER = LoggerFactory.getLogger(SecretDependentResource.class);
50 |
51 | /**
52 | * Default constructor that initialises the common secret data
53 | */
54 | public SecretDependentResource() {
55 | final Base64.Encoder encoder = Base64.getEncoder();
56 | commonSecretData.put(TYPE_SECRET_KEY, encoder.encodeToString(TYPE_SECRET_VALUE.getBytes(StandardCharsets.UTF_8)));
57 | commonSecretData.put(PROVIDER_SECRET_KEY, encoder.encodeToString(PROVIDER_SECRET_VALUE.getBytes(StandardCharsets.UTF_8)));
58 | }
59 |
60 | /**
61 | * The desired state of the data in the secret
62 | * @param spec The spec of the KafkaAccess resource being reconciled
63 | * @param namespace The namespace of the KafkaAccess resource being reconciled
64 | * @param context The event source context
65 | * @return The data for the Secret as a Map
66 | */
67 | public Map<String, String> desired(final KafkaAccessSpec spec, final String namespace, final Context<KafkaAccess> context) {
68 | final KafkaReference kafkaReference = spec.getKafka();
69 | final String kafkaClusterName = kafkaReference.getName();
70 | final String kafkaClusterNamespace = Optional.ofNullable(kafkaReference.getNamespace()).orElse(namespace);
71 | final Kafka kafka = context.getSecondaryResource(Kafka.class).orElseThrow(missingKubernetesResourceException("Kafka", kafkaClusterNamespace, kafkaClusterName));
72 | final Map<String, String> data = new HashMap<>(commonSecretData);
73 | final KafkaListener listener;
74 | String kafkaUserType = null;
75 | final Optional<KafkaUserReference> kafkaUserReference = Optional.ofNullable(spec.getUser());
76 | if (kafkaUserReference.isPresent()) {
77 | if (!KafkaUser.RESOURCE_KIND.equals(kafkaUserReference.get().getKind()) || !KafkaUser.RESOURCE_GROUP.equals(kafkaUserReference.get().getApiGroup())) {
78 | throw new IllegalStateException(String.format("User kind must be %s and apiGroup must be %s", KafkaUser.RESOURCE_KIND, KafkaUser.RESOURCE_GROUP));
79 | }
80 | final String kafkaUserName = kafkaUserReference.get().getName();
81 | final String kafkaUserNamespace = Optional.ofNullable(kafkaUserReference.get().getNamespace()).orElse(namespace);
82 | final KafkaUser kafkaUser = context.getSecondaryResource(KafkaUser.class).orElseThrow(missingKubernetesResourceException("KafkaUser", kafkaUserNamespace, kafkaUserName));
83 | kafkaUserType = Optional.ofNullable(kafkaUser.getSpec())
84 | .map(KafkaUserSpec::getAuthentication)
85 | .map(KafkaUserAuthentication::getType)
86 | .orElse(KafkaParser.USER_AUTH_UNDEFINED);
87 | data.putAll(getKafkaUserSecretData(context, kafkaUser, kafkaUserName, kafkaUserNamespace));
88 | }
89 | try {
90 | listener = KafkaParser.getKafkaListener(kafka, spec, kafkaUserType);
91 | } catch (CustomResourceParseException e) {
92 | LOGGER.error("Reconcile failed due to ParserException " + e.getMessage());
93 | throw e;
94 | }
95 | if (listener.isTls()) {
96 | listener.withCaCertSecret(getKafkaCaCertData(context, kafkaClusterName, kafkaClusterNamespace));
97 | }
98 | data.putAll(listener.getConnectionSecretData());
99 | return data;
100 | }
101 |
102 | private Map<String, String> getKafkaUserSecretData(final Context<KafkaAccess> context, final KafkaUser kafkaUser, final String kafkaUserName, final String kafkaUserNamespace) {
103 | final String userSecretName = Optional.ofNullable(kafkaUser.getStatus())
104 | .map(KafkaUserStatus::getSecret)
105 | .orElseThrow(missingKubernetesResourceException("Secret in KafkaUser status", kafkaUserNamespace, kafkaUserName));
106 | final InformerEventSource<Secret, KafkaAccess> kafkaUserSecretEventSource = (InformerEventSource<Secret, KafkaAccess>) context.eventSourceRetriever()
107 | .getResourceEventSourceFor(Secret.class, KafkaAccessReconciler.KAFKA_USER_SECRET_EVENT_SOURCE);
108 | final Secret kafkaUserSecret = kafkaUserSecretEventSource.get(new ResourceID(userSecretName, kafkaUserNamespace))
109 | .orElseThrow(missingKubernetesResourceException(String.format("Secret %s for KafkaUser", userSecretName), kafkaUserNamespace, kafkaUserName));
110 | return new KafkaUserData(kafkaUser).withSecret(kafkaUserSecret).getConnectionSecretData();
111 | }
112 |
113 | private Map<String, String> getKafkaCaCertData(final Context<KafkaAccess> context, String kafkaClusterName, String kafkaClusterNamespace) {
114 | final String caCertSecretName = KafkaResources.clusterCaCertificateSecretName(kafkaClusterName);
115 | final InformerEventSource<Secret, KafkaAccess> strimziSecretEventSource = (InformerEventSource<Secret, KafkaAccess>) context.eventSourceRetriever()
116 | .getResourceEventSourceFor(Secret.class, KafkaAccessReconciler.STRIMZI_SECRET_EVENT_SOURCE);
117 | return strimziSecretEventSource.get(new ResourceID(caCertSecretName, kafkaClusterNamespace))
118 | .map(Secret::getData)
119 | .orElse(Map.of());
120 | }
121 |
122 | private static Supplier<MissingKubernetesResourceException> missingKubernetesResourceException(String type, String namespace, String name) {
123 | return () -> new MissingKubernetesResourceException(String.format("%s %s/%s missing", type, namespace, name));
124 | }
125 | }
126 |
--------------------------------------------------------------------------------
/operator/src/main/java/io/strimzi/kafka/access/internal/CustomResourceParseException.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access.internal;
6 |
7 | /**
8 | * The class for custom resource parsing exception
9 | */
10 | public class CustomResourceParseException extends RuntimeException {
11 |
12 | /**
13 | * Default constructor
14 | */
15 | public CustomResourceParseException() {}
16 |
17 | /**
18 | * Constructor
19 | *
20 | * @param message The exception message
21 | */
22 | public CustomResourceParseException(final String message) {
23 | super(message);
24 | }
25 |
26 | /**
27 | * Constructor
28 | *
29 | * @param message The exception message
30 | * @param cause The exception cause
31 | */
32 | public CustomResourceParseException(final String message, final Throwable cause) {
33 | super(message, cause);
34 | }
35 |
36 | /**
37 | * Constructor
38 | *
39 | * @param cause The exception cause
40 | */
41 | public CustomResourceParseException(final Throwable cause) {
42 | super(cause);
43 | }
44 |
45 | }
46 |
--------------------------------------------------------------------------------
/operator/src/main/java/io/strimzi/kafka/access/internal/KafkaListener.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access.internal;
6 |
7 | import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerAuthentication;
8 | import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerAuthenticationScramSha512;
9 | import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerAuthenticationTls;
10 | import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListener;
11 | import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType;
12 | import org.apache.kafka.clients.CommonClientConfigs;
13 | import org.apache.kafka.common.security.auth.SecurityProtocol;
14 |
15 | import java.nio.charset.StandardCharsets;
16 | import java.util.Base64;
17 | import java.util.HashMap;
18 | import java.util.Map;
19 | import java.util.Optional;
20 |
21 | import static io.strimzi.kafka.access.internal.KafkaParser.LISTENER_AUTH_NONE;
22 |
23 | /**
24 | * Representation of a Kafka listener that returns the connection details for the listener
25 | */
26 | public class KafkaListener {
27 |
28 | private final String name;
29 | private final KafkaListenerType type;
30 | private final boolean tls;
31 | private final String authenticationType;
32 | private String bootstrapServer;
33 | private Map<String, String> caCertSecret;
34 |
35 | /**
36 | * Constructor
37 | *
38 | * @param genericListener The generic listener for Kafka
39 | */
40 | public KafkaListener(final GenericKafkaListener genericListener) {
41 | this.name = genericListener.getName();
42 | this.type = genericListener.getType();
43 | this.tls = genericListener.isTls();
44 | this.authenticationType = Optional.ofNullable(genericListener.getAuth()).map(KafkaListenerAuthentication::getType).orElse(LISTENER_AUTH_NONE);
45 | }
46 |
47 | /**
48 | * Decorates a KafkaListener instance with bootstrap server information
49 | *
50 | * @param bootstrapServer The bootstrap server address
51 | * @return A decorated KafkaListener instance
52 | */
53 | public KafkaListener withBootstrapServer(final String bootstrapServer) {
54 | this.bootstrapServer = bootstrapServer;
55 | return this;
56 | }
57 |
58 | /**
59 | * Decorates a KafkaListener instance with CA certificate secret information
60 | *
61 | * @param caCertSecret The CA certificate secret
62 | * @return A decorated KafkaListener instance
63 | */
64 | public KafkaListener withCaCertSecret(final Map<String, String> caCertSecret) {
65 | this.caCertSecret = caCertSecret;
66 | return this;
67 | }
68 |
69 | /**
70 | * Gets the name of the Kafka listener
71 | *
72 | * @return A name for the Kafka listener
73 | */
74 | public String getName() {
75 | return name;
76 | }
77 |
78 | /**
79 | * Gets the type of the Kafka listener
80 | *
81 | * @return A type of the Kafka listener
82 | */
83 | public KafkaListenerType getType() {
84 | return type;
85 | }
86 |
87 | /**
88 | * Gets the bootstrap server address
89 | *
90 | * @return A bootstrap server address
91 | */
92 | public String getBootstrapServer() {
93 | return bootstrapServer;
94 | }
95 |
96 | /**
97 | * Returns whether TLS is enabled for this listener
98 | *
99 | * @return A boolean value for TLS
100 | */
101 | public boolean isTls() {
102 | return tls;
103 | }
104 |
105 | /**
106 | * Gets the authentication type
107 | *
108 | * @return An authentication type
109 | */
110 | public String getAuthenticationType() {
111 | return authenticationType;
112 | }
113 |
114 | /**
115 | * Collects the connection data for the Kafka resource
116 | *
117 | * @return A map of the connection data
118 | */
119 | public Map<String, String> getConnectionSecretData() {
120 | final Base64.Encoder encode = Base64.getEncoder();
121 | final SecurityProtocol securityProtocol = getSecurityProtocol();
122 | final Map<String, String> data = new HashMap<>();
123 | final String bootstrapServers = encode.encodeToString(this.bootstrapServer.getBytes(StandardCharsets.UTF_8));
124 | data.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
125 | final String encodedSecurityProtocol = encode.encodeToString(securityProtocol.name.getBytes(StandardCharsets.UTF_8));
126 | data.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, encodedSecurityProtocol);
127 | //quarkus settings
128 | data.put("bootstrapServers", bootstrapServers);
129 | data.put("securityProtocol", encodedSecurityProtocol);
130 | //Spring settings
131 | data.put("bootstrap-servers", bootstrapServers);
132 | if (this.tls) {
133 | Optional.ofNullable(this.caCertSecret)
134 | .map(secretData -> secretData.get("ca.crt"))
135 | .ifPresent(cert -> data.put("ssl.truststore.crt", cert));
136 | }
137 | return data;
138 | }
139 |
140 | private SecurityProtocol getSecurityProtocol() {
141 | final SecurityProtocol securityProtocol;
142 | switch (this.authenticationType) {
143 | case LISTENER_AUTH_NONE:
144 | securityProtocol = this.tls ? SecurityProtocol.SSL : SecurityProtocol.PLAINTEXT;
145 | break;
146 | case KafkaListenerAuthenticationTls.TYPE_TLS:
147 | securityProtocol = SecurityProtocol.SSL;
148 | break;
149 | case KafkaListenerAuthenticationScramSha512.SCRAM_SHA_512:
150 | securityProtocol = this.tls ? SecurityProtocol.SASL_SSL : SecurityProtocol.SASL_PLAINTEXT;
151 | break;
152 | default:
153 | securityProtocol = SecurityProtocol.PLAINTEXT;
154 | }
155 | return securityProtocol;
156 | }
157 | }
158 |
--------------------------------------------------------------------------------
/operator/src/main/java/io/strimzi/kafka/access/internal/KafkaUserData.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access.internal;
6 |
7 | import io.fabric8.kubernetes.api.model.Secret;
8 | import io.strimzi.api.kafka.model.user.KafkaUser;
9 | import io.strimzi.api.kafka.model.user.KafkaUserAuthentication;
10 | import io.strimzi.api.kafka.model.user.KafkaUserScramSha512ClientAuthentication;
11 | import io.strimzi.api.kafka.model.user.KafkaUserSpec;
12 | import io.strimzi.api.kafka.model.user.KafkaUserTlsClientAuthentication;
13 | import io.strimzi.api.kafka.model.user.KafkaUserTlsExternalClientAuthentication;
14 | import io.strimzi.api.kafka.model.user.KafkaUserStatus;
15 | import org.apache.kafka.common.config.SaslConfigs;
16 |
17 | import java.nio.charset.StandardCharsets;
18 | import java.util.Base64;
19 | import java.util.HashMap;
20 | import java.util.Map;
21 | import java.util.Optional;
22 |
23 | import static io.strimzi.kafka.access.internal.KafkaParser.USER_AUTH_UNDEFINED;
24 |
25 | /**
26 | * Representation of Kafka user data that returns the secret details
27 | */
28 | public class KafkaUserData {
29 |
30 | private final Map<String, String> rawUserData = new HashMap<>();
31 | private final String authType;
32 |
33 | /**
34 | * Constructor
35 | *
36 | * @param kafkaUser The KafkaUser resource
37 | */
38 | public KafkaUserData(final KafkaUser kafkaUser) {
39 | Optional.ofNullable(kafkaUser.getStatus())
40 | .map(KafkaUserStatus::getUsername)
41 | .ifPresent(username -> rawUserData.put("username", Base64.getEncoder().encodeToString(username.getBytes(StandardCharsets.UTF_8))));
42 | this.authType = Optional.ofNullable(kafkaUser.getSpec())
43 | .map(KafkaUserSpec::getAuthentication)
44 | .map(KafkaUserAuthentication::getType)
45 | .orElse(USER_AUTH_UNDEFINED);
46 | }
47 |
48 | /**
49 | * Decorates a KafkaUserData instance with secret information
50 | *
51 | * @param secret The Kubernetes secret
52 | * @return A KafkaUserData instance
53 | */
54 | public KafkaUserData withSecret(final Secret secret) {
55 | Optional.ofNullable(secret.getData())
56 | .ifPresent(rawUserData::putAll);
57 | return this;
58 | }
59 |
60 | /**
61 | * Collects the connection data for connecting to Kafka with this Kafka User
62 | *
63 | * @return A map of the connection data
64 | */
65 | public Map<String, String> getConnectionSecretData() {
66 | final Map<String, String> secretData = new HashMap<>();
67 | if (KafkaUserScramSha512ClientAuthentication.TYPE_SCRAM_SHA_512.equals(authType)) {
68 | Optional.ofNullable(rawUserData.get("username"))
69 | .ifPresent(username -> {
70 | secretData.put("username", username);
71 | secretData.put("user", username); //quarkus
72 | });
73 | final String encodedSaslMechanism = Base64.getEncoder().encodeToString("SCRAM-SHA-512".getBytes(StandardCharsets.UTF_8));
74 | secretData.put(SaslConfigs.SASL_MECHANISM, encodedSaslMechanism);
75 | secretData.put("saslMechanism", encodedSaslMechanism); //quarkus
76 | Optional.ofNullable(rawUserData.get("password"))
77 | .ifPresent(password -> secretData.put("password", password));
78 | Optional.ofNullable(rawUserData.get(SaslConfigs.SASL_JAAS_CONFIG))
79 | .ifPresent(jaasConfig -> secretData.put(SaslConfigs.SASL_JAAS_CONFIG, jaasConfig));
80 | }
81 | if (KafkaUserTlsClientAuthentication.TYPE_TLS.equals(authType) || KafkaUserTlsExternalClientAuthentication.TYPE_TLS_EXTERNAL.equals(authType)) {
82 | Optional.ofNullable(rawUserData.get("user.crt"))
83 | .ifPresent(cert -> secretData.put("ssl.keystore.crt", cert));
84 | Optional.ofNullable(rawUserData.get("user.key"))
85 | .ifPresent(key -> secretData.put("ssl.keystore.key", key));
86 | }
87 | return secretData;
88 | }
89 | }
90 |
--------------------------------------------------------------------------------
/operator/src/main/java/io/strimzi/kafka/access/internal/MissingKubernetesResourceException.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access.internal;
6 |
7 | /**
8 | * The class for the exception thrown when Kubernetes resources are missing
9 | */
10 | public class MissingKubernetesResourceException extends RuntimeException {
11 | /**
12 | * Default constructor
13 | */
14 | public MissingKubernetesResourceException() {}
15 |
16 | /**
17 | * Constructor
18 | *
19 | * @param message The exception message
20 | */
21 | public MissingKubernetesResourceException(final String message) {
22 | super(message);
23 | }
24 |
25 | /**
26 | * Constructor
27 | *
28 | * @param message The exception message
29 | * @param cause The exception cause
30 | */
31 | public MissingKubernetesResourceException(final String message, final Throwable cause) {
32 | super(message, cause);
33 | }
34 |
35 | /**
36 | * Constructor
37 | *
38 | * @param cause The exception cause
39 | */
40 | public MissingKubernetesResourceException(final Throwable cause) {
41 | super(cause);
42 | }
43 |
44 | }
45 |
--------------------------------------------------------------------------------
/operator/src/main/java/io/strimzi/kafka/access/server/HealthServlet.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access.server;
6 |
7 | import jakarta.servlet.http.HttpServlet;
8 | import jakarta.servlet.http.HttpServletRequest;
9 | import jakarta.servlet.http.HttpServletResponse;
10 |
11 | import java.io.Serial;
12 |
13 | /**
14 | * Servlet class for health checking of the operator
15 | */
16 | public class HealthServlet extends HttpServlet {
17 |
18 | @Serial
19 | private static final long serialVersionUID = 1L;
20 |
21 | @Override
22 | protected void doGet(HttpServletRequest request, HttpServletResponse response) {
23 | response.setStatus(HttpServletResponse.SC_OK);
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/operator/src/main/resources/log4j2.properties:
--------------------------------------------------------------------------------
1 | name = KafkaAccessOperatorConfig
2 |
3 | appender.console.type = Console
4 | appender.console.name = STDOUT
5 | appender.console.layout.type = PatternLayout
6 | appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
7 |
8 | rootLogger.level = ${env:STRIMZI_LOG_LEVEL:-INFO}
9 | rootLogger.appenderRefs = stdout
10 | rootLogger.appenderRef.console.ref = STDOUT
11 | rootLogger.additivity = false
12 |
--------------------------------------------------------------------------------
/operator/src/test/java/io/strimzi/kafka/access/Base64Encoder.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access;
6 |
7 | import java.nio.charset.StandardCharsets;
8 | import java.util.Base64;
9 |
10 | public class Base64Encoder {
11 |
12 | private static final Base64.Encoder ENCODER = Base64.getEncoder();
13 |
14 | public static String encodeUtf8(String data) {
15 | return ENCODER.encodeToString(data.getBytes(StandardCharsets.UTF_8));
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/operator/src/test/java/io/strimzi/kafka/access/internal/KafkaListenerTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access.internal;
6 |
7 | import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerAuthenticationScramSha512;
8 | import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListener;
9 | import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType;
10 | import io.strimzi.kafka.access.ResourceProvider;
11 | import org.apache.kafka.clients.CommonClientConfigs;
12 | import org.apache.kafka.common.security.auth.SecurityProtocol;
13 | import org.junit.jupiter.api.DisplayName;
14 | import org.junit.jupiter.api.Test;
15 |
16 | import java.nio.charset.StandardCharsets;
17 | import java.util.Base64;
18 | import java.util.HashMap;
19 | import java.util.List;
20 | import java.util.Map;
21 |
22 | import static org.assertj.core.api.Assertions.assertThat;
23 |
24 | public class KafkaListenerTest {
25 |
26 | private static final String LISTENER_1 = "listener-1";
27 | private static final String BOOTSTRAP_SERVER_9092 = "my-cluster.svc:9092";
28 |
29 | private final Base64.Encoder encoder = Base64.getEncoder();
30 |
31 | protected static List<String> bootstrapServerKeys() {
32 | return List.of(
33 | CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG,
34 | "bootstrapServers",
35 | "bootstrap-servers"
36 | );
37 | }
38 |
39 | protected static List<String> securityProtocolKeys() {
40 | return List.of(
41 | CommonClientConfigs.SECURITY_PROTOCOL_CONFIG,
42 | "securityProtocol"
43 | );
44 | }
45 |
46 | @Test
47 | @DisplayName("When a Kafka listener with no auth is created, then the connection data is correct and the security protocol is PLAINTEXT")
48 | void testSpecifiedPlainListener() {
49 | final GenericKafkaListener genericKafkaListener = ResourceProvider.getListener(LISTENER_1, KafkaListenerType.INTERNAL, false);
50 | final KafkaListener listener = new KafkaListener(genericKafkaListener).withBootstrapServer(BOOTSTRAP_SERVER_9092);
51 |
52 | final Map<String, String> secretData = listener.getConnectionSecretData();
53 | 
54 | final Map<String, String> expectedData = new HashMap<>();
55 | bootstrapServerKeys().forEach(key -> expectedData.put(key, encodeToString(BOOTSTRAP_SERVER_9092)));
56 | securityProtocolKeys().forEach(key -> expectedData.put(key, encodeToString(SecurityProtocol.PLAINTEXT.name)));
57 | assertThat(secretData).containsAllEntriesOf(expectedData);
58 | }
59 |
60 |
61 |
62 | @Test
63 | @DisplayName("When Kafka listener with TLS enabled is created, then the connection data is correct and the security protocol is SSL")
64 | void testTLSKafkaListener() {
65 | final String cert = encodeToString("-----BEGIN CERTIFICATE-----\nMIIFLTCCAx\n-----END CERTIFICATE-----\n");
66 | final Map<String, String> certSecretData = new HashMap<>();
67 | certSecretData.put("ca.crt", cert);
68 | final GenericKafkaListener genericKafkaListener = ResourceProvider.getListener(LISTENER_1, KafkaListenerType.INTERNAL, true);
69 | final KafkaListener listener = new KafkaListener(genericKafkaListener).withBootstrapServer(BOOTSTRAP_SERVER_9092)
70 | .withCaCertSecret(certSecretData);
71 |
72 | final Map<String, String> secretData = listener.getConnectionSecretData();
73 | 
74 | final Map<String, String> expectedData = new HashMap<>();
75 | bootstrapServerKeys().forEach(key -> expectedData.put(key, encodeToString(BOOTSTRAP_SERVER_9092)));
76 | securityProtocolKeys().forEach(key -> expectedData.put(key, encodeToString(SecurityProtocol.SSL.name)));
77 | expectedData.put("ssl.truststore.crt", cert);
78 | assertThat(secretData).containsAllEntriesOf(expectedData);
79 | }
80 |
81 | @Test
82 | @DisplayName("When a Kafka listener with SASL auth is created, then the connection data is correct and the security protocol is SASL_PLAINTEXT")
83 | void testKafkaListenerWithSaslAuth() {
84 | final GenericKafkaListener genericKafkaListener = ResourceProvider.getListener(LISTENER_1, KafkaListenerType.INTERNAL, false, new KafkaListenerAuthenticationScramSha512());
85 | final KafkaListener listener = new KafkaListener(genericKafkaListener).withBootstrapServer(BOOTSTRAP_SERVER_9092);
86 |
87 | final Map<String, String> secretData = listener.getConnectionSecretData();
88 | 
89 | final Map<String, String> expectedData = new HashMap<>();
90 | bootstrapServerKeys().forEach(key -> expectedData.put(key, encodeToString(BOOTSTRAP_SERVER_9092)));
91 | securityProtocolKeys().forEach(key -> expectedData.put(key, encodeToString(SecurityProtocol.SASL_PLAINTEXT.name)));
92 | assertThat(secretData).containsAllEntriesOf(expectedData);
93 | }
94 |
95 | @Test
96 | @DisplayName("When a Kafka listener with SASL auth and TLS enabled is created, then the connection data is correct and has security protocol SASL_SSL")
97 | void testKafkaListenerWithSaslAuthAndTls() {
98 | final String cert = encodeToString("-----BEGIN CERTIFICATE-----\nMIIFLTCCAx\n-----END CERTIFICATE-----\n");
99 | final Map<String, String> certSecretData = new HashMap<>();
100 | certSecretData.put("ca.crt", cert);
101 | final GenericKafkaListener genericKafkaListener = ResourceProvider.getListener(LISTENER_1, KafkaListenerType.INTERNAL, true, new KafkaListenerAuthenticationScramSha512());
102 | final KafkaListener listener = new KafkaListener(genericKafkaListener).withBootstrapServer(BOOTSTRAP_SERVER_9092)
103 | .withCaCertSecret(certSecretData);
104 |
105 | final Map<String, String> secretData = listener.getConnectionSecretData();
106 | 
107 | final Map<String, String> expectedData = new HashMap<>();
108 | bootstrapServerKeys().forEach(key -> expectedData.put(key, encodeToString(BOOTSTRAP_SERVER_9092)));
109 | securityProtocolKeys().forEach(key -> expectedData.put(key, encodeToString(SecurityProtocol.SASL_SSL.name)));
110 | expectedData.put("ssl.truststore.crt", cert);
111 | assertThat(secretData).containsAllEntriesOf(expectedData);
112 | }
113 |
114 |
115 |
116 | private String encodeToString(String data) {
117 | return encoder.encodeToString(data.getBytes(StandardCharsets.UTF_8));
118 | }
119 |
120 | }
121 |
--------------------------------------------------------------------------------
/packaging/examples/kafka-access-with-user.yaml:
--------------------------------------------------------------------------------
1 | # The operator will look up the Kafka instance specified and
2 | # create a secret with the details to connect to the listener specified.
3 | # It will also look up the KafkaUser specified and check it has the correct
4 | # authentication mechanism to connect to the listener. If so it will add the
5 | # user credentials to the secret it creates.
6 | # If no listener is specified it will choose one based on the user authentication.
7 | apiVersion: access.strimzi.io/v1alpha1
8 | kind: KafkaAccess
9 | metadata:
10 | name: my-kafka-access
11 | spec:
12 | kafka:
13 | name: my-cluster
14 | namespace: kafka
15 | listener: tls
16 | user:
17 | kind: KafkaUser
18 | apiGroup: kafka.strimzi.io
19 | name: my-user
20 | namespace: kafka
21 |
--------------------------------------------------------------------------------
/packaging/examples/kafka-access.yaml:
--------------------------------------------------------------------------------
1 | # The operator will look up the Kafka instance specified and
2 | # create a secret with the details to connect to the listener specified.
3 | # If no listener is specified it will choose one, preferring an internal listener.
4 | apiVersion: access.strimzi.io/v1alpha1
5 | kind: KafkaAccess
6 | metadata:
7 | name: my-kafka-access
8 | spec:
9 | kafka:
10 | name: my-cluster
11 | namespace: kafka
12 | listener: plain
13 |
--------------------------------------------------------------------------------
/packaging/helm-charts/helm3/Makefile:
--------------------------------------------------------------------------------
1 | PROJECT_NAME=helm-charts
2 |
3 | include ../../../Makefile.os
4 |
5 | RELEASE_VERSION ?= latest
6 | CHART_NAME=strimzi-access-operator
7 | CHART_PATH ?= ../helm3/strimzi-access-operator
8 | CHART_RENDERED_TEMPLATES_TMP=../../../target/charts
9 | CHART_RENDERED_TEMPLATES_INSTALL=../../install
10 | HELM_CLI=helm
11 |
12 | helm_clean:
13 | rm -rfv $(CHART_RENDERED_TEMPLATES_TMP)
14 | rm -rf strimzi-$(RELEASE_VERSION)/charts/
15 | rm -f $(CHART_NAME)-*.tgz
16 |
17 | helm_lint:
18 | # Linting Helm Chart
19 | $(HELM_CLI) lint --debug $(CHART_PATH)
20 |
21 | helm_install:
22 | # Copying rendered template files to: $(CHART_RENDERED_TEMPLATES_INSTALL)
23 | mkdir -p $(CHART_RENDERED_TEMPLATES_TMP)
24 | helm template --namespace strimzi-access-operator --output-dir $(CHART_RENDERED_TEMPLATES_TMP) $(CHART_PATH)
25 | $(FIND) $(CHART_RENDERED_TEMPLATES_TMP) -type f -name '*.yaml' -exec $(SED) -i '/^---/d' {} \;
26 | $(FIND) $(CHART_RENDERED_TEMPLATES_TMP) -type f -name '*.yaml' -exec $(SED) -i '/^# Source: /d' {} \;
27 | $(FIND) $(CHART_RENDERED_TEMPLATES_TMP) -type f -name '*.yaml' \
28 | | xargs -IFILE $(CP) FILE $(CHART_RENDERED_TEMPLATES_INSTALL)
29 | rm -rf $(CHART_RENDERED_TEMPLATES_TMP)
30 |
31 | helm_pkg: helm_clean helm_lint helm_install
32 | # Copying unarchived Helm Chart to release directory
33 | mkdir -p strimzi-$(RELEASE_VERSION)/charts/
34 | $(CP) -r $(CHART_PATH) strimzi-$(RELEASE_VERSION)/charts/$(CHART_NAME)
35 | # Packaging helm chart with semantic version: $(RELEASE_VERSION)
36 | $(HELM_CLI) package --version $(RELEASE_VERSION) --app-version $(RELEASE_VERSION) --destination ./ $(CHART_PATH)
37 | rm -rf strimzi-$(RELEASE_VERSION)
38 |
39 | java_build: helm_install
40 | java_install: java_build
41 | docker_build: helm_install
42 | docker_tag:
43 | docker_push:
44 |
45 | all: docker_build
46 | clean: helm_clean
47 |
48 | .PHONY: build clean release spotbugs
--------------------------------------------------------------------------------
/packaging/helm-charts/helm3/strimzi-access-operator/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *~
18 | # Various IDEs
19 | .project
20 | .idea/
21 | *.tmproj
22 |
--------------------------------------------------------------------------------
/packaging/helm-charts/helm3/strimzi-access-operator/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | # TODO
3 | appVersion: "0.1.0"
4 | description: "Strimzi Kafka Access Operator"
5 | name: strimzi-access-operator
6 | version: 0.1.0
7 | icon: https://raw.githubusercontent.com/strimzi/strimzi-kafka-operator/main/documentation/logo/strimzi_logo.png
8 | keywords:
9 | - kafka
10 | - queue
11 | - stream
12 | - event
13 | - messaging
14 | - datastore
15 | - topic
16 | home: https://strimzi.io/
17 | sources:
18 | - https://github.com/strimzi/kafka-access-operator
19 |
--------------------------------------------------------------------------------
/packaging/helm-charts/helm3/strimzi-access-operator/README.md:
--------------------------------------------------------------------------------
1 | # Strimzi Access Operator
2 |
3 | Strimzi Access Operator provides a Kubernetes operator to help applications bind to an [Apache Kafka®](https://kafka.apache.org) cluster that is managed by the [Strimzi](https://strimzi.io) cluster operator.
4 |
5 | The operator creates a single Kubernetes `Secret` resource containing all the connection details for the Kafka cluster.
6 | This removes the need for applications to query multiple Kubernetes resources to get connection information.
7 | The `Secret` follows the conventions laid out in the [Service Binding Specification for Kubernetes v1.0.0](https://servicebinding.io/spec/core/1.0.0/).
8 |
9 | The operator is built using the [Java Operator SDK](https://github.com/java-operator-sdk/java-operator-sdk).
10 |
11 | ## Running the Access Operator
12 |
13 | For the operator to start successfully you need the Strimzi `Kafka` and `KafkaUser` custom resource definitions installed in your Kubernetes cluster.
14 | You can get these from the Strimzi [GitHub repository](https://github.com/strimzi/strimzi-kafka-operator/tree/main/install/cluster-operator),
15 | or use the [Strimzi quickstart guide](https://strimzi.io/quickstarts/) to also deploy the Strimzi cluster operator and a Kafka instance at the same time.
16 |
17 | ### Installing the Chart
18 |
19 | To install the chart with the release name `my-strimzi-access-operator`:
20 |
21 | ```bash
22 | $ helm install my-strimzi-access-operator oci://quay.io/strimzi-helm/strimzi-access-operator
23 | ```
24 |
25 | The command deploys the Strimzi Access Operator on the Kubernetes cluster with the default configuration.
26 |
27 | ### Uninstalling the Chart
28 |
29 | To uninstall/delete the `my-strimzi-access-operator` deployment:
30 |
31 | ```bash
32 | $ helm delete my-strimzi-access-operator
33 | ```
34 |
35 | The command removes all the Kubernetes components associated with the Strimzi Access Operator utility and deletes the release.
36 |
37 | ### Configuration
38 |
39 | The following table lists some available configurable parameters of the Strimzi chart and their default values.
40 | For a full list of supported options, check the [`values.yaml` file](./values.yaml).
41 |
42 | | Parameter                            | Description                                                 | Default  |
43 | |--------------------------------------|-------------------------------------------------------------|----------|
44 | | `image.tag`                          | Override default Access Operator image tag                  | `latest` |
45 | | `image.pullPolicy`                   | Image pull policy for all pods deployed by Access Operator  | `nil`    |
46 | | `resources.limits.cpu`               | Configures the CPU limit for the Access Operator Pod        | `500m`   |
47 | | `resources.limits.memory`            | Configures the memory limit for the Access Operator Pod     | `256Mi`  |
48 | | `resources.requests.cpu`             | Configures the CPU request for the Access Operator Pod      | `100m`   |
49 | | `resources.requests.memory`          | Configures the memory request for the Access Operator Pod   | `256Mi`  |
50 | | `livenessProbe.initialDelaySeconds` | Liveness probe initial delay (in seconds) | `10` |
51 | | `livenessProbe.periodSeconds` | Liveness probe period (in seconds) | `30` |
52 | | `readinessProbe.initialDelaySeconds` | Readiness probe initial delay (in seconds) | `10` |
53 | | `readinessProbe.periodSeconds` | Readiness probe period (in seconds) | `30` |
54 |
55 | Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
56 |
57 | ```bash
58 | $ helm install my-strimzi-access-operator --set image.pullPolicy=IfNotPresent oci://quay.io/strimzi-helm/strimzi-access-operator
59 | ```
60 |
61 | ## Using the Access Operator
62 |
63 | To make use of the Access Operator, create a `KafkaAccess` custom resource (CR).
64 | You must specify the name of the `Kafka` CR you want to connect to.
65 | You can optionally also specify the name of the listener in the `Kafka` CR and a `KafkaUser`.
66 | See the [examples folder](https://github.com/strimzi/kafka-access-operator/tree/main/examples) for some valid `KafkaAccess` specifications.
67 |
68 | If you do not specify which listener you want to connect to, the operator uses the following rules to choose a listener (a short sketch of the selection logic follows the list):
69 | 1. If there is only one listener configured in the `Kafka` CR, that listener is chosen.
70 | 2. If there are multiple listeners listed in the `Kafka` CR, the operator filters the list by comparing the `tls` and `authentication` properties in the `Kafka` and `KafkaUser` CRs to select a listener with the appropriate security.
71 | 3. If there are multiple listeners with appropriate security, the operator chooses the one that is of type `internal`.
72 | 4. If there are multiple internal listeners with appropriate security, the operator sorts the listeners alphabetically by name, and chooses the first one.
73 |
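A minimal Java sketch of rules 2 to 4, assuming listeners that are incompatible with the user's authentication have already been filtered out (rule 2). The class and method names here are illustrative only; the operator's actual selection logic lives in the `KafkaParser` class:

```java
import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListener;
import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType;

import java.util.Comparator;
import java.util.List;
import java.util.Optional;

// Illustrative only: not the operator's real code path.
final class ListenerChoiceSketch {
    static Optional<GenericKafkaListener> choose(final List<GenericKafkaListener> compatible) {
        // Rule 4: sort alphabetically by name so ties are broken deterministically
        final List<GenericKafkaListener> sorted = compatible.stream()
                .sorted(Comparator.comparing(GenericKafkaListener::getName))
                .toList();
        // Rule 3: prefer an internal listener; otherwise fall back to the first
        // compatible listener, which also covers the single-listener case (rule 1)
        return sorted.stream()
                .filter(listener -> listener.getType() == KafkaListenerType.INTERNAL)
                .findFirst()
                .or(() -> sorted.stream().findFirst());
    }
}
```
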
74 | Once the Access Operator has created the binding `Secret`, it updates the `KafkaAccess` custom resource to put the name of the secret in the status, for example:
75 |
76 | ```yaml
77 | ...
78 | status:
79 | binding:
80 | name: kafka-binding
81 | ```
82 |
83 | The `Secret` created by the Access Operator has the following structure:
84 |
85 | ```yaml
86 | apiVersion: v1
87 | kind: Secret
88 | metadata:
89 | name: kafka-binding
90 | type: servicebinding.io/kafka
91 | data:
92 | type: kafka
93 | provider: strimzi
94 |
95 | bootstrap.servers: # comma separated list of host:port for Kafka
96 | bootstrap-servers: # comma separated list of host:port for Kafka
97 | bootstrapServers: # comma separated list of host:port for Kafka
98 |
99 | security.protocol: # one of PLAINTEXT, SASL_PLAINTEXT, SASL_SSL or SSL
100 | securityProtocol: # one of PLAINTEXT, SASL_PLAINTEXT, SASL_SSL or SSL
101 |
102 | # Provided if TLS enabled:
103 | ssl.truststore.crt: # Strimzi cluster CA certificate
104 |
105 | # Provided if selected user is SCRAM auth:
106 | username: # SCRAM username
107 | password: # SCRAM password
108 | sasl.jaas.config: # sasl jaas config string for use by Java applications
109 | sasl.mechanism: SCRAM-SHA-512
110 | saslMechanism: SCRAM-SHA-512
111 |
112 | # Provided if selected user is mTLS:
113 | ssl.keystore.crt: # certificate for the consuming client signed by the clients' CA
114 | ssl.keystore.key: # private key for the consuming client
115 | ```
116 |
117 | Developers can make this `Secret` available to their applications themselves, or use an operator that implements the [Service Binding specification](https://servicebinding.io/spec/core/1.0.0/) to do it.
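
As an example of consuming the binding, the following minimal Java sketch builds Kafka client properties from a binding `Secret` that has been mounted as a directory with one file per key; the `BindingSecretReader` name and the mount-directory assumption are illustrative, not part of the operator:

```java
import org.apache.kafka.clients.CommonClientConfigs;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Properties;

// Illustrative only: assumes a ServiceBinding-style mount, where each key in
// the Secret's data appears as a file inside a single directory.
final class BindingSecretReader {
    static Properties kafkaProperties(final Path bindingDir) throws IOException {
        final Properties props = new Properties();
        // bootstrap.servers and security.protocol are always present in the Secret
        props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG,
                Files.readString(bindingDir.resolve("bootstrap.servers")).trim());
        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG,
                Files.readString(bindingDir.resolve("security.protocol")).trim());
        return props;
    }
}
```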
118 |
119 | ## Getting help
120 |
121 | If you encounter any issues while using the Access Operator, you can get help through the following methods:
122 |
123 | - [Strimzi Users mailing list](https://lists.cncf.io/g/cncf-strimzi-users/topics)
124 | - [#strimzi channel on CNCF Slack](https://slack.cncf.io/)
125 | - [GitHub Discussions](https://github.com/orgs/strimzi/discussions)
126 |
127 | ## Contributing
128 |
129 | You can contribute by:
130 | - Raising any issues you find using the Access Operator
131 | - Fixing issues by opening Pull Requests
132 | - Improving documentation
133 | - Talking about the Strimzi Access Operator
134 |
135 | All bugs, tasks or enhancements are tracked as [GitHub issues](https://github.com/strimzi/kafka-access-operator/issues).
136 |
137 | The [dev guide](https://github.com/strimzi/kafka-access-operator/blob/main/development-docs/DEV_GUIDE.md) describes how to build the operator and how to test your changes before submitting a patch or opening a PR.
138 |
139 | If you want to get in touch with us first before contributing, you can use:
140 |
141 | - [Strimzi Dev mailing list](https://lists.cncf.io/g/cncf-strimzi-dev/topics)
142 | - [#strimzi channel on CNCF Slack](https://slack.cncf.io/)
143 |
144 | Learn more on how you can contribute on our [Join Us](https://strimzi.io/join-us/) page.
145 |
146 | ## License
147 |
148 | Strimzi Access Operator is licensed under the [Apache License](./LICENSE), Version 2.0
149 |
--------------------------------------------------------------------------------
/packaging/helm-charts/helm3/strimzi-access-operator/crds/040-Crd-kafkaaccess.yaml:
--------------------------------------------------------------------------------
1 | # Generated by Fabric8 CRDGenerator, manual edits might get overwritten!
2 | apiVersion: apiextensions.k8s.io/v1
3 | kind: CustomResourceDefinition
4 | metadata:
5 | name: kafkaaccesses.access.strimzi.io
6 | labels:
7 | servicebinding.io/provisioned-service: "true"
8 | spec:
9 | group: access.strimzi.io
10 | names:
11 | kind: KafkaAccess
12 | plural: kafkaaccesses
13 | shortNames:
14 | - ka
15 | singular: kafkaaccess
16 | scope: Namespaced
17 | versions:
18 | - additionalPrinterColumns:
19 | - jsonPath: .spec.kafka.listener
20 | name: Listener
21 | priority: 0
22 | type: string
23 | - jsonPath: .spec.kafka.name
24 | name: Cluster
25 | priority: 0
26 | type: string
27 | - jsonPath: .spec.user.name
28 | name: User
29 | priority: 0
30 | type: string
31 | name: v1alpha1
32 | schema:
33 | openAPIV3Schema:
34 | properties:
35 | spec:
36 | properties:
37 | kafka:
38 | properties:
39 | listener:
40 | type: string
41 | name:
42 | type: string
43 | namespace:
44 | type: string
45 | required:
46 | - name
47 | type: object
48 | user:
49 | properties:
50 | apiGroup:
51 | type: string
52 | kind:
53 | type: string
54 | name:
55 | type: string
56 | namespace:
57 | type: string
58 | required:
59 | - apiGroup
60 | - kind
61 | - name
62 | type: object
63 | required:
64 | - kafka
65 | type: object
66 | status:
67 | properties:
68 | binding:
69 | properties:
70 | name:
71 | type: string
72 | type: object
73 | conditions:
74 | items:
75 | properties:
76 | lastTransitionTime:
77 | type: string
78 | message:
79 | type: string
80 | reason:
81 | type: string
82 | status:
83 | type: string
84 | type:
85 | type: string
86 | type: object
87 | x-kubernetes-preserve-unknown-fields: true
88 | type: array
89 | observedGeneration:
90 | type: integer
91 | type: object
92 | type: object
93 | served: true
94 | storage: true
95 | subresources:
96 | status: {}
97 |
--------------------------------------------------------------------------------
/packaging/helm-charts/helm3/strimzi-access-operator/templates/010-ServiceAccount.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: strimzi-access-operator
6 | labels:
7 | app: strimzi-access-operator
8 | namespace: {{ .Release.Namespace }}
--------------------------------------------------------------------------------
/packaging/helm-charts/helm3/strimzi-access-operator/templates/020-ClusterRole.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | name: strimzi-access-operator
6 | labels:
7 | app: strimzi-access-operator
8 | rules:
9 | - apiGroups:
10 | - "access.strimzi.io"
11 | resources:
12 | - kafkaaccesses
13 | - kafkaaccesses/status
14 | verbs:
15 | - get
16 | - list
17 | - watch
18 | - create
19 | - delete
20 | - patch
21 | - update
22 | - apiGroups:
23 | - "kafka.strimzi.io"
24 | resources:
25 | - kafkas
26 | - kafkausers
27 | verbs:
28 | - get
29 | - list
30 | - watch
31 | - apiGroups:
32 | - ""
33 | resources:
34 | - secrets
35 | verbs:
36 | - get
37 | - list
38 | - watch
39 | - create
40 | - delete
41 | - patch
42 | - update
43 |
--------------------------------------------------------------------------------
/packaging/helm-charts/helm3/strimzi-access-operator/templates/030-ClusterRoleBinding.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRoleBinding
4 | metadata:
5 | name: strimzi-access-operator
6 | labels:
7 | app: strimzi-access-operator
8 | subjects:
9 | - kind: ServiceAccount
10 | name: strimzi-access-operator
11 | namespace: {{ .Release.Namespace }}
12 | roleRef:
13 | kind: ClusterRole
14 | name: strimzi-access-operator
15 | apiGroup: rbac.authorization.k8s.io
16 |
--------------------------------------------------------------------------------
/packaging/helm-charts/helm3/strimzi-access-operator/templates/050-Deployment.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: strimzi-access-operator
6 | labels:
7 | app: strimzi-access-operator
8 | namespace: {{ .Release.Namespace }}
9 | spec:
10 | replicas: 1
11 | selector:
12 | matchLabels:
13 | app: strimzi-access-operator
14 | strimzi.io/kind: access-operator
15 | strategy:
16 | type: Recreate
17 | template:
18 | metadata:
19 | labels:
20 | app: strimzi-access-operator
21 | strimzi.io/kind: access-operator
22 | spec:
23 | serviceAccountName: strimzi-access-operator
24 | volumes:
25 | - name: strimzi-tmp
26 | emptyDir:
27 | medium: Memory
28 | sizeLimit: 1Mi
29 | containers:
30 | - name: access-operator
31 | image: {{ .Values.image.registry }}/{{ .Values.image.repository}}/{{ .Values.image.name }}:{{ .Values.image.tag }}
32 | {{- with .Values.image.pullPolicy }}
33 | imagePullPolicy: {{ . }}
34 | {{- end }}
35 | ports:
36 | - containerPort: 8080
37 | name: http
38 | args:
39 | - /opt/strimzi/bin/access_operator_run.sh
40 | volumeMounts:
41 | - name: strimzi-tmp
42 | mountPath: /tmp
43 | resources:
44 | {{ toYaml .Values.resources | indent 12 }}
45 | livenessProbe:
46 | httpGet:
47 | path: /healthy
48 | port: http
49 | initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
50 | periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
51 | readinessProbe:
52 | httpGet:
53 | path: /ready
54 | port: http
55 | initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
56 | periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
57 |
--------------------------------------------------------------------------------
/packaging/helm-charts/helm3/strimzi-access-operator/values.yaml:
--------------------------------------------------------------------------------
1 | image:
2 | registry: quay.io
3 | repository: strimzi
4 | name: access-operator
5 | tag: latest
6 |
7 | resources:
8 | limits:
9 | memory: 256Mi
10 | cpu: 500m
11 | requests:
12 | memory: 256Mi
13 | cpu: 100m
14 | livenessProbe:
15 | initialDelaySeconds: 10
16 | periodSeconds: 30
17 | readinessProbe:
18 | initialDelaySeconds: 10
19 | periodSeconds: 30
20 |
--------------------------------------------------------------------------------
/packaging/install/000-Namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: strimzi-access-operator
5 | labels:
6 | app: strimzi-access-operator
7 |
--------------------------------------------------------------------------------
/packaging/install/010-ServiceAccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: strimzi-access-operator
5 | labels:
6 | app: strimzi-access-operator
7 | namespace: strimzi-access-operator
8 |
--------------------------------------------------------------------------------
/packaging/install/020-ClusterRole.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: strimzi-access-operator
5 | labels:
6 | app: strimzi-access-operator
7 | rules:
8 | - apiGroups:
9 | - "access.strimzi.io"
10 | resources:
11 | - kafkaaccesses
12 | - kafkaaccesses/status
13 | verbs:
14 | - get
15 | - list
16 | - watch
17 | - create
18 | - delete
19 | - patch
20 | - update
21 | - apiGroups:
22 | - "kafka.strimzi.io"
23 | resources:
24 | - kafkas
25 | - kafkausers
26 | verbs:
27 | - get
28 | - list
29 | - watch
30 | - apiGroups:
31 | - ""
32 | resources:
33 | - secrets
34 | verbs:
35 | - get
36 | - list
37 | - watch
38 | - create
39 | - delete
40 | - patch
41 | - update
42 |
--------------------------------------------------------------------------------
/packaging/install/030-ClusterRoleBinding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | name: strimzi-access-operator
5 | labels:
6 | app: strimzi-access-operator
7 | subjects:
8 | - kind: ServiceAccount
9 | name: strimzi-access-operator
10 | namespace: strimzi-access-operator
11 | roleRef:
12 | kind: ClusterRole
13 | name: strimzi-access-operator
14 | apiGroup: rbac.authorization.k8s.io
15 |
--------------------------------------------------------------------------------
/packaging/install/040-Crd-kafkaaccess.yaml:
--------------------------------------------------------------------------------
1 | # Generated by Fabric8 CRDGenerator, manual edits might get overwritten!
2 | apiVersion: apiextensions.k8s.io/v1
3 | kind: CustomResourceDefinition
4 | metadata:
5 | name: kafkaaccesses.access.strimzi.io
6 | labels:
7 | servicebinding.io/provisioned-service: "true"
8 | spec:
9 | group: access.strimzi.io
10 | names:
11 | kind: KafkaAccess
12 | plural: kafkaaccesses
13 | shortNames:
14 | - ka
15 | singular: kafkaaccess
16 | scope: Namespaced
17 | versions:
18 | - additionalPrinterColumns:
19 | - jsonPath: .spec.kafka.listener
20 | name: Listener
21 | priority: 0
22 | type: string
23 | - jsonPath: .spec.kafka.name
24 | name: Cluster
25 | priority: 0
26 | type: string
27 | - jsonPath: .spec.user.name
28 | name: User
29 | priority: 0
30 | type: string
31 | name: v1alpha1
32 | schema:
33 | openAPIV3Schema:
34 | properties:
35 | spec:
36 | properties:
37 | kafka:
38 | properties:
39 | listener:
40 | type: string
41 | name:
42 | type: string
43 | namespace:
44 | type: string
45 | required:
46 | - name
47 | type: object
48 | user:
49 | properties:
50 | apiGroup:
51 | type: string
52 | kind:
53 | type: string
54 | name:
55 | type: string
56 | namespace:
57 | type: string
58 | required:
59 | - apiGroup
60 | - kind
61 | - name
62 | type: object
63 | required:
64 | - kafka
65 | type: object
66 | status:
67 | properties:
68 | binding:
69 | properties:
70 | name:
71 | type: string
72 | type: object
73 | conditions:
74 | items:
75 | properties:
76 | lastTransitionTime:
77 | type: string
78 | message:
79 | type: string
80 | reason:
81 | type: string
82 | status:
83 | type: string
84 | type:
85 | type: string
86 | type: object
87 | x-kubernetes-preserve-unknown-fields: true
88 | type: array
89 | observedGeneration:
90 | type: integer
91 | type: object
92 | type: object
93 | served: true
94 | storage: true
95 | subresources:
96 | status: {}
97 |
--------------------------------------------------------------------------------
/packaging/install/050-Deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: strimzi-access-operator
5 | labels:
6 | app: strimzi-access-operator
7 | namespace: strimzi-access-operator
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: strimzi-access-operator
13 | strimzi.io/kind: access-operator
14 | strategy:
15 | type: Recreate
16 | template:
17 | metadata:
18 | labels:
19 | app: strimzi-access-operator
20 | strimzi.io/kind: access-operator
21 | spec:
22 | serviceAccountName: strimzi-access-operator
23 | volumes:
24 | - name: strimzi-tmp
25 | emptyDir:
26 | medium: Memory
27 | sizeLimit: 1Mi
28 | containers:
29 | - name: access-operator
30 | image: quay.io/strimzi/access-operator:latest
31 | ports:
32 | - containerPort: 8080
33 | name: http
34 | args:
35 | - /opt/strimzi/bin/access_operator_run.sh
36 | volumeMounts:
37 | - name: strimzi-tmp
38 | mountPath: /tmp
39 | resources:
40 | limits:
41 | cpu: 500m
42 | memory: 256Mi
43 | requests:
44 | cpu: 100m
45 | memory: 256Mi
46 | livenessProbe:
47 | httpGet:
48 | path: /healthy
49 | port: http
50 | initialDelaySeconds: 10
51 | periodSeconds: 30
52 | readinessProbe:
53 | httpGet:
54 | path: /ready
55 | port: http
56 | initialDelaySeconds: 10
57 | periodSeconds: 30
58 |
--------------------------------------------------------------------------------
/release.version:
--------------------------------------------------------------------------------
1 | 0.2.0-SNAPSHOT
2 |
--------------------------------------------------------------------------------
/systemtest/src/main/java/io/strimzi/kafka/access/Environment.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access;
6 |
7 | import io.skodjob.testframe.enums.InstallType;
8 | import io.skodjob.testframe.environment.TestEnvironmentVariables;
9 |
10 | public class Environment {
11 |
12 | private static final TestEnvironmentVariables ENVIRONMENT_VARIABLES = new TestEnvironmentVariables();
13 |
14 | //---------------------------------------
15 | // Env variables initialization
16 | //---------------------------------------
17 | private static final String INSTALL_TYPE_ENV = "INSTALL_TYPE";
18 | public static final InstallType INSTALL_TYPE = ENVIRONMENT_VARIABLES.getOrDefault(INSTALL_TYPE_ENV, InstallType::fromString, InstallType.Yaml);
19 |
20 | private static final String OPERATOR_REGISTRY_ENV = "DOCKER_REGISTRY";
21 | public static final String OPERATOR_REGISTRY = ENVIRONMENT_VARIABLES.getOrDefault(OPERATOR_REGISTRY_ENV, null);
22 |
23 | private static final String OPERATOR_ORG_ENV = "DOCKER_ORG";
24 | public static final String OPERATOR_ORG = ENVIRONMENT_VARIABLES.getOrDefault(OPERATOR_ORG_ENV, null);
25 |
26 | private static final String OPERATOR_TAG_ENV = "DOCKER_TAG";
27 | public static final String OPERATOR_TAG = ENVIRONMENT_VARIABLES.getOrDefault(OPERATOR_TAG_ENV, null);
28 |
29 | static {
30 | ENVIRONMENT_VARIABLES.logEnvironmentVariables();
31 | }
32 | }
33 |
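The constants above resolve once, at class-load time. A minimal sketch of inspecting the resolved values (hypothetical demo class, not part of the repository):

import io.strimzi.kafka.access.Environment;

public class EnvironmentDemo {
    public static void main(String[] args) {
        // Defaults to InstallType.Yaml when INSTALL_TYPE is unset
        System.out.println("install type: " + Environment.INSTALL_TYPE);
        // Null unless DOCKER_REGISTRY / DOCKER_ORG / DOCKER_TAG are exported;
        // when set, they override the operator image used during installation
        System.out.println("registry: " + Environment.OPERATOR_REGISTRY);
        System.out.println("org:      " + Environment.OPERATOR_ORG);
        System.out.println("tag:      " + Environment.OPERATOR_TAG);
    }
}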
--------------------------------------------------------------------------------
/systemtest/src/main/java/io/strimzi/kafka/access/TestConstants.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access;
6 |
7 | import java.time.Duration;
8 |
9 | /**
10 | * Interface for keeping global constants used across system tests.
11 | */
12 | public interface TestConstants {
13 | String USER_PATH = System.getProperty("user.dir");
14 |
15 | String INSTALL_PATH = USER_PATH + "/../packaging/install/";
16 | String HELM_CHARTS_PATH = USER_PATH + "/../packaging/helm-charts/helm3/strimzi-access-operator";
17 |
18 | //--------------------------
19 | // Strimzi related constants
20 | //--------------------------
21 |     // If the Strimzi version in pom.xml changes, update this constant as well
22 | String STRIMZI_API_VERSION = "0.41.0";
23 |
24 | //--------------------------
25 | // Resource types
26 | //--------------------------
27 | String NAMESPACE = "Namespace";
28 | String DEPLOYMENT = "Deployment";
29 | String SERVICE_ACCOUNT = "ServiceAccount";
30 | String CLUSTER_ROLE = "ClusterRole";
31 | String CLUSTER_ROLE_BINDING = "ClusterRoleBinding";
32 | String CUSTOM_RESOURCE_DEFINITION_SHORT = "Crd";
33 |
34 | //--------------------------
35 | // KafkaUser Secret fields
36 | //--------------------------
37 | String USER_CRT = "user.crt";
38 | String USER_KEY = "user.key";
39 | String PASSWORD = "password";
40 | String SASL_JAAS_CONFIG = "sasl.jaas.config";
41 |
46 | //--------------------------
47 | // Access Secret's fields
48 | //--------------------------
49 | String BOOTSTRAP_SERVERS = "bootstrapServers";
50 | String SECURITY_PROTOCOL = "securityProtocol";
51 | String SSL_KEYSTORE_CRT = "ssl.keystore.crt";
52 | String SSL_KEYSTORE_KEY = "ssl.keystore.key";
53 |
54 | //--------------------------
55 | // Duration constants
56 | //--------------------------
57 | long GLOBAL_POLL_INTERVAL_SHORT_MS = Duration.ofSeconds(1).toMillis();
58 | long GLOBAL_TIMEOUT_SHORT_MS = Duration.ofMinutes(2).toMillis();
59 | }
60 |
--------------------------------------------------------------------------------
/systemtest/src/main/java/io/strimzi/kafka/access/TestStorage.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access;
6 |
7 | import java.util.Random;
8 |
9 | public class TestStorage {
10 |
11 | private static final String ACCESS_PREFIX = "access-";
12 | private static final String CLUSTER_NAME_PREFIX = "cluster-";
13 | private static final Random RANDOM = new Random();
14 |
15 | private final String kafkaAccessName;
16 | private final String kafkaClusterName;
17 |
18 | public TestStorage() {
19 |         int randomized = RANDOM.nextInt(Integer.MAX_VALUE); // nextInt(bound) already returns a non-negative value
20 | this.kafkaAccessName = ACCESS_PREFIX + randomized;
21 | this.kafkaClusterName = CLUSTER_NAME_PREFIX + randomized;
22 | }
23 |
24 | public String getKafkaAccessName() {
25 | return kafkaAccessName;
26 | }
27 |
28 | public String getKafkaClusterName() {
29 | return kafkaClusterName;
30 | }
31 | }
32 |
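Each TestStorage instance pairs a KafkaAccess name and a cluster name that share the same random suffix, so resources created by one test are easy to correlate and do not collide with other tests. A usage sketch (hypothetical demo, not part of the repository):

import io.strimzi.kafka.access.TestStorage;

public class TestStorageDemo {
    public static void main(String[] args) {
        TestStorage storage = new TestStorage();
        // Prints e.g. "access-1234567" and "cluster-1234567" (shared suffix)
        System.out.println(storage.getKafkaAccessName());
        System.out.println(storage.getKafkaClusterName());
    }
}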
--------------------------------------------------------------------------------
/systemtest/src/main/java/io/strimzi/kafka/access/installation/BundleInstallation.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access.installation;
6 |
7 | import io.fabric8.kubernetes.api.model.NamespaceBuilder;
8 | import io.fabric8.kubernetes.api.model.ServiceAccount;
9 | import io.fabric8.kubernetes.api.model.ServiceAccountBuilder;
10 | import io.fabric8.kubernetes.api.model.apiextensions.v1.CustomResourceDefinition;
11 | import io.fabric8.kubernetes.api.model.apps.Deployment;
12 | import io.fabric8.kubernetes.api.model.apps.DeploymentBuilder;
13 | import io.fabric8.kubernetes.api.model.rbac.ClusterRole;
14 | import io.fabric8.kubernetes.api.model.rbac.ClusterRoleBinding;
15 | import io.fabric8.kubernetes.api.model.rbac.ClusterRoleBindingBuilder;
16 | import io.skodjob.testframe.installation.InstallationMethod;
17 | import io.skodjob.testframe.resources.KubeResourceManager;
18 | import io.skodjob.testframe.utils.ImageUtils;
19 | import io.skodjob.testframe.utils.TestFrameUtils;
20 | import io.strimzi.kafka.access.Environment;
21 | import io.strimzi.kafka.access.TestConstants;
22 |
23 | import java.io.File;
24 | import java.util.Arrays;
25 | import java.util.List;
26 |
27 | public class BundleInstallation implements InstallationMethod {
28 | private final String installationNamespace;
29 |
30 | public BundleInstallation(String installationNamespace) {
31 | this.installationNamespace = installationNamespace;
32 | }
33 |
34 | @Override
35 | public void install() {
36 |         List<File> accessOperatorFiles = Arrays.stream(new File(TestConstants.INSTALL_PATH).listFiles())
37 | .sorted()
38 | .filter(File::isFile)
39 | .toList();
40 |
41 | accessOperatorFiles.forEach(file -> {
42 | final String resourceType = file.getName().split("-")[1].replace(".yaml", "");
43 |
44 | switch (resourceType) {
45 | case TestConstants.NAMESPACE:
46 | // create Namespace
47 | KubeResourceManager.getInstance().createResourceWithWait(new NamespaceBuilder()
48 | .editOrNewMetadata()
49 | .withName(installationNamespace)
50 | .endMetadata()
51 | .build()
52 | );
53 | break;
54 | case TestConstants.SERVICE_ACCOUNT:
55 | ServiceAccount serviceAccount = TestFrameUtils.configFromYaml(file, ServiceAccount.class);
56 | KubeResourceManager.getInstance().createOrUpdateResourceWithWait(new ServiceAccountBuilder(serviceAccount)
57 | .editMetadata()
58 | .withNamespace(installationNamespace)
59 | .endMetadata()
60 | .build()
61 | );
62 | break;
63 | case TestConstants.CLUSTER_ROLE:
64 | ClusterRole clusterRole = TestFrameUtils.configFromYaml(file, ClusterRole.class);
65 | KubeResourceManager.getInstance().createOrUpdateResourceWithWait(clusterRole);
66 | break;
67 | case TestConstants.CLUSTER_ROLE_BINDING:
68 | ClusterRoleBinding clusterRoleBinding = TestFrameUtils.configFromYaml(file, ClusterRoleBinding.class);
69 | KubeResourceManager.getInstance().createOrUpdateResourceWithWait(new ClusterRoleBindingBuilder(clusterRoleBinding)
70 | .editOrNewMetadata()
71 | .withNamespace(installationNamespace)
72 | .endMetadata()
73 | .editFirstSubject()
74 | .withNamespace(installationNamespace)
75 | .endSubject()
76 | .build()
77 | );
78 | break;
79 | case TestConstants.CUSTOM_RESOURCE_DEFINITION_SHORT:
80 | CustomResourceDefinition customResourceDefinition = TestFrameUtils.configFromYaml(file, CustomResourceDefinition.class);
81 | KubeResourceManager.getInstance().createOrUpdateResourceWithWait(customResourceDefinition);
82 | break;
83 | case TestConstants.DEPLOYMENT:
84 | deployKafkaAccessOperator(file);
85 | break;
86 | default:
87 | // nothing to do, skipping
88 | break;
89 | }
90 | });
91 | }
92 |
93 | @Override
94 | public void delete() {
95 | // nothing to do here, as we are deleting everything using KubeResourceManager, which deletes everything at the
96 | // end of the tests
97 | }
98 |
99 | private void deployKafkaAccessOperator(File deploymentFile) {
100 | Deployment accessOperatorDeployment = TestFrameUtils.configFromYaml(deploymentFile, Deployment.class);
101 |
102 | String deploymentImage = accessOperatorDeployment
103 | .getSpec()
104 | .getTemplate()
105 | .getSpec()
106 | .getContainers()
107 | .get(0)
108 | .getImage();
109 |
110 | accessOperatorDeployment = new DeploymentBuilder(accessOperatorDeployment)
111 | .editOrNewMetadata()
112 | .withNamespace(installationNamespace)
113 | .endMetadata()
114 | .editSpec()
115 | .editTemplate()
116 | .editSpec()
117 | .editContainer(0)
118 | .withImage(ImageUtils.changeRegistryOrgAndTag(deploymentImage, Environment.OPERATOR_REGISTRY, Environment.OPERATOR_ORG, Environment.OPERATOR_TAG))
119 | .endContainer()
120 | .endSpec()
121 | .endTemplate()
122 | .endSpec()
123 | .build();
124 |
125 | KubeResourceManager.getInstance().createResourceWithWait(accessOperatorDeployment);
126 | }
127 | }
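
The switch in install() keys off the middle token of each manifest file name from packaging/install. A small sketch of that naming convention (hypothetical helper mirroring the split("-") logic above):

public class ResourceTypeFromFileName {
    static String resourceType(String fileName) {
        // Same parsing as BundleInstallation.install()
        return fileName.split("-")[1].replace(".yaml", "");
    }

    public static void main(String[] args) {
        System.out.println(resourceType("000-Namespace.yaml"));       // Namespace
        System.out.println(resourceType("040-Crd-kafkaaccess.yaml")); // Crd
        System.out.println(resourceType("050-Deployment.yaml"));      // Deployment
    }
}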
--------------------------------------------------------------------------------
/systemtest/src/main/java/io/strimzi/kafka/access/installation/HelmInstallation.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access.installation;
6 |
7 | import com.marcnuri.helm.Helm;
8 | import com.marcnuri.helm.InstallCommand;
9 | import com.marcnuri.helm.UninstallCommand;
10 | import io.skodjob.testframe.installation.InstallationMethod;
11 | import io.skodjob.testframe.resources.KubeResourceManager;
12 | import io.strimzi.kafka.access.Environment;
13 | import io.strimzi.kafka.access.TestConstants;
14 |
15 | import java.nio.file.Paths;
16 |
17 | public class HelmInstallation implements InstallationMethod {
18 |
19 | private final String installationNamespace;
20 |
21 | public HelmInstallation(String installationNamespace) {
22 | this.installationNamespace = installationNamespace;
23 | }
24 |
25 | public static final String HELM_RELEASE_NAME = "kao-systemtests";
26 |
27 | @Override
28 | public void install() {
29 | InstallCommand installCommand = new Helm(Paths.get(TestConstants.HELM_CHARTS_PATH))
30 | .install()
31 | .withName(HELM_RELEASE_NAME)
32 | .withNamespace(installationNamespace)
33 | .createNamespace()
34 | .waitReady();
35 |
36 | if (Environment.OPERATOR_REGISTRY != null) {
37 | // image registry config
38 | installCommand.set("image.registry", Environment.OPERATOR_REGISTRY);
39 | }
40 |
41 | if (Environment.OPERATOR_ORG != null) {
42 | // image repository config
43 | installCommand.set("image.repository", Environment.OPERATOR_ORG);
44 | }
45 |
46 | if (Environment.OPERATOR_TAG != null) {
47 | // image tags config
48 | installCommand.set("image.tag", Environment.OPERATOR_TAG);
49 | }
50 |
51 | installCommand.call();
52 | }
53 |
54 | @Override
55 | public void delete() {
56 | Helm.uninstall(HELM_RELEASE_NAME)
57 | .withCascade(UninstallCommand.Cascade.ORPHAN)
58 | .withNamespace(installationNamespace)
59 | .call();
60 |
61 | KubeResourceManager.getKubeClient().getClient().namespaces().withName(installationNamespace).delete();
62 | }
63 | }
64 |
--------------------------------------------------------------------------------
/systemtest/src/main/java/io/strimzi/kafka/access/installation/SetupAccessOperator.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access.installation;
6 |
7 | import io.skodjob.testframe.enums.InstallType;
8 | import io.skodjob.testframe.installation.InstallationMethod;
9 | import io.strimzi.kafka.access.Environment;
10 |
11 | public class SetupAccessOperator {
12 | private final InstallationMethod installationMethod;
13 | private final String installationNamespace;
14 |
15 | public SetupAccessOperator(String installationNamespace) {
16 | this.installationNamespace = installationNamespace;
17 | this.installationMethod = getInstallationMethod();
18 | }
19 |
20 | public void install() {
21 | installationMethod.install();
22 | }
23 |
24 | public void delete() {
25 | installationMethod.delete();
26 | }
27 |
28 | private InstallationMethod getInstallationMethod() {
29 | return Environment.INSTALL_TYPE == InstallType.Helm ? new HelmInstallation(installationNamespace) : new BundleInstallation(installationNamespace);
30 | }
31 | }
32 |
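Callers interact only with this facade; the INSTALL_TYPE environment variable decides whether a Helm or a YAML-bundle installation is constructed. A usage sketch (hypothetical, namespace name illustrative):

import io.strimzi.kafka.access.installation.SetupAccessOperator;

public class SetupDemo {
    public static void main(String[] args) {
        // HelmInstallation when INSTALL_TYPE=Helm, BundleInstallation otherwise
        SetupAccessOperator setup = new SetupAccessOperator("access-operator-test");
        setup.install();
        // ... run tests against the operator ...
        setup.delete();
    }
}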
--------------------------------------------------------------------------------
/systemtest/src/main/java/io/strimzi/kafka/access/resources/KafkaAccessType.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access.resources;
6 |
7 | import io.fabric8.kubernetes.api.model.KubernetesResourceList;
8 | import io.fabric8.kubernetes.client.dsl.MixedOperation;
9 | import io.fabric8.kubernetes.client.dsl.Resource;
10 | import io.skodjob.testframe.interfaces.ResourceType;
11 | import io.skodjob.testframe.resources.KubeResourceManager;
12 | import io.strimzi.api.kafka.model.common.Condition;
13 | import io.strimzi.kafka.access.model.KafkaAccess;
14 | import io.strimzi.kafka.access.model.KafkaAccessStatus;
15 |
16 | import java.util.Optional;
17 | import java.util.function.Consumer;
18 |
19 | public class KafkaAccessType implements ResourceType<KafkaAccess> {
20 |
21 |     private final MixedOperation<KafkaAccess, KubernetesResourceList<KafkaAccess>, Resource<KafkaAccess>> client;
22 |
23 | /**
24 | * Constructor
25 | */
26 | public KafkaAccessType() {
27 | this.client = kafkaAccessClient();
28 | }
29 |
30 |
31 | @Override
32 |     public MixedOperation<KafkaAccess, ?, ?> getClient() {
33 | return this.client;
34 | }
35 |
36 | @Override
37 | public String getKind() {
38 | return KafkaAccess.KIND;
39 | }
40 |
41 | @Override
42 | public void create(KafkaAccess kafkaAccess) {
43 | client.resource(kafkaAccess).create();
44 | }
45 |
46 | @Override
47 | public void update(KafkaAccess kafkaAccess) {
48 | client.resource(kafkaAccess).update();
49 | }
50 |
51 | @Override
52 | public void delete(KafkaAccess kafkaAccess) {
53 | client.resource(kafkaAccess).delete();
54 | }
55 |
56 | @Override
57 |     public void replace(KafkaAccess kafkaAccess, Consumer<KafkaAccess> editor) {
58 | KafkaAccess toBeReplaced = client.inNamespace(kafkaAccess.getMetadata().getNamespace()).withName(kafkaAccess.getMetadata().getName()).get();
59 | editor.accept(toBeReplaced);
60 | update(toBeReplaced);
61 | }
62 |
63 | @Override
64 | public boolean isReady(KafkaAccess kafkaAccess) {
65 | KafkaAccessStatus kafkaAccessStatus = client.resource(kafkaAccess).get().getStatus();
66 |         Optional<Condition> readyCondition = kafkaAccessStatus.getConditions().stream().filter(condition -> condition.getType().equals("Ready")).findFirst();
67 |
68 | return readyCondition.map(condition -> condition.getStatus().equals("True")).orElse(false);
69 | }
70 |
71 | @Override
72 | public boolean isDeleted(KafkaAccess kafkaAccess) {
73 | return kafkaAccess == null;
74 | }
75 |
76 |     public static MixedOperation<KafkaAccess, KubernetesResourceList<KafkaAccess>, Resource<KafkaAccess>> kafkaAccessClient() {
77 | return KubeResourceManager.getKubeClient().getClient().resources(KafkaAccess.class);
78 | }
79 | }
80 |
--------------------------------------------------------------------------------
/systemtest/src/main/java/io/strimzi/kafka/access/templates/KafkaAccessTemplates.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access.templates;
6 |
7 | import io.strimzi.kafka.access.model.KafkaAccessBuilder;
8 |
9 | public class KafkaAccessTemplates {
10 |
11 | private KafkaAccessTemplates() {}
12 |
13 | public static KafkaAccessBuilder kafkaAccess(String namespaceName, String name) {
14 | return new KafkaAccessBuilder()
15 | .withNewMetadata()
16 | .withName(name)
17 | .withNamespace(namespaceName)
18 | .endMetadata();
19 | }
20 | }
21 |
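The template only pre-fills metadata; tests chain the spec onto the returned builder. A sketch of a complete resource (assuming the generated KafkaAccessBuilder exposes the usual sundrio-style nested withNewSpec()/withNewKafka() methods; if it does not, plain setters on KafkaAccessSpec and KafkaReference achieve the same):

import io.strimzi.kafka.access.model.KafkaAccess;
import io.strimzi.kafka.access.templates.KafkaAccessTemplates;

public class KafkaAccessDemo {
    public static void main(String[] args) {
        KafkaAccess access = KafkaAccessTemplates.kafkaAccess("test-namespace", "my-access")
            .withNewSpec()
                .withNewKafka()
                    .withName("my-cluster")          // the only field the CRD requires
                    .withNamespace("test-namespace")
                    .withListener("tls")
                .endKafka()
            .endSpec()
            .build();
        System.out.println(access.getMetadata().getName());
    }
}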
--------------------------------------------------------------------------------
/systemtest/src/main/java/io/strimzi/kafka/access/templates/KafkaTemplates.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access.templates;
6 |
7 | import io.strimzi.api.kafka.model.kafka.KafkaBuilder;
8 | import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListener;
9 | import io.strimzi.api.kafka.model.kafka.listener.ListenerAddress;
10 | import io.strimzi.api.kafka.model.kafka.listener.ListenerAddressBuilder;
11 | import io.strimzi.api.kafka.model.kafka.listener.ListenerStatus;
12 | import io.strimzi.api.kafka.model.kafka.listener.ListenerStatusBuilder;
13 |
14 | import java.util.ArrayList;
15 | import java.util.List;
16 |
17 | public class KafkaTemplates {
18 |
19 | private KafkaTemplates() {}
20 |
21 |     public static KafkaBuilder kafkaWithListeners(String namespaceName, String clusterName, String host, List<GenericKafkaListener> listOfListeners) {
22 |         List<ListenerStatus> listOfStatuses = new ArrayList<>();
23 |
24 | listOfListeners.forEach(listener -> {
25 | ListenerAddress address = new ListenerAddressBuilder()
26 | .withHost(host)
27 | .withPort(listener.getPort())
28 | .build();
29 |
30 | listOfStatuses.add(new ListenerStatusBuilder()
31 | .withName(listener.getName())
32 | .withAddresses(List.of(address))
33 | .build());
34 | });
35 |
36 | return new KafkaBuilder()
37 | .withNewMetadata()
38 | .withName(clusterName)
39 | .withNamespace(namespaceName)
40 | .endMetadata()
41 | .withNewSpec()
42 | .withNewKafka()
43 | .withReplicas(3)
44 | .withListeners(listOfListeners)
45 | .endKafka()
46 | .withNewZookeeper()
47 | .withReplicas(3)
48 | .withNewEphemeralStorage()
49 | .endEphemeralStorage()
50 | .endZookeeper()
51 | .endSpec()
52 | .withNewStatus()
53 | .addAllToListeners(listOfStatuses)
54 | .endStatus();
55 | }
56 | }
57 |
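A sketch of building a Kafka resource whose status already advertises listener addresses, which is what the access operator reads (uses ListenerTemplates from further below; host and names are illustrative):

import io.strimzi.api.kafka.model.kafka.Kafka;
import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListener;
import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType;
import io.strimzi.kafka.access.templates.KafkaTemplates;
import io.strimzi.kafka.access.templates.ListenerTemplates;

import java.util.List;

public class KafkaDemo {
    public static void main(String[] args) {
        List<GenericKafkaListener> listeners = List.of(
                ListenerTemplates.listener("plain", KafkaListenerType.INTERNAL, null, 9092));
        Kafka kafka = KafkaTemplates.kafkaWithListeners(
                "test-namespace", "my-cluster", "my-cluster-kafka-bootstrap", listeners).build();
        // The stubbed status points every listener at the given host
        System.out.println(kafka.getStatus().getListeners().get(0).getAddresses());
    }
}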
--------------------------------------------------------------------------------
/systemtest/src/main/java/io/strimzi/kafka/access/templates/KafkaUserTemplates.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access.templates;
6 |
7 | import io.strimzi.api.kafka.model.user.KafkaUser;
8 | import io.strimzi.api.kafka.model.user.KafkaUserAuthentication;
9 | import io.strimzi.api.kafka.model.user.KafkaUserBuilder;
10 |
11 | public class KafkaUserTemplates {
12 |
13 | private KafkaUserTemplates() {}
14 |
15 | public static KafkaUser kafkaUser(String namespaceName, String userName, KafkaUserAuthentication auth) {
16 | return new KafkaUserBuilder()
17 | .withNewMetadata()
18 | .withName(userName)
19 | .withNamespace(namespaceName)
20 | .endMetadata()
21 | .withNewSpec()
22 | .withAuthentication(auth)
23 | .endSpec()
24 | .withNewStatus()
25 | .withSecret(userName)
26 | .withUsername("CN=" + userName)
27 | .endStatus()
28 | .build();
29 | }
30 | }
31 |
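A sketch using TLS client authentication (KafkaUserTlsClientAuthentication comes from the Strimzi API; names are illustrative):

import io.strimzi.api.kafka.model.user.KafkaUser;
import io.strimzi.api.kafka.model.user.KafkaUserTlsClientAuthentication;
import io.strimzi.kafka.access.templates.KafkaUserTemplates;

public class KafkaUserDemo {
    public static void main(String[] args) {
        KafkaUser user = KafkaUserTemplates.kafkaUser(
                "test-namespace", "my-user", new KafkaUserTlsClientAuthentication());
        // The template stubs the status the User Operator would normally populate
        System.out.println(user.getStatus().getUsername()); // CN=my-user
    }
}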
--------------------------------------------------------------------------------
/systemtest/src/main/java/io/strimzi/kafka/access/templates/ListenerTemplates.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access.templates;
6 |
7 | import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListener;
8 | import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder;
9 | import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerAuthentication;
10 | import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType;
11 |
12 | public class ListenerTemplates {
13 |
14 | private ListenerTemplates() {}
15 |
16 | public static GenericKafkaListener tlsListener(String name, KafkaListenerType type, KafkaListenerAuthentication auth, int port) {
17 | return defaultListener(name, type, auth, port)
18 |             .withTls(true)
19 | .build();
20 | }
21 |
22 | public static GenericKafkaListener listener(String name, KafkaListenerType type, KafkaListenerAuthentication auth, int port) {
23 | return defaultListener(name, type, auth, port)
24 | .withTls(false)
25 | .build();
26 | }
27 |
28 | private static GenericKafkaListenerBuilder defaultListener(String name, KafkaListenerType type, KafkaListenerAuthentication auth, int port) {
29 | return new GenericKafkaListenerBuilder()
30 | .withName(name)
31 | .withType(type)
32 | .withAuth(auth)
33 | .withPort(port);
34 | }
35 | }
36 |
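A sketch creating one TLS and one plain listener (authentication classes from the Strimzi API; ports follow the usual Kafka conventions):

import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListener;
import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerAuthenticationTls;
import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType;
import io.strimzi.kafka.access.templates.ListenerTemplates;

public class ListenerDemo {
    public static void main(String[] args) {
        GenericKafkaListener tls = ListenerTemplates.tlsListener(
                "tls", KafkaListenerType.INTERNAL, new KafkaListenerAuthenticationTls(), 9093);
        GenericKafkaListener plain = ListenerTemplates.listener(
                "plain", KafkaListenerType.INTERNAL, null, 9092);
        System.out.println(tls.getName() + " / " + plain.getName());
    }
}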
--------------------------------------------------------------------------------
/systemtest/src/main/java/io/strimzi/kafka/access/templates/SecretTemplates.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access.templates;
6 |
7 | import io.fabric8.kubernetes.api.model.Secret;
8 | import io.fabric8.kubernetes.api.model.SecretBuilder;
9 | import io.strimzi.kafka.access.TestConstants;
10 |
11 | import java.util.HashMap;
12 | import java.util.Map;
13 |
14 | public class SecretTemplates {
15 |
16 | private SecretTemplates() {}
17 |
18 | public static Secret tlsSecretForUser(String namespaceName, String userName, String clusterName, String userKey, String userCrt) {
19 |         Map<String, String> data = Map.of(
20 | TestConstants.USER_KEY, userKey,
21 | TestConstants.USER_CRT, userCrt
22 | );
23 |
24 | return defaultSecretForUser(namespaceName, userName, clusterName, data);
25 | }
26 |
27 | public static Secret scramShaSecretForUser(String namespaceName, String userName, String clusterName, String password, String saslJaasConfig) {
28 |         Map<String, String> data = Map.of(
29 | TestConstants.PASSWORD, password,
30 | TestConstants.SASL_JAAS_CONFIG, saslJaasConfig
31 | );
32 |
33 | return defaultSecretForUser(namespaceName, userName, clusterName, data);
34 | }
35 |
36 |     private static Secret defaultSecretForUser(String namespaceName, String userName, String clusterName, Map<String, String> data) {
37 |         final Map<String, String> labels = new HashMap<>();
38 |
39 | labels.put("app.kubernetes.io/managed-by", "strimzi-user-operator");
40 | labels.put("strimzi.io/cluster", clusterName);
41 |
42 | return new SecretBuilder()
43 | .withNewMetadata()
44 | .withName(userName)
45 | .withNamespace(namespaceName)
46 | .withLabels(labels)
47 | .endMetadata()
48 | .addToData(data)
49 | .build();
50 | }
51 | }
52 |
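Combined with SecretUtils (further below), this produces a complete fake KafkaUser secret, already Base64-encoded as Kubernetes expects in .data. A sketch (PEM bodies are placeholders):

import io.fabric8.kubernetes.api.model.Secret;
import io.strimzi.kafka.access.templates.SecretTemplates;
import io.strimzi.kafka.access.utils.SecretUtils;

public class SecretDemo {
    public static void main(String[] args) {
        String key = SecretUtils.createUserKey("fake-key-material");
        String crt = SecretUtils.createUserCrt("fake-cert-material");
        Secret secret = SecretTemplates.tlsSecretForUser(
                "test-namespace", "my-user", "my-cluster", key, crt);
        // Carries the managed-by and strimzi.io/cluster labels the operator matches on
        System.out.println(secret.getMetadata().getLabels());
    }
}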
--------------------------------------------------------------------------------
/systemtest/src/main/java/io/strimzi/kafka/access/utils/Base64Utils.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access.utils;
6 |
7 | import java.nio.charset.StandardCharsets;
8 | import java.util.Base64;
9 |
10 | public class Base64Utils {
11 |
12 | private Base64Utils() {}
13 |
14 | private static final Base64.Encoder ENCODER = Base64.getEncoder();
15 | private static final Base64.Decoder DECODER = Base64.getDecoder();
16 |
17 | public static byte[] decodeFromBase64(String encodedData) {
18 | return DECODER.decode(encodedData);
19 | }
20 |
21 | public static String decodeFromBase64ToString(String encodedData) {
22 |         return new String(decodeFromBase64(encodedData), StandardCharsets.UTF_8); // match the UTF-8 charset used for encoding
23 | }
24 |
25 | public static String encodeToBase64(String data) {
26 | return ENCODER.encodeToString(data.getBytes(StandardCharsets.UTF_8));
27 | }
28 | }
29 |
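A round-trip sketch (hypothetical demo, not part of the repository):

import io.strimzi.kafka.access.utils.Base64Utils;

public class Base64Demo {
    public static void main(String[] args) {
        String encoded = Base64Utils.encodeToBase64("bootstrap:9092");
        System.out.println(encoded);                                       // Base64 form
        System.out.println(Base64Utils.decodeFromBase64ToString(encoded)); // bootstrap:9092
    }
}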
--------------------------------------------------------------------------------
/systemtest/src/main/java/io/strimzi/kafka/access/utils/KafkaAccessUtils.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access.utils;
6 |
7 | import io.skodjob.testframe.wait.Wait;
8 | import io.strimzi.api.kafka.model.common.Condition;
9 | import io.strimzi.kafka.access.TestConstants;
10 | import io.strimzi.kafka.access.resources.KafkaAccessType;
11 |
12 | import java.util.Optional;
13 |
14 | public class KafkaAccessUtils {
15 |
16 | private KafkaAccessUtils() {}
17 |
18 | public static final String READY_TYPE = "Ready";
19 |
20 | public static boolean waitForKafkaAccessReady(String namespaceName, String accessName) {
21 | return waitForKafkaAccessStatus(namespaceName, accessName, READY_TYPE, "True");
22 | }
23 |
24 | public static boolean waitForKafkaAccessNotReady(String namespaceName, String accessName) {
25 | return waitForKafkaAccessStatus(namespaceName, accessName, READY_TYPE, "False");
26 | }
27 |
28 | public static boolean waitForKafkaAccessStatus(String namespaceName, String accessName, String conditionType, String conditionStatus) {
29 | Wait.until(
30 | "KafkaAccess %s/%s to contain condition %s with status %s".formatted(namespaceName, accessName, conditionType, conditionStatus),
31 | TestConstants.GLOBAL_POLL_INTERVAL_SHORT_MS,
32 | TestConstants.GLOBAL_TIMEOUT_SHORT_MS,
33 | () -> {
34 |                 Optional<Condition> desiredStatus = KafkaAccessType.kafkaAccessClient().inNamespace(namespaceName).withName(accessName).get()
35 | .getStatus().getConditions().stream().filter(condition -> condition.getType().equals(conditionType)).findFirst();
36 |
37 | return desiredStatus.map(condition -> condition.getStatus().equals(conditionStatus)).orElse(false);
38 | }
39 | );
40 |
41 | return true;
42 | }
43 | }
44 |
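In a test body the typical call is one line; it polls every second for up to two minutes (the duration constants from TestConstants) and fails the wait if the condition never appears. A sketch (hypothetical demo; it needs a running cluster and operator to succeed):

import io.strimzi.kafka.access.utils.KafkaAccessUtils;

public class WaitDemo {
    public static void main(String[] args) {
        // Blocks until the operator reports Ready=True on the KafkaAccess resource
        KafkaAccessUtils.waitForKafkaAccessReady("test-namespace", "my-access");
    }
}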
--------------------------------------------------------------------------------
/systemtest/src/main/java/io/strimzi/kafka/access/utils/ListenerUtils.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access.utils;
6 |
7 | public class ListenerUtils {
8 |
9 | private ListenerUtils() {}
10 |
11 | public static String bootstrapServer(String hostname, int port) {
12 | return String.join(":", hostname, String.valueOf(port));
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/systemtest/src/main/java/io/strimzi/kafka/access/utils/SecretUtils.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access.utils;
6 |
7 | public class SecretUtils {
8 |
9 | private SecretUtils() {}
10 |
11 | public static String createUserKey(String key) {
12 | String data = "-----BEGIN PRIVATE KEY-----\n" + key + "\n-----END PRIVATE KEY-----\n";
13 | return Base64Utils.encodeToBase64(data);
14 | }
15 |
16 | public static String createUserCrt(String crt) {
17 | String data = "-----BEGIN CERTIFICATE-----\n" + crt + "\n-----END CERTIFICATE-----\n";
18 | return Base64Utils.encodeToBase64(data);
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/systemtest/src/main/resources/log4j2.properties:
--------------------------------------------------------------------------------
1 | name = STConfig
2 |
3 | appender.console.type = Console
4 | appender.console.name = STDOUT
5 | appender.console.layout.type = PatternLayout
6 | appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss}{GMT} [%thread] %highlight{%-5p} [%c{1}:%L] %m%n
7 |
8 | appender.rolling.type = RollingFile
9 | appender.rolling.name = RollingFile
10 | appender.rolling.fileName = ${env:TEST_LOG_DIR:-target/logs}/strimzi-debug-${env:BUILD_ID:-0}.log
11 | appender.rolling.filePattern = ${env:TEST_LOG_DIR:-target/logs}/strimzi-debug-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz
12 | appender.rolling.policies.type = Policies
13 | appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
14 | appender.rolling.policies.size.size = 100MB
15 | appender.rolling.strategy.type = DefaultRolloverStrategy
16 | appender.rolling.strategy.max = 5
17 | appender.rolling.layout.type = PatternLayout
18 | appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss}{GMT} %-5p [%c{1}:%L] %m%n
19 |
20 | rootLogger.level = ${env:STRIMZI_TEST_ROOT_LOG_LEVEL:-DEBUG}
21 | rootLogger.appenderRef.console.ref = STDOUT
22 | rootLogger.appenderRef.console.level = ${env:STRIMZI_TEST_LOG_LEVEL:-INFO}
23 | rootLogger.appenderRef.rolling.ref = RollingFile
24 | rootLogger.appenderRef.rolling.level = DEBUG
25 | rootLogger.additivity = false
26 |
27 | logger.frame.name = io.skodjob.testframe
28 | logger.frame.level = info
--------------------------------------------------------------------------------
/systemtest/src/test/java/io/strimzi/kafka/access/AbstractST.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright Strimzi authors.
3 | * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
4 | */
5 | package io.strimzi.kafka.access;
6 |
7 | import io.skodjob.testframe.annotations.ResourceManager;
8 | import io.skodjob.testframe.annotations.TestVisualSeparator;
9 | import io.skodjob.testframe.resources.ClusterRoleBindingType;
10 | import io.skodjob.testframe.resources.ClusterRoleType;
11 | import io.skodjob.testframe.resources.CustomResourceDefinitionType;
12 | import io.skodjob.testframe.resources.DeploymentType;
13 | import io.skodjob.testframe.resources.KubeResourceManager;
14 | import io.skodjob.testframe.resources.NamespaceType;
15 | import io.strimzi.kafka.access.installation.SetupAccessOperator;
16 | import io.strimzi.kafka.access.resources.KafkaAccessType;
17 | import org.junit.jupiter.api.AfterAll;
18 | import org.junit.jupiter.api.BeforeAll;
19 | import org.junit.jupiter.api.TestInstance;
20 |
21 | @ResourceManager
22 | @TestVisualSeparator
23 | @TestInstance(TestInstance.Lifecycle.PER_CLASS)
24 | @SuppressWarnings("ClassDataAbstractionCoupling")
25 | public abstract class AbstractST {
26 | protected final KubeResourceManager resourceManager = KubeResourceManager.getInstance();
27 | public final String namespace = "main-namespace";
28 | private final String kafkaCrdUrl = "https://raw.githubusercontent.com/strimzi/strimzi-kafka-operator/%s/packaging/install/cluster-operator/040-Crd-kafka.yaml".formatted(TestConstants.STRIMZI_API_VERSION);
29 | private final String kafkaUserCrdUrl = "https://raw.githubusercontent.com/strimzi/strimzi-kafka-operator/%s/packaging/install/cluster-operator/044-Crd-kafkauser.yaml".formatted(TestConstants.STRIMZI_API_VERSION);
30 | private final SetupAccessOperator setupAccessOperator = new SetupAccessOperator(namespace);
31 |
32 | static {
33 | KubeResourceManager.getInstance().setResourceTypes(
34 | new ClusterRoleBindingType(),
35 | new ClusterRoleType(),
36 | new CustomResourceDefinitionType(),
37 | new DeploymentType(),
38 | new NamespaceType(),
39 | new KafkaAccessType()
40 | );
41 | }
42 |
43 | @BeforeAll
44 | void createResources() {
45 | // apply Kafka and KafkaUser CRDs for the tests
46 | KubeResourceManager.getKubeCmdClient().inNamespace(namespace).exec("apply", "-f", kafkaCrdUrl);
47 | KubeResourceManager.getKubeCmdClient().inNamespace(namespace).exec("apply", "-f", kafkaUserCrdUrl);
48 |
49 | // install KafkaAccessOperator
50 | setupAccessOperator.install();
51 | }
52 |
53 | @AfterAll
54 | void deleteResources() {
55 | // delete KafkaAccessOperator
56 | setupAccessOperator.delete();
57 |
58 | // delete CRDs
59 | KubeResourceManager.getKubeCmdClient().inNamespace(namespace).exec("delete", "-f", kafkaCrdUrl);
60 | KubeResourceManager.getKubeCmdClient().inNamespace(namespace).exec("delete", "-f", kafkaUserCrdUrl);
61 | }
62 | }
63 |
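A concrete test class would extend this harness; a sketch (hypothetical, not part of the repository; a real test would also point the spec at an existing Kafka cluster before expecting readiness):

package io.strimzi.kafka.access;

import io.skodjob.testframe.resources.KubeResourceManager;
import io.strimzi.kafka.access.templates.KafkaAccessTemplates;
import org.junit.jupiter.api.Test;

class KafkaAccessST extends AbstractST {

    @Test
    void testKafkaAccessCreation() {
        TestStorage storage = new TestStorage();
        // Created resources are tracked and cleaned up by KubeResourceManager
        KubeResourceManager.getInstance().createResourceWithWait(
                KafkaAccessTemplates.kafkaAccess(namespace, storage.getKafkaAccessName())
                        // ...spec referencing a Kafka cluster and listener goes here...
                        .build());
    }
}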
--------------------------------------------------------------------------------