├── .github └── workflows │ ├── build.yml │ ├── deploy.yml │ └── post-deploy.yml ├── .gitignore ├── .pre-commit-config.yaml ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── examples ├── .gitignore ├── 2_tls_authentication │ ├── client.properties │ ├── get_keys.sh │ └── readme.md ├── 3_simple_acl_authorization │ ├── client.properties │ └── readme.md ├── 4_configuration │ └── readme.md └── 5_connect │ ├── camel_es_connector.properties │ ├── connect.properties │ ├── connect_v2.properties │ ├── elasticsearch.yaml │ ├── readme.md │ ├── scripts │ ├── reset_example.sh │ └── setup_example.sh │ └── twitter_connector.properties ├── kfk ├── argument_extensions.py ├── commands │ ├── acls.py │ ├── clusters.py │ ├── configs.py │ ├── connect │ │ ├── __init__.py │ │ ├── clusters.py │ │ └── connectors.py │ ├── console.py │ ├── env.py │ ├── main.py │ ├── operator.py │ ├── topics.py │ └── users.py ├── commons.py ├── config.py ├── constants.py ├── kubectl_command_builder.py ├── kubernetes_commons.py ├── main.py ├── messages.py ├── option_extensions.py ├── setup.py └── utils.py ├── pyproject.toml ├── tests ├── files │ ├── client.properties │ ├── connect.properties │ ├── connect_with_invalid_url.properties │ ├── connect_with_only_image.properties │ ├── connect_with_zip_jar_plugins.properties │ ├── file-stream-connector.properties │ ├── twitter-connector.properties │ ├── twitter_connector_with_config_change.properties │ └── yaml │ │ ├── kafka-config.yaml │ │ ├── kafka-connect-connector-file-stream.yaml │ │ ├── kafka-connect-connector-twitter.yaml │ │ ├── kafka-connect-connector-twitter_with_config_change.yaml │ │ ├── kafka-connect.yaml │ │ ├── kafka-connect_with_image.yaml │ │ ├── kafka-connect_with_three_replicas.yaml │ │ ├── kafka-connect_with_zip_jar_plugins.yaml │ │ ├── kafka-ephemeral.yaml │ │ ├── kafka-ephemeral_name_updated.yaml │ │ ├── kafka-ephemeral_two_additional_configs_deleted.yaml │ │ ├── kafka-ephemeral_with_one_additional_config.yaml │ │ ├── kafka-ephemeral_with_one_replica.yaml │ │ ├── kafka-ephemeral_with_one_replica_one_zk_replica.yaml │ │ ├── kafka-ephemeral_with_two_additional_configs.yaml │ │ ├── kafka-ephemeral_with_two_replicas.yaml │ │ ├── kubeconfig │ │ ├── topic.yaml │ │ ├── topic_with_one_config.yaml │ │ ├── topic_with_two_configs.yaml │ │ ├── topic_without_config.yaml │ │ ├── user_with_authentication_scram.yaml │ │ ├── user_with_authentication_tls.yaml │ │ ├── user_with_authorization_with_one_topic_acl.yaml │ │ ├── user_with_authorization_with_one_topic_and_one_group_acls.yaml │ │ ├── user_with_authorization_with_three_topic_acls.yaml │ │ ├── user_with_authorization_with_two_topic_acls.yaml │ │ ├── user_with_authorization_with_two_topic_and_one_group_acls.yaml │ │ ├── user_with_one_quota.yaml │ │ ├── user_with_quotas_empty.yaml │ │ └── user_with_two_quotas.yaml ├── test_acls_command.py ├── test_clusters_command.py ├── test_config.py ├── test_configs_command.py ├── test_connect_clusters_command.py ├── test_connect_connectors_command.py ├── test_console_command.py ├── test_kfk.py ├── test_kubectl_cmd_builder.py ├── test_operator_command.py ├── test_setup.py ├── test_topics_command.py ├── test_users_command.py └── test_utils.py └── website └── index.html /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions 2 | # For more information see: 
https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 3 | 4 | name: Build 5 | 6 | on: 7 | push: 8 | branches: [ main ] 9 | pull_request: 10 | branches: [ main ] 11 | 12 | jobs: 13 | build: 14 | runs-on: ubuntu-latest 15 | strategy: 16 | matrix: 17 | python-version: ['3.9', '3.10', '3.11', '3.12', '3.13' ] 18 | steps: 19 | - uses: actions/checkout@v4.2.2 20 | - name: Install setuptools for Python ${{ matrix.python-version }} 21 | run: python -m pip install --upgrade setuptools 22 | 23 | - name: Set up Python ${{ matrix.python-version }} 24 | uses: actions/setup-python@v4 25 | with: 26 | python-version: ${{ matrix.python-version }} 27 | cache: 'pip' 28 | 29 | - name: Install dependencies 30 | run: make install-dependencies 31 | 32 | - name: Run pre-commit 33 | uses: pre-commit/action@v3.0.1 34 | 35 | - name: Retrieve the kubeconfig and decode it to a file 36 | run: | 37 | mkdir ~/.kube 38 | curl https://raw.githubusercontent.com/SystemCraftsman/strimzi-kafka-cli/main/tests/files/yaml/kubeconfig -o ~/.kube/config 39 | 40 | - name: Test 41 | run: | 42 | kfk 43 | make test 44 | 45 | - name: Build 46 | run: make build 47 | -------------------------------------------------------------------------------- /.github/workflows/deploy.yml: -------------------------------------------------------------------------------- 1 | # This workflows will upload a Python Package using Twine when a release is created 2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries 3 | 4 | name: Deploy 5 | 6 | on: 7 | release: 8 | types: [ published ] 9 | 10 | jobs: 11 | deploy: 12 | runs-on: ubuntu-latest 13 | 14 | outputs: 15 | version: ${{ steps.get_version.outputs.STRIMZI_CLI_VERSION }} 16 | 17 | steps: 18 | - uses: actions/checkout@v4.2.2 19 | - name: Get release version 20 | run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV 21 | 22 | - name: Get code version 23 | id: get_version 24 | run: echo "STRIMZI_CLI_VERSION=$(grep -m 1 'version' pyproject.toml | cut -d '=' -f 2 | xargs)" >> $GITHUB_ENV 25 | 26 | - name: Compare release and code version 27 | if: ${{ env.RELEASE_VERSION != env.STRIMZI_CLI_VERSION }} 28 | run: exit 1 29 | 30 | - name: Set up Python 31 | uses: actions/setup-python@v5.3.0 32 | with: 33 | python-version: '3.x' 34 | 35 | - name: Install dependencies 36 | run: make install-dependencies 37 | 38 | - name: Build 39 | run: make build 40 | 41 | - name: Publish 42 | env: 43 | TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} 44 | TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} 45 | run: | 46 | twine upload dist/* 47 | continue-on-error: true 48 | 49 | - name: Sleep for 3 mins 50 | uses: jakejarvis/wait-action@v0.1.1 51 | with: 52 | time: '180s' 53 | 54 | - name: Bake and push image 55 | uses: docker/build-push-action@v6.9.0 56 | with: 57 | username: ${{ secrets.QUAY_USERNAME }} 58 | password: ${{ secrets.QUAY_PASSWORD }} 59 | registry: quay.io 60 | repository: systemcraftsman/strimzi-kafka-cli 61 | tags: latest,${{ env.STRIMZI_CLI_VERSION }} 62 | path: . 
63 | dockerfile: ./Dockerfile 64 | push: true 65 | 66 | upload_build_data_artifact: 67 | runs-on: ubuntu-latest 68 | needs: deploy 69 | steps: 70 | - name: Write version 71 | run: | 72 | printf '{ 73 | "version": "${{ needs.deploy.outputs.version }}" 74 | }' >> build_data.json 75 | - uses: actions/upload-artifact@v4.4.3 76 | with: 77 | name: build_data 78 | path: ./ 79 | -------------------------------------------------------------------------------- /.github/workflows/post-deploy.yml: -------------------------------------------------------------------------------- 1 | name: Post-Deploy 2 | on: 3 | workflow_run: 4 | workflows: 5 | - Deploy 6 | branches-ignore: 7 | - main 8 | types: 9 | - completed 10 | 11 | jobs: 12 | download_build_data_artifact: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Download artifact 16 | uses: actions/github-script@v6 17 | with: 18 | script: | 19 | let allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({ 20 | owner: context.repo.owner, 21 | repo: context.repo.repo, 22 | run_id: context.payload.workflow_run.id, 23 | }); 24 | 25 | let matchArtifact = allArtifacts.data.artifacts.filter((artifact) => { 26 | return artifact.name == "build_data" 27 | })[0]; 28 | 29 | let download = await github.rest.actions.downloadArtifact({ 30 | owner: context.repo.owner, 31 | repo: context.repo.repo, 32 | artifact_id: matchArtifact.id, 33 | archive_format: 'zip', 34 | }); 35 | 36 | let fs = require('fs'); 37 | fs.writeFileSync(`${process.env.GITHUB_WORKSPACE}/build_data.zip`, Buffer.from(download.data)); 38 | 39 | - name: Unzip artifact 40 | run: unzip build_data.zip 41 | 42 | - name: Return Parsed JSON 43 | uses: actions/github-script@v6 44 | id: return-parsed-json 45 | with: 46 | script: | 47 | let fs = require('fs'); 48 | let data = fs.readFileSync('./build_data.json'); 49 | return JSON.parse(data); 50 | outputs: 51 | image_version: ${{ fromJSON(steps.return-parsed-json.outputs.result).image_version }} 52 | 53 | create_pr_on_brew_repo: 54 | runs-on: ubuntu-20.04 55 | needs: download_build_data_artifact 56 | 57 | permissions: 58 | contents: write 59 | 60 | steps: 61 | - name: Checkout Homebrew repo 62 | uses: actions/checkout@v4 63 | with: 64 | repository: SystemCraftsman/homebrew-strimzi-kafka-cli 65 | ref: refs/heads/master 66 | token: ${{ secrets.GITHUB_TOKEN }} 67 | 68 | - name: Apply changes 69 | run: | 70 | version="$(sed -n 's/version \"\(.*\)\"/\1/p' strimzi-kafka-cli.rb | sed -e 's/^[[:space:]]*//')" 71 | sed -i -e "s/$version/${{ needs.download_build_data_artifact.outputs.image_version }}/g" strimzi-kafka-cli.rb 72 | 73 | - name: Create Pull Request 74 | uses: peter-evans/create-pull-request@v5 75 | with: 76 | token: ${{ secrets.GITHUB_TOKEN }} 77 | commit-message: Update to version ${{ needs.download_build_data_artifact.outputs.image_version }} 78 | committer: GitHub 79 | author: ${{ github.actor }} <${{ github.actor }}@users.noreply.github.com> 80 | branch: version-update-${{ needs.download_build_data_artifact.outputs.image_version }} 81 | assignees: mabulgu 82 | reviewers: mabulgu 83 | title: '[Automated PR] Update image tag and revision for prod upgrade' 84 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | 
downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | website/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | # pycharm 132 | .idea/ 133 | 134 | #vscode 135 | .vscode 136 | 137 | #macOs 138 | .DS_Store 139 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v5.0.0 4 | hooks: 5 | - id: end-of-file-fixer 6 | - id: trailing-whitespace 7 | - id: check-yaml 8 | - id: check-docstring-first 9 | - id: check-executables-have-shebangs 10 | - id: check-toml 11 | - id: check-case-conflict 12 | - id: check-added-large-files 13 | args: [ '--maxkb=2048' ] 14 | exclude: ^logo/ 15 | - id: detect-private-key 16 | - id: forbid-new-submodules 17 | - id: pretty-format-json 18 | args: [ '--autofix', '--no-sort-keys', '--indent=4' ] 19 | - id: end-of-file-fixer 20 | - id: mixed-line-ending 21 | - id: debug-statements 22 | 23 | 24 | - repo: https://github.com/PyCQA/isort 25 | rev: 5.13.2 26 | hooks: 27 | - id: isort 28 | 29 | - repo: https://github.com/PyCQA/docformatter 30 | rev: v1.7.5 31 | hooks: 32 | - id: docformatter 33 | 34 | 35 | - repo: https://github.com/PyCQA/flake8 36 | rev: 7.1.1 37 | hooks: 38 | - id: flake8 39 | entry: flake8 40 | additional_dependencies: [ Flake8-pyproject ] 41 | 42 | 43 | - repo: 
https://github.com/PyCQA/bandit 44 | rev: '1.7.10' 45 | hooks: 46 | - id: bandit 47 | args: [ "-c", "pyproject.toml" ] 48 | additional_dependencies: [ "bandit[toml]" ] 49 | 50 | 51 | - repo: https://github.com/psf/black 52 | rev: 24.10.0 53 | hooks: 54 | - id: black 55 | language_version: python3 56 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | 2 | # Contributor Covenant Code of Conduct 3 | 4 | ## Our Pledge 5 | 6 | We as members, contributors, and leaders pledge to make participation in our 7 | community a harassment-free experience for everyone, regardless of age, body 8 | size, visible or invisible disability, ethnicity, sex characteristics, gender 9 | identity and expression, level of experience, education, socio-economic status, 10 | nationality, personal appearance, race, caste, color, religion, or sexual 11 | identity and orientation. 12 | 13 | We pledge to act and interact in ways that contribute to an open, welcoming, 14 | diverse, inclusive, and healthy community. 15 | 16 | ## Our Standards 17 | 18 | Examples of behavior that contributes to a positive environment for our 19 | community include: 20 | 21 | * Demonstrating empathy and kindness toward other people 22 | * Being respectful of differing opinions, viewpoints, and experiences 23 | * Giving and gracefully accepting constructive feedback 24 | * Accepting responsibility and apologizing to those affected by our mistakes, 25 | and learning from the experience 26 | * Focusing on what is best not just for us as individuals, but for the overall 27 | community 28 | 29 | Examples of unacceptable behavior include: 30 | 31 | * The use of sexualized language or imagery, and sexual attention or advances of 32 | any kind 33 | * Trolling, insulting or derogatory comments, and personal or political attacks 34 | * Public or private harassment 35 | * Publishing others' private information, such as a physical or email address, 36 | without their explicit permission 37 | * Other conduct which could reasonably be considered inappropriate in a 38 | professional setting 39 | 40 | ## Enforcement Responsibilities 41 | 42 | Community leaders are responsible for clarifying and enforcing our standards of 43 | acceptable behavior and will take appropriate and fair corrective action in 44 | response to any behavior that they deem inappropriate, threatening, offensive, 45 | or harmful. 46 | 47 | Community leaders have the right and responsibility to remove, edit, or reject 48 | comments, commits, code, wiki edits, issues, and other contributions that are 49 | not aligned to this Code of Conduct, and will communicate reasons for moderation 50 | decisions when appropriate. 51 | 52 | ## Scope 53 | 54 | This Code of Conduct applies within all community spaces, and also applies when 55 | an individual is officially representing the community in public spaces. 56 | Examples of representing our community include using an official e-mail address, 57 | posting via an official social media account, or acting as an appointed 58 | representative at an online or offline event. 59 | 60 | ## Enforcement 61 | 62 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 63 | reported to the community leaders responsible for enforcement at 64 | [INSERT CONTACT METHOD]. 65 | All complaints will be reviewed and investigated promptly and fairly. 
66 | 67 | All community leaders are obligated to respect the privacy and security of the 68 | reporter of any incident. 69 | 70 | ## Enforcement Guidelines 71 | 72 | Community leaders will follow these Community Impact Guidelines in determining 73 | the consequences for any action they deem in violation of this Code of Conduct: 74 | 75 | ### 1. Correction 76 | 77 | **Community Impact**: Use of inappropriate language or other behavior deemed 78 | unprofessional or unwelcome in the community. 79 | 80 | **Consequence**: A private, written warning from community leaders, providing 81 | clarity around the nature of the violation and an explanation of why the 82 | behavior was inappropriate. A public apology may be requested. 83 | 84 | ### 2. Warning 85 | 86 | **Community Impact**: A violation through a single incident or series of 87 | actions. 88 | 89 | **Consequence**: A warning with consequences for continued behavior. No 90 | interaction with the people involved, including unsolicited interaction with 91 | those enforcing the Code of Conduct, for a specified period of time. This 92 | includes avoiding interactions in community spaces as well as external channels 93 | like social media. Violating these terms may lead to a temporary or permanent 94 | ban. 95 | 96 | ### 3. Temporary Ban 97 | 98 | **Community Impact**: A serious violation of community standards, including 99 | sustained inappropriate behavior. 100 | 101 | **Consequence**: A temporary ban from any sort of interaction or public 102 | communication with the community for a specified period of time. No public or 103 | private interaction with the people involved, including unsolicited interaction 104 | with those enforcing the Code of Conduct, is allowed during this period. 105 | Violating these terms may lead to a permanent ban. 106 | 107 | ### 4. Permanent Ban 108 | 109 | **Community Impact**: Demonstrating a pattern of violation of community 110 | standards, including sustained inappropriate behavior, harassment of an 111 | individual, or aggression toward or disparagement of classes of individuals. 112 | 113 | **Consequence**: A permanent ban from any sort of public interaction within the 114 | community. 115 | 116 | ## Attribution 117 | 118 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 119 | version 2.1, available at 120 | [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. 121 | 122 | Community Impact Guidelines were inspired by 123 | [Mozilla's code of conduct enforcement ladder][Mozilla CoC]. 124 | 125 | For answers to common questions about this code of conduct, see the FAQ at 126 | [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at 127 | [https://www.contributor-covenant.org/translations][translations]. 128 | 129 | [homepage]: https://www.contributor-covenant.org 130 | [v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html 131 | [Mozilla CoC]: https://github.com/mozilla/diversity 132 | [FAQ]: https://www.contributor-covenant.org/faq 133 | [translations]: https://www.contributor-covenant.org/translations 134 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Welcome to [strimzi-kafka-cli](https://github.com/systemcraftsman/strimzi-kafka-cli)! Before sending your pull requests, make sure that you __read the whole guidelines__. 
If you have any doubt about the contributing guide, please feel free to [state it clearly in an issue](https://github.com/systemcraftsman/strimzi-kafka-cli/issues/new). 4 | 5 | ## Contributing 6 | 7 | ### Contributor 8 | 9 | We are very happy that you are considering contributing to Strimzi Kafka CLI! This project is used by Kafka and Strimzi users from all over the globe. Being one of our contributors, you agree and confirm that: 10 | 11 | - Your work will be distributed under [Apache-2.0](LICENSE) once your pull request is merged 12 | - Your submitted work fulfils or mostly fulfils our styles and standards 13 | 14 | __New functionality__ is welcome! For example, new commands, new options for existing commands, or alternative approaches with different trade-offs are all good candidates; an __identical implementation__ of functionality that already exists is not allowed. Please check whether the functionality is already implemented before submitting your pull request. 15 | 16 | __Improving comments__ and __writing proper tests__ are also highly welcome. 17 | 18 | ### Contribution 19 | 20 | We appreciate any contribution, from fixing a grammar mistake in a comment to implementing complex features. Please read this section if you are contributing your work. 21 | 22 | Your contribution will be tested by our [automated testing on GitHub CI/CD](https://github.com/SystemCraftsman/strimzi-kafka-cli/actions) to save time and mental energy. After you have submitted your pull request, you should see the GitHub CI/CD tests start to run at the bottom of your submission page. If those tests fail, click on the ___details___ button and read through the GitHub CI/CD output to understand the failure. If you do not understand, please leave a comment on your submission page and a community member will try to help. 23 | 24 | Please help us keep our issue list small by adding `fixes: #{$ISSUE_NO}` to the commit message of pull requests that resolve open issues. GitHub will use this tag to auto-close the issue when the PR is merged. 25 | 26 | ## Getting Started with Development 27 | 28 | ### Cloning the Repository 29 | 30 | To start your development journey, you will need to clone the repository from GitHub. Cloning a repository creates a local copy of the project on your machine, allowing you to make changes and contribute to the codebase. 31 | 32 | Follow these steps to clone the repository: 33 | 34 | 1. Open your web browser and navigate to the repository on GitHub. 35 | 2. On the repository page, click on the Fork button in the top-right corner. This will create a personal copy of the repository under your GitHub account. 36 | 3. Once the repository is forked, go to your GitHub profile and navigate to the forked repository. 37 | 4. Click on the "Code" button to open the cloning options for the repository. 38 | 5. Copy the URL provided in the cloning options. It should look like https://github.com/your-username/strimzi-kafka-cli.git. 39 | 6. Open your terminal or command prompt on your local machine. 40 | 7. Navigate to the directory where you want to clone the repository using the cd command. For example, to navigate to your home directory, you can use cd ~. 41 | 8. In the terminal, enter the following command to clone the repository: 42 | 43 | ```bash 44 | git clone https://github.com/your-username/strimzi-kafka-cli.git 45 | ``` 46 | Replace `your-username` with your GitHub username. 47 | 48 | 9. Press Enter to execute the command.
Git will now download the repository and create a local copy on your machine. 49 | 50 | 10. Once the cloning process is complete, you can navigate into the cloned repository using the cd command. For example: 51 | 52 | ```bash 53 | cd strimzi-kafka-cli 54 | ``` 55 | 56 | Congratulations! You have successfully cloned the repository and are ready to start development. In the next sections, we will cover the setup and configuration steps required for your development environment. 57 | 58 | ### Pre-commit Tool 59 | 60 | This project utilizes the [pre-commit](https://pre-commit.com/) tool to maintain code quality and consistency. Before submitting a pull request or making any commits, it is important to run the pre-commit tool to ensure that your changes meet the project's guidelines. 61 | 62 | To run the pre-commit tool, follow these steps: 63 | 64 | 1. Install pre-commit by running the following command: `poetry install`. This will not only install pre-commit but also all the dependencies and dev dependencies of the project. 65 | 66 | 2. Once pre-commit is installed, navigate to the project's root directory. 67 | 68 | 3. Run the command `pre-commit run --all-files`. This will execute the pre-commit hooks configured for this project against the modified files. If any issues are found, the pre-commit tool will provide feedback on how to resolve them. Make the necessary changes and re-run the pre-commit command until all issues are resolved. 69 | 70 | 4. You can also install pre-commit as a git hook by executing `pre-commit install`. Every time you run `git commit`, pre-commit will run automatically for you. 71 | 72 | ### Coding Style 73 | 74 | We want your work to be readable by others; therefore, we encourage you to note the following: 75 | 76 | - Please write in Python 3.8. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will. 77 | - Please focus hard on the naming of functions, classes, and variables. Help your reader by using __descriptive names__ that make redundant comments unnecessary. 78 | - Single letter variable names are *old school* so please avoid them unless their life only spans a few lines. 79 | - Expand acronyms because `gcd()` is hard to understand but `greatest_common_divisor()` is not. 80 | - Please follow the [Python Naming Conventions](https://pep8.org/#prescriptive-naming-conventions): variable_names and function_names should be lower_case, CONSTANTS in UPPERCASE, ClassNames should be CamelCase, etc. 81 | 82 | - We encourage the use of Python [f-strings](https://realpython.com/python-f-strings/#f-strings-a-new-and-improved-way-to-format-strings-in-python) where they make the code easier to read. 83 | 84 | ### Conventional Commits 85 | 86 | To maintain a consistent commit message format and enable automated release management, we follow the Conventional Commits specification. Please adhere to the following guidelines when making commits: 87 | 88 | - Use the format: `<type>(<scope>): <description>` 89 | 90 | - `<type>`: Represents the type of change being made. It can be one of the following: 91 | - **feat**: A new feature 92 | - **fix**: A bug fix 93 | - **docs**: Documentation changes 94 | - **style**: Code style/formatting changes 95 | - **refactor**: Code refactoring 96 | - **test**: Adding or modifying tests 97 | - **chore**: Other changes that don't modify code or test cases 98 | 99 | - `<scope>`: (Optional) Indicates the scope of the change, such as a module or component name. 100 | 101 | - `<description>`: A concise and meaningful description of the change.
102 | 103 | - Separate the type, scope, and description with colons and a space. 104 | 105 | - Use the imperative mood in the description. For example, "Add feature" instead of "Added feature" or "Adding feature". 106 | 107 | - Use present tense verbs. For example, "Fix bug" instead of "Fixed bug" or "Fixes bug". 108 | 109 | - Start the description with a capital letter and do not end it with a period. 110 | 111 | - If the commit addresses an open issue, include the issue number at the end of the description using the `#` symbol. For example, `fix(user): Resolve login issue #123`. 112 | 113 | Example commit messages: 114 | 115 | - `feat(user): Add user registration feature` 116 | - `fix(auth): Fix authentication logic` 117 | - `docs(readme): Update project documentation` 118 | - `style(css): Format stylesheets` 119 | - `refactor(api): Simplify API endpoints` 120 | - `test(utils): Add test cases for utility functions` 121 | 122 | By following these guidelines, we can maintain a clear and meaningful commit history that helps with code review, collaboration, and automated release processes. 123 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.2-alpine 2 | USER root 3 | RUN apk add --update \ 4 | curl \ 5 | && rm -rf /var/cache/apk/* 6 | RUN adduser -D kfkuser 7 | RUN pip install strimzi-kafka-cli==0.1.0a79 8 | USER kfkuser 9 | RUN mkdir /home/kfkuser/.kube 10 | RUN curl https://raw.githubusercontent.com/SystemCraftsman/strimzi-kafka-cli/main/tests/files/yaml/kubeconfig -o /home/kfkuser/.kube/config 11 | RUN kfk --version 12 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | SHELL := /bin/bash 2 | 3 | PYPI_USER= 4 | PYPI_SERVER= 5 | 6 | DIST_FILES=dist/$(shell ls -1A dist) 7 | PIP_LOG=pip-log.txt 8 | 9 | default: build 10 | 11 | clean: 12 | -rm -rf dist build .eggs *.egg-info ${PIP_LOG} 13 | 14 | lint: 15 | python -m flake8 16 | 17 | test: 18 | python -m pytest 19 | 20 | build: clean 21 | python -m build; twine check --strict dist/* 22 | 23 | install-dependencies: 24 | pip install ".[dev]" 25 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![Build](https://github.com/systemcraftsman/strimzi-kafka-cli/workflows/Build/badge.svg) ![Deploy](https://github.com/systemcraftsman/strimzi-kafka-cli/workflows/Deploy/badge.svg) [![PyPI](https://img.shields.io/pypi/v/strimzi-kafka-cli)](https://pypi.org/project/strimzi-kafka-cli/) [![Downloads](https://static.pepy.tech/badge/strimzi-kafka-cli)](https://pepy.tech/project/strimzi-kafka-cli) [![License](https://img.shields.io/badge/license-Apache--2.0-blue.svg)](http://www.apache.org/licenses/LICENSE-2.0) [![Strimzi](https://img.shields.io/badge/Strimzi-0.40.0-blue)](https://github.com/strimzi/strimzi-kafka-operator/releases/tag/0.40.0) [![AMQ Streams](https://img.shields.io/badge/AMQ Streams-2.6.0-red)](https://access.redhat.com/documentation/en-us/red_hat_amq_streams/2.6) 2 | 3 | ![Strimzi CLI logo](https://github.com/SystemCraftsman/strimzi-kafka-cli/assets/10568159/596ea147-9594-4262-a0c3-d63fa14f0577) 4 | 5 | # Strimzi Kafka CLI 6 | 7 | Strimzi Kafka CLI is a CLI that helps traditional Apache Kafka users -both developers and administrators- to easily adapt to [Strimzi](https://strimzi.io/), 8 | a [Kubernetes operator](https://operatorhub.io/operator/strimzi-kafka-operator) for [Apache Kafka](https://kafka.apache.org/). 9 | 10 | The main intention is to ramp up Strimzi usage by creating a similar CLI experience with the traditional Apache Kafka tools that mostly starts with `kafka-*` prefix under the `bin` directory in an ordinary Kafka package. 11 | 12 | Strimzi Kafka CLI uses the `kfk` command as an abbreviation for "**K**afka **F**or **K**ubernetes" or simply "**k** a **f** **k** a" which reminds of the `kafka-*` prefix of the ordinary Kafka script file names. 13 | 14 | While having similar set of commands or options for some of the common objects, Strimzi Kafka CLI has some extra capabilities for managing or configuring Strimzi related resources. 15 | 16 | Following are the commands of the current version of Strimzi Kafka CLI, that are used for different purposes: 17 | 18 | ``` bash 19 | Usage: kfk [OPTIONS] COMMAND [ARGS]... 20 | 21 | Strimzi Kafka CLI 22 | 23 | Options: 24 | --version Show the version and exit. 25 | --help Show this message and exit. 26 | 27 | Commands: 28 | acls Manages ACLs on Kafka. 29 | clusters Creates, alters, deletes, describes Kafka cluster(s). 30 | configs Adds/Removes entity config for a topic, client, user or... 31 | connect Creates, alters, deletes, describes Kafka connect... 32 | console-consumer Reads data from Kafka topics and outputs it to standard... 33 | console-producer Reads data from standard input and publish it to Kafka. 
34 | env Prints the environment variable values for Strimzi Kafka CLI 35 | operator Installs/Uninstalls Strimzi Kafka Operator 36 | topics Creates, alters, deletes, describes Kafka topic(s). 37 | users Creates, alters, deletes, describes Kafka users(s). 38 | ``` 39 | 40 | Please take a look at the relevant article [Strimzi Kafka CLI: Managing Strimzi in a Kafka Native Way](https://www.systemcraftsman.com/2020/08/25/strimzi-kafka-cli-managing-strimzi-in-a-kafka-native-way/) for more details. 41 | 42 | ## Installation 43 | 44 | ### Using Python Package Installer 45 | 46 | ``` bash 47 | pip install strimzi-kafka-cli --user 48 | ``` 49 | 50 | Or to install Strimzi Kafka CLI in an isolated environment, you can simply use [pipx](https://pypa.github.io/pipx/): 51 | 52 | ``` bash 53 | pipx install strimzi-kafka-cli 54 | ``` 55 | 56 | ### Using Homebrew 57 | 58 | ``` bash 59 | #Tap the homebrew repository first. 60 | brew tap systemcraftsman/strimzi-kafka-cli 61 | 62 | #Install Strimzi Kafka CLI 63 | brew install strimzi-kafka-cli 64 | ``` 65 | 66 | > Installing the CLI by using Homebrew already uses a virtual environment, so you don't have to worry about your main Python environment. 67 | 68 | Project requires: Python >=3.8 69 | 70 | ## Examples 71 | 72 | * [TLS Authentication](https://github.com/systemcraftsman/strimzi-kafka-cli/tree/master/examples/2_tls_authentication) 73 | * [Simple ACL Authorization](https://github.com/systemcraftsman/strimzi-kafka-cli/tree/master/examples/3_simple_acl_authorization) 74 | * [Topic, User and Broker Configuration](https://github.com/systemcraftsman/strimzi-kafka-cli/tree/master/examples/4_configuration) 75 | * [Kafka Connect](https://github.com/systemcraftsman/strimzi-kafka-cli/tree/master/examples/5_connect) 76 | 77 | ## Dependencies 78 | ### Python Dependencies 79 | Please see [pyproject.toml](https://github.com/SystemCraftsman/strimzi-kafka-cli/blob/main/pyproject.toml) file. 80 | ### External Dependencies 81 | `kubectl` and `Strimzi resources` are the tools that Strimzi Kafka CLI uses. These dependencies are automatically downloaded when the first `kfk` command is run. You can always check the dependency versions of your CLI with the following command: 82 | 83 | ``` bash 84 | kfk --version 85 | ``` 86 | 87 | You can change where you want to locate the `kubectl`, `Strimzi resources`, or `Strimzi CLI` files/folders. You can use the following environment variables: 88 | 89 | **STRIMZI_KAFKA_CLI_BASE_PATH:** Set this if you want to have a custom Strimzi Kafka CLI folder. It is `~/.strimzi-kafka-cli` as default. 90 | 91 | **STRIMZI_KAFKA_CLI_STRIMZI_PATH:** Set this if you want to use a custom Strimzi/AMQ Streams. We only recommend this when using AMQ Streams instead of Strimzi. 92 | 93 | **STRIMZI_KAFKA_CLI_KUBECTL_PATH:** Set this if you want to use a custom kubectl. 
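For example, to point the CLI at custom locations, you can export these variables in your shell before running any `kfk` command. A minimal sketch (the paths below are only placeholders; adjust them to your own setup):

``` bash
# Hypothetical locations -- replace them with your own paths
export STRIMZI_KAFKA_CLI_BASE_PATH=$HOME/.my-strimzi-kafka-cli
export STRIMZI_KAFKA_CLI_STRIMZI_PATH=$HOME/tools/strimzi
export STRIMZI_KAFKA_CLI_KUBECTL_PATH=/usr/local/bin/kubectl

# Verify which dependency versions the CLI picks up
kfk --version
```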
94 | -------------------------------------------------------------------------------- /examples/.gitignore: -------------------------------------------------------------------------------- 1 | *.jks 2 | *.p12 3 | *.bck 4 | -------------------------------------------------------------------------------- /examples/2_tls_authentication/client.properties: -------------------------------------------------------------------------------- 1 | security.protocol=SSL 2 | ssl.truststore.location=./truststore.jks 3 | ssl.truststore.password=123456 4 | ssl.keystore.location=./user.p12 5 | ssl.keystore.password=123456 6 | -------------------------------------------------------------------------------- /examples/2_tls_authentication/get_keys.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | rm truststore.jks user.p12 4 | 5 | oc extract secret/my-user -n kafka --keys=user.crt --to=- > user.crt 6 | oc extract secret/my-user -n kafka --keys=user.key --to=- > user.key 7 | oc extract secret/my-cluster-cluster-ca-cert -n kafka --keys=ca.crt --to=- > ca.crt 8 | 9 | echo "yes" | keytool -import -trustcacerts -file ca.crt -keystore truststore.jks -storepass 123456 10 | RANDFILE=/tmp/.rnd openssl pkcs12 -export -in user.crt -inkey user.key -name my-user -password pass:123456 -out user.p12 11 | 12 | rm user.crt user.key ca.crt 13 | -------------------------------------------------------------------------------- /examples/2_tls_authentication/readme.md: -------------------------------------------------------------------------------- 1 | # TLS Authentication 2 | 3 | In this example we will demonstrate setting up TLS authentication for Strimzi using Strimzi Kafka CLI. So let's get started! 4 | 5 | 6 | First lets list the clusters and see our clusters list. 7 | 8 | ```shell 9 | kfk clusters --list 10 | ``` 11 | 12 | --- 13 | **IMPORTANT** 14 | 15 | If you don't have any Kafka cluster that is created on your OpenShift/Kubernetes, pls. see the [Strimzi Quick Start](https://strimzi.io/quickstarts/) document or simply use: 16 | 17 | ```shell 18 | kfk clusters --create --cluster my-cluster -n kafka 19 | ``` 20 | 21 | --- 22 | 23 | 24 | Assuming we have a cluster called `my-cluster` already set up for us let's list the topics in the cluster 25 | 26 | ```shell 27 | kfk topics --list -n kafka -c my-cluster 28 | ``` 29 | 30 | If it is a new cluster probably there is no topic living in the cluster yet. So let's create a new topic for our example. 31 | 32 | Create a topic called `my-topic` with 12 partitions and replication 33 | factor 3 in `my-cluster` cluster 34 | 35 | ```shell 36 | kfk topics --create --topic my-topic --partitions 12 --replication-factor 3 -n kafka -c my-cluster 37 | ``` 38 | 39 | Run console producer to produce messages to `my-topic` 40 | 41 | ```shell 42 | kfk console-producer --topic my-topic -n kafka -c my-cluster 43 | ``` 44 | 45 | Run console consumer to consume messages from `my-topic` 46 | 47 | ```shell 48 | kfk console-consumer --topic my-topic -n kafka -c my-cluster 49 | ``` 50 | 51 | After being sure to produce and consume messages without a problem, now lets enable the authentication for TLS. In Strimzi, if you want to 52 | enable authentication, there are listeners configurations that provides 53 | a couple of authentication methodologies like `scram-sha-512`, `oauth` 54 | and `tls`. 
55 | 56 | In order to enable the authentication we have to alter our Kafka 57 | cluster: 58 | 59 | ```shell 60 | kfk clusters --alter --cluster my-cluster -n kafka 61 | ``` 62 | 63 | An editor will be opened in order to change the Strimzi Kafka cluster 64 | configuration. Since Strimzi Kafka cluster resource has many items 65 | inside, for now, we don’t have any special property flag in order to 66 | directly set the value while altering. That’s why we only open the 67 | cluster custom resource available for editing. 68 | 69 | In the opened editor we have to add the following listeners as: 70 | 71 | ```yaml 72 | listeners: 73 | plain: {} 74 | tls: 75 | authentication: 76 | type: tls 77 | ``` 78 | 79 | If you want to fully secure your cluster you have to also change the 80 | plain listener for authentication, because with the upper configuration 81 | unless we use a client configuration that doesn’t use SSL security 82 | protocol it will use the plain one which doesn’t require any 83 | authentication. In order to do that, we can tell the plain listener in 84 | cluster config to use one of the authentication methodologies among 85 | `scram-sha-512` or `oauth`. In this example we will set it as 86 | `scram-sha-512` but we will show the authentication via `scram-sha-512` 87 | in another example. 88 | 89 | So the latest listener definition should be like this: 90 | 91 | ```yaml 92 | listeners: 93 | plain: 94 | authentication: 95 | type: scram-sha-512 96 | tls: 97 | authentication: 98 | type: tls 99 | ``` 100 | 101 | Save the file and see the successfully edited message. 102 | 103 | After the configuration change all the broker pods will be updated one 104 | by one, thanks to our operator. You can watch the pods state since we 105 | have to wait till each of them are in `ready` state. 106 | 107 | ```shell 108 | watch kubectl get pods -n kafka 109 | ``` 110 | 111 | Now lets run our console producer and consumer again and see what 112 | happens: 113 | 114 | ```shell 115 | kfk console-producer --topic my-topic -n kafka -c my-cluster 116 | ``` 117 | 118 | ```shell 119 | kfk console-consumer --topic my-topic -n kafka -c my-cluster 120 | ``` 121 | 122 | You got some WARN log messages saying `disconnected (org.apache.kafka.clients.NetworkClient)` from both 123 | producer and consumer right? 124 | 125 | When we check the first pod logs that we ran the producer and consumer 126 | commands we can see the failed authentication message: 127 | 128 | ```shell 129 | kubectl logs -f my-cluster-kafka-0 -c kafka -n kafka 130 | ``` 131 | 132 | 2020-09-22 11:18:33,122 INFO [SocketServer brokerId=0] Failed authentication with /10.130.2.58 (Unexpected Kafka request of type METADATA during SASL handshake.) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(PLAIN-9092)-SASL_PLAINTEXT-3] 133 | 134 | Since we are not yet using SSL for authentication, but the PLAIN 135 | connection method, which we set up as `scram-sha-512`, we can not 136 | authenticate to the Strimzi Kafka cluster. 
137 | 138 | In order to login this cluster via SSL authentication we have to; 139 | 140 | * Create a user that uses TLS authentication 141 | * Create truststore and keystore files by getting the certificates from Openshift/Kubernetes cluster 142 | * Create a client.properties file that is to be used by producer and consumer clients in order to be able to authenticate via TLS 143 | 144 | Let's first create the user with the name `my-user`: 145 | 146 | ```shell 147 | kfk users --create --user my-user --authentication-type tls -n kafka -c my-cluster 148 | ``` 149 | 150 | After creating the user let's describe it to view a few attributes: 151 | 152 | ```shell 153 | kfk users --describe --user my-user -n kafka -c my-cluster 154 | ``` 155 | At the bottom of the details of the user; in the status section, you can see a secret and a username definition: 156 | 157 | ``` 158 | Name: my-user 159 | Namespace: kafka 160 | Labels: strimzi.io/cluster=my-cluster 161 | Annotations: 162 | API Version: kafka.strimzi.io/v1beta1 163 | Kind: KafkaUser 164 | Metadata: 165 | Creation Timestamp: 2020-09-21T12:54:52Z 166 | Generation: 3 167 | Resource Version: 53996010 168 | Self Link: /apis/kafka.strimzi.io/v1beta1/namespaces/kafka/kafkausers/my-user 169 | UID: 1c1dad0c-4e7a-4e63-933c-a785e6941021 170 | Spec: 171 | Authentication: 172 | Type: tls 173 | Status: 174 | Observed Generation: 3 175 | Secret: my-user 176 | Username: CN=my-user 177 | Events: 178 | ``` 179 | 180 | This means that a secret named `my-user` is created for this user and with the username `CN=my-user` as a common name definition. 181 | 182 | In the secrets there are private and public keys that should be imported in the truststore and the keystore files that will be created shortly. 183 | 184 | ```shell 185 | kubectl describe secret/my-user -n kafka 186 | ``` 187 | 188 | ``` 189 | Name: my-user 190 | Namespace: kafka 191 | Labels: app.kubernetes.io/instance=my-user 192 | app.kubernetes.io/managed-by=strimzi-user-operator 193 | app.kubernetes.io/name=strimzi-user-operator 194 | app.kubernetes.io/part-of=strimzi-my-user 195 | strimzi.io/cluster=my-cluster 196 | strimzi.io/kind=KafkaUser 197 | Annotations: 198 | 199 | Type: Opaque 200 | 201 | Data 202 | ==== 203 | ca.crt: 1164 bytes 204 | user.crt: 1009 bytes 205 | user.key: 1704 bytes 206 | user.p12: 2364 bytes 207 | user.password: 12 bytes 208 | ``` 209 | 210 | In order create the truststore and keystore files just run the get_keys.sh file in the [example directory](https://github.com/systemcraftsman/strimzi-kafka-cli/blob/master/examples/2_tls_authentication/get_keys.sh): 211 | 212 | ```shell 213 | chmod a+x ./get_keys.sh;./get_keys.sh 214 | ``` 215 | 216 | This will generate two files: 217 | 218 | * `truststore.jks` for the client's truststore definition 219 | * `user.p12` for the client's keystore definition 220 | 221 | TLS authentications are made with bidirectional TLS handshake. In order to do this apart from a truststore that has the public key imported, a keystore file that has both the public and private keys has to be created and defined in the client configuration file. 222 | 223 | So let's create our client configuration file. 
224 | 225 | Our client configuration should have a few definitions: 226 | 227 | * Security protocol 228 | * Truststore location and password 229 | * Keystore location and password 230 | 231 | The security protocol should be `SSL`, and since the truststore and keystore files are located in the example directory, the client config file should look something like this: 232 | 233 | ```properties 234 | security.protocol=SSL 235 | ssl.truststore.location=./truststore.jks 236 | ssl.truststore.password=123456 237 | ssl.keystore.location=./user.p12 238 | ssl.keystore.password=123456 239 | ``` 240 | 241 | Since the `get_keys.sh` script sets the store passwords to `123456`, we use that value in the config file. 242 | 243 | Save it as `client.properties` (or just use the one that is already created in this directory with the name `client.properties`). 244 | 245 | Now it's time to test it. Let's call the console producer and consumer again, but this time with the client configuration: 246 | 247 | --- 248 | **IMPORTANT** 249 | 250 | Be careful to run the producer and consumer commands from the example's directory. Otherwise you have to change the truststore and keystore paths in the `client.properties` file. 251 | 252 | --- 253 | 254 | ```shell 255 | kfk console-producer --topic my-topic -n kafka -c my-cluster --producer.config client.properties 256 | ``` 257 | The console producer seems to be working just fine, since we can produce messages. 258 | 259 | ``` 260 | >message1 261 | >message2 262 | >message3 263 | > 264 | ``` 265 | 266 | Let's run the console consumer to consume the messages we just produced: 267 | 268 | ```shell 269 | kfk console-consumer --topic my-topic -n kafka -c my-cluster --consumer.config client.properties 270 | ``` 271 | 272 | ``` 273 | message1 274 | message2 275 | message3 276 | ``` 277 | 278 | Worked like a charm! 279 | 280 | We are able to configure the Strimzi cluster and use the client configurations for TLS authentication easily with Strimzi Kafka CLI. 281 | -------------------------------------------------------------------------------- /examples/3_simple_acl_authorization/client.properties: -------------------------------------------------------------------------------- 1 | security.protocol=SSL 2 | ssl.truststore.location=./truststore.jks 3 | ssl.truststore.password=123456 4 | ssl.keystore.location=./user.p12 5 | ssl.keystore.password=123456 6 | group.id=my-group 7 | -------------------------------------------------------------------------------- /examples/3_simple_acl_authorization/readme.md: -------------------------------------------------------------------------------- 1 | # Simple ACL Authorization 2 | 3 | In the previous example we implemented TLS authentication on a Strimzi Kafka cluster with Strimzi Kafka CLI. In this example, we will continue by enabling ACL authorization, so that we can restrict access to our topics and allow only the users or groups we want. 4 | 5 | 6 | Let's first see our cluster list: 7 | 8 | ```shell 9 | kfk clusters --list 10 | ``` 11 | ``` 12 | NAMESPACE NAME DESIRED KAFKA REPLICAS DESIRED ZK REPLICAS 13 | kafka my-cluster 3 3 14 | ``` 15 | 16 | --- 17 | **IMPORTANT** 18 | 19 | You should have a cluster called `my-cluster` in the namespace `kafka` that we created before. If you don't have the cluster and haven't yet done the authentication part, please go back to the previous example and do it first, since authorization requires authentication to be set up beforehand.
20 | 21 | Also, please copy the `truststore.jks` and `user.p12` files, or recreate them as explained in the previous example, and put them in the example folder (they are ignored in git). 22 | 23 | --- 24 | 25 | Considering you have the cluster `my-cluster` in the namespace `kafka`, let's list our topics to see the topic we created before: 26 | 27 | ```shell 28 | kfk topics --list -n kafka -c my-cluster 29 | ``` 30 | 31 | ``` 32 | NAME PARTITIONS REPLICATION FACTOR 33 | consumer-offsets---84e7a678d08f4bd226872e5cdd4eb527fadc1c6a 50 3 34 | my-topic 12 3 35 | ``` 36 | 37 | Lastly, let's list the user that we created previously, for which we will be setting up the authorization. 38 | 39 | ```shell 40 | kfk users --list -n kafka -c my-cluster 41 | ``` 42 | 43 | ``` 44 | NAME AUTHENTICATION AUTHORIZATION 45 | my-user tls 46 | ``` 47 | 48 | As you can see, we have the `my-user` user that we created and authenticated in the previous example. 49 | 50 | Now let's configure our cluster to enable ACL authorization. We have to alter our cluster for this: 51 | 52 | ```shell 53 | kfk clusters --alter --cluster my-cluster -n kafka 54 | ``` 55 | 56 | and put the simple authorization definition under `kafka` like the following: 57 | 58 | ```yaml 59 | authorization: 60 | type: simple 61 | ``` 62 | 63 | After saving the cluster configuration, wait for the brokers to be rolling-updated by checking their status: 64 | 65 | ```shell 66 | watch kubectl get pods -n kafka 67 | ``` 68 | 69 | Now it's time to run the producer and consumer to check whether authorization is enabled: 70 | 71 | ```shell 72 | kfk console-producer --topic my-topic -n kafka -c my-cluster --producer.config client.properties 73 | ``` 74 | 75 | ``` 76 | ERROR Error when sending message to topic my-topic with key: null, value: 4 bytes with error: (org.apache.kafka.clients.producer.internals.ErrorLoggingCallback) 77 | org.apache.kafka.common.errors.TopicAuthorizationException: Not authorized to access topics: [my-topic] 78 | ``` 79 | 80 | ```shell 81 | kfk console-consumer --topic my-topic -n kafka -c my-cluster --consumer.config client.properties 82 | ``` 83 | 84 | ``` 85 | ERROR Error processing message, terminating consumer process: (kafka.tools.ConsoleConsumer$) 86 | org.apache.kafka.common.errors.TopicAuthorizationException: Not authorized to access topics: [my-topic] 87 | Processed a total of 0 messages 88 | ``` 89 | 90 | As you can observe, both the producer and the consumer returned a `TopicAuthorizationException` saying `Not authorized to access topics: [my-topic]`. So let's define authorization access to this topic for the user `my-user`. 91 | 92 | In order to enable the user's authorization, we have to both set the user's authorization type to `simple` (so that it uses Apache Kafka's `SimpleAclAuthorizer`) and define the ACLs for the relevant topic, in this case `my-topic`. To do this, we need to alter the user with the following command options: 93 | 94 | ```shell 95 | kfk users --alter --user my-user --authorization-type simple --add-acl --resource-type topic --resource-name my-topic -n kafka -c my-cluster 96 | ``` 97 | 98 | The `--add-acl` option requires arguments like: 99 | 100 | ``` 101 | 102 | --operation TEXT Operation that is being allowed or denied. 103 | (default: All) 104 | 105 | --host TEXT Host which User will have access. (default: 106 | *) 107 | 108 | --type [allow|deny] Operation type for ACL.
(default: allow) 109 | --resource-type TEXT This argument is mutually inclusive with 110 | ['add_acl', 'delete_acl'] 111 | 112 | --resource-name TEXT This argument is mutually inclusive with 113 | ['add_acl', 'delete_acl'] 114 | 115 | ``` 116 | 117 | In this example we only used `--resource-type` and `--resource-name`, since those are the required fields and the others have defaults we can rely on. 118 | 119 | So in this case we used the defaults of `type:allow`, `host:*` and `operation:All`. The equivalent command would look like this: 120 | 121 | ```shell 122 | kfk users --alter --user my-user --authorization-type simple --add-acl --resource-type topic --resource-name my-topic --type allow --host * --operation All -n kafka -c my-cluster 123 | ``` 124 | 125 | In order to see the ACL that allows all operations on `my-topic` for the user `my-user`, let's describe the user, this time in YAML format: 126 | 127 | ```shell 128 | kfk users --describe --user my-user -n kafka -c my-cluster -o yaml 129 | ``` 130 | 131 | ``` 132 | apiVersion: kafka.strimzi.io/v1beta1 133 | kind: KafkaUser 134 | metadata: 135 | ... 136 | spec: 137 | authentication: 138 | type: tls 139 | authorization: 140 | type: simple 141 | acls: 142 | - host: '*' 143 | operation: All 144 | resource: 145 | name: my-topic 146 | patternType: literal 147 | type: topic 148 | type: allow 149 | status: 150 | ... 151 | ``` 152 | 153 | As you can see, the user has its authorization type defined as `simple` and an ACL that allows all (read, write, describe) operations on `my-topic` for this user. 154 | 155 | Now, with the updated configuration, let's run our producer and consumer again: 156 | 157 | ```shell 158 | kfk console-producer --topic my-topic -n kafka -c my-cluster --producer.config client.properties 159 | ``` 160 | 161 | ``` 162 | >message1 163 | >message2 164 | >message3 165 | > 166 | ``` 167 | 168 | It seems that we are able to produce messages to `my-topic`. Let's consume those messages then: 169 | 170 | ```shell 171 | kfk console-consumer --topic my-topic -n kafka -c my-cluster --consumer.config client.properties 172 | ``` 173 | 174 | ``` 175 | ERROR Error processing message, terminating consumer process: (kafka.tools.ConsoleConsumer$) 176 | org.apache.kafka.common.errors.GroupAuthorizationException: Not authorized to access group: console-consumer-96150 177 | Processed a total of 0 messages 178 | ``` 179 | 180 | Whoops! It did not work like the producer did. Why? Because consuming also requires `Read` permission on the consumer group, and the consumer group that was randomly generated for us (because we did not define one anywhere) has no ACL defined for it. 181 | 182 | --- 183 | **IMPORTANT** 184 | 185 | In Apache Kafka, if you want to consume messages you have to do it via a consumer group. You might say "we did not specify any consumer group while using the console consumer". Well, just like Kafka's traditional console consumer, it uses a randomly created consumer group id, so you do have a consumer group; it is simply created for you (like `console-consumer-96150` above) since we did not define one ourselves. 186 | 187 | --- 188 | 189 | Ok then. Now let's add an ACL for a consumer group, so that it can be used to read from the `my-topic` topic. Let's call this group `my-group`; we will also use it as the group id in our consumer client configuration. This time let's use the `kfk acls` command, which works like the `kfk users --alter --add-acl` command.
In order to give Strimzi CLI users the most familiar experience, just like the traditional `bin/kafka-acls.sh` command, the `kfk acls` command works mostly the same as the traditional one. 190 | 191 | With the following command, we grant the user `my-user` the `Read` right on the `my-group` group for consuming messages. 192 | 193 | ```shell 194 | kfk acls --add --allow-principal User:my-user --group my-group --operation Read -n kafka -c my-cluster 195 | ``` 196 | 197 | After adding the ACL, let's check whether our user has the ACL for the group: 198 | 199 | ```shell 200 | kfk users --describe --user my-user -n kafka -c my-cluster -o yaml 201 | ``` 202 | 203 | In the `acls` section of the YAML you can see that the new entry has been added: 204 | 205 | ``` 206 | - host: '*' 207 | operation: Read 208 | resource: 209 | name: my-group 210 | patternType: literal 211 | type: group 212 | type: allow 213 | ``` 214 | 215 | You can also list the ACLs with the following command, which lists all the ACLs natively from Kafka: 216 | 217 | ```shell 218 | kfk acls --list -n kafka -c my-cluster 219 | ``` 220 | 221 | ``` 222 | Current ACLs for resource `ResourcePattern(resourceType=GROUP, name=my-group, patternType=LITERAL)`: 223 | (principal=User:CN=my-user, host=*, operation=READ, permissionType=ALLOW) 224 | 225 | Current ACLs for resource `ResourcePattern(resourceType=TOPIC, name=my-topic, patternType=LITERAL)`: 226 | (principal=User:CN=my-user, host=*, operation=ALL, permissionType=ALLOW) 227 | ``` 228 | 229 | Or you can list topic and group ACLs separately: 230 | 231 | ```shell 232 | kfk acls --list --topic my-topic -n kafka -c my-cluster 233 | ``` 234 | 235 | ``` 236 | Current ACLs for resource `ResourcePattern(resourceType=TOPIC, name=my-topic, patternType=LITERAL)`: 237 | (principal=User:CN=my-user, host=*, operation=ALL, permissionType=ALLOW) 238 | ``` 239 | 240 | For the group ACLs: 241 | 242 | ```shell 243 | kfk acls --list --group my-group -n kafka -c my-cluster 244 | ``` 245 | 246 | ``` 247 | Current ACLs for resource `ResourcePattern(resourceType=GROUP, name=my-group, patternType=LITERAL)`: 248 | (principal=User:CN=my-user, host=*, operation=READ, permissionType=ALLOW) 249 | ``` 250 | 251 | 252 | The only thing left to do is to put the group id definition in our `client.properties` file: 253 | 254 | ``` 255 | security.protocol=SSL 256 | ssl.truststore.location=./truststore.jks 257 | ssl.truststore.password=123456 258 | ssl.keystore.location=./user.p12 259 | ssl.keystore.password=123456 260 | group.id=my-group 261 | ``` 262 | 263 | Running the consumer again with the updated client configuration (this time consuming from the beginning), let's see the previously produced messages: 264 | 265 | ```shell 266 | kfk console-consumer --topic my-topic -n kafka -c my-cluster --consumer.config client.properties --from-beginning 267 | ``` 268 | 269 | ``` 270 | message1 271 | message2 272 | message3 273 | ``` 274 | 275 | Voilà! 276 | 277 | We are able to configure the Strimzi cluster for ACL authorization, define ACLs easily with different methods, and use the client configurations successfully with Strimzi Kafka CLI.
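If you later need to revoke these permissions, the same `kfk acls` command also provides a `--remove` flag; a sketch that mirrors the add commands above would look like this:

```shell
# Remove the topic ACL and the group ACL granted to my-user (sketch; flags as shown in the help output above)
kfk acls --remove --allow-principal User:my-user --topic my-topic --operation All -n kafka -c my-cluster
kfk acls --remove --allow-principal User:my-user --group my-group --operation Read -n kafka -c my-cluster
```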
278 | -------------------------------------------------------------------------------- /examples/5_connect/camel_es_connector.properties: -------------------------------------------------------------------------------- 1 | name=camel-elasticsearch-sink-demo 2 | tasks.max=1 3 | connector.class=org.apache.camel.kafkaconnector.elasticsearchrest.CamelElasticsearchrestSinkConnector 4 | 5 | value.converter=org.apache.kafka.connect.storage.StringConverter 6 | 7 | topics=twitter-status-connect 8 | camel.sink.endpoint.hostAddresses=elasticsearch-es-http:9200 9 | camel.sink.endpoint.indexName=tweets 10 | camel.sink.endpoint.operation=Index 11 | camel.sink.path.clusterName=elasticsearch 12 | errors.tolerance=all 13 | errors.log.enable=true 14 | errors.log.include.messages=true 15 | -------------------------------------------------------------------------------- /examples/5_connect/connect.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | # These are defaults. This file just demonstrates how to override some settings. 17 | bootstrap.servers=my-cluster-kafka-bootstrap:9092 18 | 19 | # The converters specify the format of data in Kafka and how to translate it into Connect data. Every Connect user will 20 | # need to configure these based on the format they want their data in when loaded from or stored into Kafka 21 | key.converter=org.apache.kafka.connect.json.JsonConverter 22 | value.converter=org.apache.kafka.connect.json.JsonConverter 23 | # Converter-specific settings can be passed in by prefixing the Converter's setting with the converter we want to apply 24 | # it to 25 | key.converter.schemas.enable=false 26 | value.converter.schemas.enable=false 27 | 28 | offset.storage.topic=connect-cluster-offsets 29 | config.storage.topic=connect-cluster-configs 30 | status.storage.topic=connect-cluster-status 31 | config.storage.replication.factor=1 32 | offset.storage.replication.factor=1 33 | status.storage.replication.factor=1 34 | 35 | # Set to a list of filesystem paths separated by commas (,) to enable class loading isolation for plugins 36 | # (connectors, converters, transformations). The list should consist of top level directories that include 37 | # any combination of: 38 | # a) directories immediately containing jars with plugins and their dependencies 39 | # b) uber-jars with plugins and their dependencies 40 | # c) directories immediately containing the package directory structure of classes of plugins and their dependencies 41 | # Note: symlinks will be followed to discover dependencies or plugins. 
42 | # Examples: 43 | # plugin.path=/usr/local/share/java,/usr/local/share/kafka/plugins,/opt/connectors, 44 | #plugin.path=connectors 45 | 46 | image=quay.io/systemcraftsman/demo-connect-cluster:latest 47 | plugin.url=https://github.com/jcustenborder/kafka-connect-twitter/releases/download/0.2.26/kafka-connect-twitter-0.2.26.tar.gz 48 | -------------------------------------------------------------------------------- /examples/5_connect/connect_v2.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | # These are defaults. This file just demonstrates how to override some settings. 17 | bootstrap.servers=my-cluster-kafka-bootstrap:9092 18 | 19 | # The converters specify the format of data in Kafka and how to translate it into Connect data. Every Connect user will 20 | # need to configure these based on the format they want their data in when loaded from or stored into Kafka 21 | key.converter=org.apache.kafka.connect.json.JsonConverter 22 | value.converter=org.apache.kafka.connect.json.JsonConverter 23 | # Converter-specific settings can be passed in by prefixing the Converter's setting with the converter we want to apply 24 | # it to 25 | key.converter.schemas.enable=false 26 | value.converter.schemas.enable=false 27 | 28 | offset.storage.topic=connect-cluster-offsets 29 | config.storage.topic=connect-cluster-configs 30 | status.storage.topic=connect-cluster-status 31 | config.storage.replication.factor=1 32 | offset.storage.replication.factor=1 33 | status.storage.replication.factor=1 34 | 35 | # Set to a list of filesystem paths separated by commas (,) to enable class loading isolation for plugins 36 | # (connectors, converters, transformations). The list should consist of top level directories that include 37 | # any combination of: 38 | # a) directories immediately containing jars with plugins and their dependencies 39 | # b) uber-jars with plugins and their dependencies 40 | # c) directories immediately containing the package directory structure of classes of plugins and their dependencies 41 | # Note: symlinks will be followed to discover dependencies or plugins. 
42 | # Examples: 43 | # plugin.path=/usr/local/share/java,/usr/local/share/kafka/plugins,/opt/connectors, 44 | #plugin.path=connectors 45 | 46 | image=quay.io/systemcraftsman/demo-connect-cluster:latest 47 | plugin.url=https://github.com/jcustenborder/kafka-connect-twitter/releases/download/0.2.26/kafka-connect-twitter-0.2.26.tar.gz,https://repo.maven.apache.org/maven2/org/apache/camel/kafkaconnector/camel-elasticsearch-rest-kafka-connector/0.10.0/camel-elasticsearch-rest-kafka-connector-0.10.0-package.tar.gz 48 | -------------------------------------------------------------------------------- /examples/5_connect/elasticsearch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: elasticsearch.k8s.elastic.co/v1 2 | kind: Elasticsearch 3 | metadata: 4 | name: elasticsearch 5 | spec: 6 | auth: {} 7 | http: 8 | service: 9 | metadata: 10 | creationTimestamp: null 11 | spec: {} 12 | tls: 13 | certificate: {} 14 | selfSignedCertificate: 15 | disabled: true 16 | nodeSets: 17 | - config: 18 | node.data: true 19 | node.ingest: true 20 | node.master: true 21 | node.store.allow_mmap: false 22 | xpack.security.authc: 23 | anonymous: 24 | authz_exception: true 25 | roles: superuser 26 | count: 1 27 | name: default 28 | podTemplate: 29 | metadata: 30 | creationTimestamp: null 31 | labels: 32 | app.openshift.io/runtime: elasticsearch 33 | spec: 34 | containers: 35 | - env: 36 | - name: ES_JAVA_OPTS 37 | value: '-Xms1g -Xmx1g' 38 | name: elasticsearch 39 | resources: {} 40 | transport: 41 | service: 42 | metadata: 43 | creationTimestamp: null 44 | spec: {} 45 | updateStrategy: 46 | changeBudget: {} 47 | version: 7.12.1 48 | -------------------------------------------------------------------------------- /examples/5_connect/scripts/reset_example.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #Delete connect sources 3 | kfk connect connectors --delete --connector twitter-source-demo -c my-connect-cluster -n kafka 4 | kfk connect connectors --delete --connector camel-elasticsearch-sink-demo -c my-connect-cluster -n kafka 5 | kfk connect clusters --delete --cluster my-connect-cluster -n kafka -y 6 | 7 | #Delete topics 8 | kfk topics --delete --topic twitter-status-connect -c my-cluster -n kafka 9 | kfk topics --delete --topic twitter-deletes-connect -c my-cluster -n kafka 10 | 11 | #Optional (Kafka Cluster & ElasticSearch deletion) 12 | #kfk clusters --delete --cluster my-cluster -n kafka -y ; 13 | #oc delete -f elasticsearch.yaml -n kafka 14 | -------------------------------------------------------------------------------- /examples/5_connect/scripts/setup_example.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #Run this first if the cluster operator version is not compitable with the current Strimzi version 3 | #export STRIMZI_KAFKA_CLI_STRIMZI_VERSION=0.22.1 4 | kfk clusters --create --cluster my-cluster --replicas 2 --zk-replicas 1 -n kafka -y 5 | oc create -f ../elasticsearch.yaml -n kafka 6 | oc expose svc elasticsearch-es-http -n kafka 7 | -------------------------------------------------------------------------------- /examples/5_connect/twitter_connector.properties: -------------------------------------------------------------------------------- 1 | name=twitter-source-demo 2 | tasks.max=1 3 | connector.class=com.github.jcustenborder.kafka.connect.twitter.TwitterSourceConnector 4 | 5 | # Set these required values 6 | process.deletes=false 
7 | filter.keywords=kafka 8 | kafka.status.topic=twitter-status-connect 9 | kafka.delete.topic=twitter-deletes-connect 10 | # put your own credentials here - don't share with anyone 11 | twitter.oauth.consumerKey=_YOUR_CONSUMER_KEY_ 12 | twitter.oauth.consumerSecret=_YOUR_CONSUMER_SECRET_ 13 | twitter.oauth.accessToken=_YOUR_ACCESS_TOKEN_ 14 | twitter.oauth.accessTokenSecret=_YOUR_ACCESS_TOKEN_SECRET_ 15 | -------------------------------------------------------------------------------- /kfk/argument_extensions.py: -------------------------------------------------------------------------------- 1 | import click 2 | 3 | 4 | class NotRequiredIf(click.Argument): 5 | # TODO: Refactor here 6 | 7 | def __init__(self, *args, **kwargs): 8 | self.arguments = kwargs.pop("arguments") 9 | assert self.arguments, "'arguments' parameter required" 10 | super(NotRequiredIf, self).__init__(*args, **kwargs) 11 | 12 | def handle_parse_result(self, ctx, opts, args): 13 | control_arguments_exist = False 14 | 15 | for arguments in self.arguments: 16 | control_arguments_exist = arguments in opts 17 | if control_arguments_exist is True: 18 | break 19 | 20 | if control_arguments_exist: 21 | self.required = None 22 | 23 | return super(NotRequiredIf, self).handle_parse_result(ctx, opts, args) 24 | 25 | 26 | class RequiredIf(click.Argument): 27 | # TODO: Refactor here 28 | 29 | def __init__(self, *args, **kwargs): 30 | self.arguments = kwargs.pop("arguments") 31 | assert self.arguments, "'arguments' parameter required" 32 | super(RequiredIf, self).__init__(*args, **kwargs) 33 | 34 | def handle_parse_result(self, ctx, opts, args): 35 | control_arguments_exist = False 36 | 37 | for arguments in self.arguments: 38 | control_arguments_exist = arguments in opts 39 | if control_arguments_exist is True: 40 | break 41 | 42 | if control_arguments_exist: 43 | self.required = True 44 | else: 45 | self.required = None 46 | 47 | return super(RequiredIf, self).handle_parse_result(ctx, opts, args) 48 | -------------------------------------------------------------------------------- /kfk/commands/acls.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import click 4 | 5 | from kfk.commands import users 6 | from kfk.commands.main import kfk 7 | from kfk.commons import raise_exception_for_missing_options 8 | from kfk.constants import COLON 9 | from kfk.kubectl_command_builder import Kubectl 10 | from kfk.option_extensions import NotRequiredIf 11 | 12 | 13 | @click.option("-n", "--namespace", help="Namespace to use.", required=True) 14 | @click.option("-c", "--kafka-cluster", help="Cluster to use.", required=True) 15 | @click.option("--remove", help="Indicates you are trying to remove ACLs.", is_flag=True) 16 | @click.option( 17 | "--resource-pattern-type", 18 | help=( 19 | "The type of the resource pattern or pattern" 20 | " filter. When adding acls, this should be a specific pattern type, e.g." 21 | " 'literal' or 'prefixed'. (default: literal)" 22 | ), 23 | default="literal", 24 | ) 25 | @click.option( 26 | "--deny-host", 27 | help="Host which User will not have access. (default: *)", 28 | default="*", 29 | ) 30 | @click.option( 31 | "--allow-host", help="Host which User will have access. (default: *)", default="*" 32 | ) 33 | @click.option( 34 | "--operation", 35 | "operation_tuple", 36 | help="Operation that is being allowed or denied. 
(default: All)", 37 | default=["All"], 38 | multiple=True, 39 | ) 40 | @click.option( 41 | "--deny-principal", 42 | help=( 43 | "principal is in principalType:name format. By default anyone not added through" 44 | " --allow-principal is denied access. You only need to use this option as" 45 | " negation to already allowed set." 46 | ), 47 | required=True, 48 | cls=NotRequiredIf, 49 | options=["list", "allow_principal"], 50 | ) 51 | @click.option( 52 | "--allow-principal", 53 | help=( 54 | "principal is in principalType:name principal format. Note that principalType" 55 | " must be supported by the Authorizer being used." 56 | ), 57 | required=True, 58 | cls=NotRequiredIf, 59 | options=["list", "deny_principal"], 60 | ) 61 | @click.option("--add", help="Indicates you are trying to add ACLs.", is_flag=True) 62 | @click.option("--group", help="Consumer Group ACLs.") 63 | @click.option("--cluster", help="Cluster ACLs.") 64 | @click.option("--topic", help="Topic ACLs.") 65 | @click.option( 66 | "--list", 67 | help=( 68 | "List ACLs for the specified resource, use --topic or --group " 69 | " or --cluster to specify a resource." 70 | ), 71 | is_flag=True, 72 | ) 73 | @kfk.command() 74 | def acls( 75 | list, 76 | topic, 77 | cluster, 78 | group, 79 | add, 80 | allow_principal, 81 | deny_principal, 82 | operation_tuple, 83 | allow_host, 84 | deny_host, 85 | resource_pattern_type, 86 | remove, 87 | kafka_cluster, 88 | namespace, 89 | ): 90 | """Manages ACLs on Kafka.""" 91 | if list: 92 | native_command = ( 93 | "bin/kafka-acls.sh --authorizer-properties" 94 | " zookeeper.connect=localhost:12181 --list {topic}{cluster} {group}" 95 | ) 96 | os.system( 97 | Kubectl() 98 | .exec("-it", "{kafka_cluster}-zookeeper-0") 99 | .container("zookeeper") 100 | .namespace(namespace) 101 | .exec_command(native_command) 102 | .build() 103 | .format( 104 | kafka_cluster=kafka_cluster, 105 | topic=(topic and "--topic " + topic or ""), 106 | cluster=(cluster and "--cluster " + cluster or ""), 107 | group=(group and "--group " + group or ""), 108 | ) 109 | ) 110 | elif add or remove: 111 | add_or_remove( 112 | topic, 113 | cluster, 114 | group, 115 | add, 116 | remove, 117 | allow_principal, 118 | deny_principal, 119 | operation_tuple, 120 | allow_host, 121 | deny_host, 122 | resource_pattern_type, 123 | kafka_cluster, 124 | namespace, 125 | ) 126 | else: 127 | raise_exception_for_missing_options("acls") 128 | 129 | 130 | def add_or_remove( 131 | topic, 132 | cluster, 133 | group, 134 | add, 135 | remove, 136 | allow_principal, 137 | deny_principal, 138 | operation_tuple, 139 | allow_host, 140 | deny_host, 141 | resource_pattern_type, 142 | kafka_cluster, 143 | namespace, 144 | ): 145 | resource_type_dict = _get_resource_type_dict(topic, cluster, group) 146 | 147 | if allow_principal: 148 | type = "allow" 149 | # TODO: click exception here 150 | allow_principal_arr = allow_principal.split(COLON) 151 | principal_type = allow_principal_arr[0] 152 | principal_name = allow_principal_arr[1] 153 | host = allow_host 154 | else: 155 | type = "deny" 156 | # TODO: click exception here 157 | deny_principal_arr = deny_principal.split(COLON) 158 | principal_type = deny_principal_arr[0] 159 | principal_name = deny_principal_arr[1] 160 | host = deny_host 161 | 162 | if principal_type == "User": 163 | for resource_type, resource_name in resource_type_dict.items(): 164 | users.alter( 165 | principal_name, 166 | None, 167 | "simple", 168 | add, 169 | remove, 170 | operation_tuple, 171 | host, 172 | type, 173 | resource_type, 174 | 
resource_name, 175 | resource_pattern_type, 176 | tuple(), 177 | tuple(), 178 | kafka_cluster, 179 | namespace, 180 | ) 181 | 182 | 183 | def _get_resource_type_dict(topic, cluster, group): 184 | resource_type_dict = {} 185 | 186 | if topic is not None: 187 | resource_type_dict["topic"] = topic 188 | if cluster is not None: 189 | resource_type_dict["cluster"] = cluster 190 | if group is not None: 191 | resource_type_dict["group"] = group 192 | 193 | return resource_type_dict 194 | -------------------------------------------------------------------------------- /kfk/commands/clusters.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import click 4 | import yaml 5 | 6 | from kfk.commands.main import kfk 7 | from kfk.commons import ( 8 | add_kv_config_to_resource, 9 | create_temp_file, 10 | delete_last_applied_configuration, 11 | delete_resource_config, 12 | get_resource_as_stream, 13 | open_file_in_system_editor, 14 | raise_exception_for_missing_options, 15 | ) 16 | from kfk.config import STRIMZI_PATH, STRIMZI_VERSION 17 | from kfk.kubectl_command_builder import Kubectl 18 | from kfk.kubernetes_commons import ( 19 | create_using_yaml, 20 | delete_using_yaml, 21 | replace_using_yaml, 22 | ) 23 | from kfk.messages import Messages 24 | from kfk.option_extensions import NotRequiredIf 25 | 26 | 27 | @click.option("-y", "--yes", "is_yes", help='"Yes" confirmation', is_flag=True) 28 | @click.option( 29 | "-n", 30 | "--namespace", 31 | help="Namespace to use", 32 | required=True, 33 | cls=NotRequiredIf, 34 | options=["is_list"], 35 | ) 36 | @click.option( 37 | "--delete-config", 38 | help="A cluster configuration override to be removed for an existing cluster", 39 | multiple=True, 40 | ) 41 | @click.option( 42 | "--config", 43 | help="A cluster configuration override for the cluster being altered.", 44 | multiple=True, 45 | ) 46 | @click.option("--alter", "is_alter", help="Alters the Kafka cluster.", is_flag=True) 47 | @click.option("--delete", "is_delete", help="Deletes the Kafka cluster.", is_flag=True) 48 | @click.option( 49 | "--zk-replicas", help="The number of zookeeper replicas for the cluster.", type=int 50 | ) 51 | @click.option( 52 | "--replicas", help="The number of broker replicas for the cluster.", type=int 53 | ) 54 | @click.option("--create", "is_create", help="Creates a Kafka cluster.", is_flag=True) 55 | @click.option( 56 | "--describe", 57 | "is_describe", 58 | help="Lists details for the given cluster.", 59 | is_flag=True, 60 | ) 61 | @click.option( 62 | "-o", 63 | "--output", 64 | help=( 65 | "Output format. One of:" 66 | " json|yaml|name|go-template|go-template-file|template|templatefile|jsonpath" 67 | "|jsonpath-file." 
68 | ), 69 | ) 70 | @click.option( 71 | "--list", 72 | "is_list", 73 | help="List all available clusters.", 74 | required=True, 75 | is_flag=True, 76 | ) 77 | @click.option( 78 | "--cluster", 79 | help="Cluster Name", 80 | required=True, 81 | cls=NotRequiredIf, 82 | options=["is_list"], 83 | ) 84 | @kfk.command() 85 | def clusters( 86 | cluster, 87 | is_list, 88 | is_create, 89 | replicas, 90 | zk_replicas, 91 | is_describe, 92 | is_delete, 93 | is_alter, 94 | config, 95 | delete_config, 96 | output, 97 | namespace, 98 | is_yes, 99 | ): 100 | """Creates, alters, deletes, describes Kafka cluster(s).""" 101 | if is_list: 102 | list(namespace) 103 | elif is_create: 104 | create(cluster, replicas, zk_replicas, config, namespace, is_yes) 105 | elif is_describe: 106 | describe(cluster, output, namespace) 107 | elif is_delete: 108 | delete(cluster, namespace, is_yes) 109 | elif is_alter: 110 | alter(cluster, replicas, zk_replicas, config, delete_config, namespace) 111 | else: 112 | raise_exception_for_missing_options("clusters") 113 | 114 | 115 | def list(namespace): 116 | os.system(Kubectl().get().kafkas().namespace(namespace).build()) 117 | 118 | 119 | def create(cluster, replicas, zk_replicas, config, namespace, is_yes): 120 | with open( 121 | "{strimzi_path}/examples/kafka/kafka-ephemeral.yaml".format( 122 | strimzi_path=STRIMZI_PATH 123 | ).format(version=STRIMZI_VERSION) 124 | ) as file: 125 | stream = file.read() 126 | 127 | cluster_dict = yaml.full_load(stream) 128 | 129 | cluster_dict["metadata"]["name"] = cluster 130 | 131 | _update_replicas(replicas, zk_replicas, cluster_dict) 132 | 133 | _add_config_if_provided(config, cluster_dict) 134 | 135 | cluster_yaml = yaml.dump(cluster_dict) 136 | cluster_temp_file = create_temp_file(cluster_yaml) 137 | 138 | if is_yes: 139 | is_confirmed = True 140 | else: 141 | open_file_in_system_editor(cluster_temp_file.name) 142 | is_confirmed = click.confirm(Messages.CLUSTER_CREATE_CONFIRMATION) 143 | if is_confirmed: 144 | create_using_yaml(cluster_temp_file.name, namespace) 145 | 146 | cluster_temp_file.close() 147 | 148 | 149 | def describe(cluster, output, namespace): 150 | if output is not None: 151 | os.system( 152 | Kubectl().get().kafkas(cluster).namespace(namespace).output(output).build() 153 | ) 154 | else: 155 | os.system(Kubectl().describe().kafkas(cluster).namespace(namespace).build()) 156 | 157 | 158 | def delete(cluster, namespace, is_yes): 159 | if is_yes: 160 | is_confirmed = True 161 | else: 162 | is_confirmed = click.confirm(Messages.CLUSTER_DELETE_CONFIRMATION) 163 | if is_confirmed: 164 | with open( 165 | "{strimzi_path}/examples/kafka/kafka-ephemeral.yaml".format( 166 | strimzi_path=STRIMZI_PATH 167 | ).format(version=STRIMZI_VERSION) 168 | ) as file: 169 | stream = file.read() 170 | 171 | cluster_dict = yaml.full_load(stream) 172 | 173 | cluster_dict["metadata"]["name"] = cluster 174 | 175 | cluster_yaml = yaml.dump(cluster_dict) 176 | cluster_temp_file = create_temp_file(cluster_yaml) 177 | 178 | delete_using_yaml(cluster_temp_file.name, namespace) 179 | 180 | cluster_temp_file.close() 181 | 182 | 183 | def alter(cluster, replicas, zk_replicas, config, delete_config, namespace): 184 | if ( 185 | len(config) > 0 186 | or len(delete_config) > 0 187 | or replicas is not None 188 | or zk_replicas is not None 189 | ): 190 | stream = get_resource_as_stream( 191 | "kafkas", resource_name=cluster, namespace=namespace 192 | ) 193 | cluster_dict = yaml.full_load(stream) 194 | 195 | delete_last_applied_configuration(cluster_dict) 196 | 197 | 
_update_replicas(replicas, zk_replicas, cluster_dict) 198 | 199 | _add_config_if_provided(config, cluster_dict) 200 | 201 | if len(delete_config) > 0: 202 | if cluster_dict["spec"]["kafka"].get("config") is not None: 203 | delete_resource_config( 204 | delete_config, cluster_dict["spec"]["kafka"]["config"] 205 | ) 206 | 207 | cluster_yaml = yaml.dump(cluster_dict) 208 | cluster_temp_file = create_temp_file(cluster_yaml) 209 | 210 | replace_using_yaml(cluster_temp_file.name, namespace) 211 | 212 | cluster_temp_file.close() 213 | else: 214 | os.system(Kubectl().edit().kafkas(cluster).namespace(namespace).build()) 215 | 216 | 217 | def _update_replicas(replicas, zk_replicas, cluster_dict): 218 | if replicas is not None: 219 | cluster_dict["spec"]["kafka"]["replicas"] = int(replicas) 220 | min_insync_replicas = 1 221 | if replicas > 1: 222 | min_insync_replicas = replicas - 1 223 | cluster_dict["spec"]["kafka"]["config"]["offsets.topic.replication.factor"] = ( 224 | int(replicas) 225 | ) 226 | cluster_dict["spec"]["kafka"]["config"][ 227 | "transaction.state.log.replication.factor" 228 | ] = int(replicas) 229 | cluster_dict["spec"]["kafka"]["config"]["default.replication.factor"] = int( 230 | replicas 231 | ) 232 | cluster_dict["spec"]["kafka"]["config"][ 233 | "transaction.state.log.min.isr" 234 | ] = min_insync_replicas 235 | cluster_dict["spec"]["kafka"]["config"][ 236 | "min.insync.replicas" 237 | ] = min_insync_replicas 238 | 239 | if zk_replicas is not None: 240 | cluster_dict["spec"]["zookeeper"]["replicas"] = int(zk_replicas) 241 | 242 | 243 | def _add_config_if_provided(config, cluster_dict): 244 | if len(config) > 0: 245 | if cluster_dict["spec"]["kafka"].get("config") is None: 246 | cluster_dict["spec"]["kafka"]["config"] = {} 247 | add_kv_config_to_resource(config, cluster_dict["spec"]["kafka"]["config"]) 248 | -------------------------------------------------------------------------------- /kfk/commands/configs.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import click 4 | import yaml 5 | 6 | from kfk.commands import clusters, topics, users 7 | from kfk.commands.main import kfk 8 | from kfk.commons import ( 9 | get_config_list, 10 | get_list_by_split_string, 11 | get_resource_as_stream, 12 | raise_exception_for_missing_options, 13 | ) 14 | from kfk.constants import COMMON_NAME_PREFIX, KAFKA_PORT, NEW_LINE, SPACE, SpecialTexts 15 | from kfk.kubectl_command_builder import Kubectl 16 | from kfk.messages import Messages 17 | from kfk.option_extensions import NotRequiredIf 18 | 19 | 20 | @click.option("-n", "--namespace", help="Namespace to use", required=True) 21 | @click.option("-c", "--cluster", help="Cluster to use", required=True) 22 | @click.option("--delete-config", help="Config keys to remove") 23 | @click.option("--add-config", help="Key Value pairs of configs to add.") 24 | @click.option("--alter", help="Alter the configuration for the entity.", is_flag=True) 25 | @click.option( 26 | "--native", help="List configs for the given entity natively.", is_flag=True 27 | ) 28 | @click.option("--describe", help="List configs for the given entity.", is_flag=True) 29 | @click.option( 30 | "--entity-name", 31 | help="Name of entity", 32 | required=True, 33 | cls=NotRequiredIf, 34 | options=["native"], 35 | ) 36 | @click.option( 37 | "--entity-type", 38 | help="Type of entity (topics/users/brokers)", 39 | type=click.Choice(["topics", "users", "brokers"], case_sensitive=True), 40 | ) 41 | @kfk.command() 42 | def configs( 43 | 
entity_type, 44 | entity_name, 45 | describe, 46 | native, 47 | alter, 48 | add_config, 49 | delete_config, 50 | cluster, 51 | namespace, 52 | ): 53 | """Adds/Removes entity config for a topic, client, user or brokers.""" 54 | if describe: 55 | if entity_type == "topics": 56 | if native: 57 | _describe_natively(entity_type, entity_name, cluster, namespace) 58 | else: 59 | topics.describe(entity_name, None, False, None, cluster, namespace) 60 | elif entity_type == "users": 61 | if native: 62 | _describe_natively(entity_type, entity_name, cluster, namespace) 63 | else: 64 | users.describe(entity_name, None, cluster, namespace) 65 | elif entity_type == "brokers": 66 | if native: 67 | _describe_natively(entity_type, entity_name, cluster, namespace) 68 | stream = get_resource_as_stream( 69 | "configmap", 70 | resource_name=cluster + "-kafka-config", 71 | namespace=namespace, 72 | ) 73 | config_dict = yaml.full_load(stream) 74 | config_data = get_list_by_split_string( 75 | config_dict["data"]["server.config"], 76 | SpecialTexts.BROKER_CONFIG_FILE_USER_CONFIG_HEADER, 77 | )[1] 78 | click.echo( 79 | NEW_LINE 80 | + Messages.USER_PROVIDED_CONFIG_HEADER 81 | + NEW_LINE 82 | + config_data 83 | ) 84 | else: 85 | clusters.describe(cluster, None, namespace) 86 | 87 | elif alter: 88 | add_config_list = get_config_list(add_config) 89 | delete_config_list = get_config_list(delete_config) 90 | 91 | if entity_type == "topics": 92 | topics.alter( 93 | entity_name, 94 | None, 95 | None, 96 | add_config_list, 97 | delete_config_list, 98 | cluster, 99 | namespace, 100 | ) 101 | elif entity_type == "users": 102 | users.alter( 103 | entity_name, 104 | None, 105 | None, 106 | False, 107 | False, 108 | tuple(), 109 | None, 110 | None, 111 | None, 112 | None, 113 | None, 114 | add_config_list, 115 | delete_config_list, 116 | cluster, 117 | namespace, 118 | ) 119 | elif entity_type == "brokers": 120 | clusters.alter( 121 | entity_name, None, None, add_config_list, delete_config_list, namespace 122 | ) 123 | else: 124 | raise_exception_for_missing_options("configs") 125 | 126 | 127 | def _describe_natively(entity_type, entity_name, cluster, namespace): 128 | native_command = ( 129 | "bin/kafka-configs.sh --bootstrap-server {cluster}-kafka-brokers:{port} " 130 | "--entity-type {entity_type} --describe" 131 | ) 132 | 133 | if entity_name is not None: 134 | native_command = native_command + SPACE + "--entity-name {entity_name}" 135 | 136 | if entity_type == "users": 137 | entity_name = COMMON_NAME_PREFIX + entity_name 138 | 139 | os.system( 140 | Kubectl() 141 | .exec("-it", cluster + "-kafka-0") 142 | .container("kafka") 143 | .namespace(namespace) 144 | .exec_command(native_command) 145 | .build() 146 | .format( 147 | cluster=cluster, 148 | port=KAFKA_PORT, 149 | entity_type=entity_type, 150 | entity_name=entity_name, 151 | ) 152 | ) 153 | -------------------------------------------------------------------------------- /kfk/commands/connect/__init__.py: -------------------------------------------------------------------------------- 1 | from kfk.commands.main import kfk 2 | 3 | 4 | @kfk.group() 5 | def connect(): 6 | """Creates, alters, deletes, describes Kafka Connect cluster(s) or its 7 | connectors.""" 8 | -------------------------------------------------------------------------------- /kfk/commands/connect/connectors.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import click 4 | import yaml 5 | 6 | from kfk import argument_extensions, option_extensions 7 | 
from kfk.commands.connect import connect 8 | from kfk.commons import ( 9 | add_properties_config_to_resource, 10 | create_temp_file, 11 | delete_last_applied_configuration, 12 | get_properties_from_file, 13 | get_resource_as_stream, 14 | raise_exception_for_missing_options, 15 | ) 16 | from kfk.config import STRIMZI_PATH, STRIMZI_VERSION 17 | from kfk.constants import SpecialTexts 18 | from kfk.kubectl_command_builder import Kubectl 19 | from kfk.kubernetes_commons import ( 20 | create_using_yaml, 21 | delete_using_yaml, 22 | replace_using_yaml, 23 | ) 24 | 25 | CONNECTOR_SKIPPED_PROPERTIES = ( 26 | SpecialTexts.CONNECTOR_NAME, 27 | SpecialTexts.CONNECTOR_TASKS_MAX, 28 | SpecialTexts.CONNECTOR_CLASS, 29 | ) 30 | 31 | 32 | @click.option("-n", "--namespace", help="Namespace to use", required=True) 33 | @click.option("-c", "--cluster", help="Connect cluster to use", required=True) 34 | @click.option("--alter", "is_alter", help="Alter the connector.", is_flag=True) 35 | @click.option("--delete", "is_delete", help="Delete the connector.", is_flag=True) 36 | @click.option( 37 | "-o", 38 | "--output", 39 | help=( 40 | "Output format. One of:" 41 | " json|yaml|name|go-template|go-template-file|template|templatefile|jsonpath" 42 | "|jsonpath-file." 43 | ), 44 | ) 45 | @click.option( 46 | "--describe", 47 | "is_describe", 48 | help="List details for the given connector.", 49 | is_flag=True, 50 | ) 51 | @click.argument( 52 | "config_file", 53 | type=click.File("r"), 54 | cls=argument_extensions.NotRequiredIf, 55 | arguments=["is_describe", "is_delete", "is_list"], 56 | ) 57 | @click.option("--create", "is_create", help="Create a new connector.", is_flag=True) 58 | @click.option("--list", "is_list", help="List all available connectors.", is_flag=True) 59 | @click.option( 60 | "--connector", 61 | help="Connector Name", 62 | cls=option_extensions.RequiredIf, 63 | options=["is_describe", "is_delete"], 64 | ) 65 | @connect.command() 66 | def connectors( 67 | connector, 68 | is_list, 69 | is_create, 70 | config_file, 71 | is_describe, 72 | output, 73 | is_delete, 74 | is_alter, 75 | cluster, 76 | namespace, 77 | ): 78 | """Creates, alters, deletes, describes Kafka Connect connector(s).""" 79 | if is_list: 80 | list(cluster, namespace) 81 | elif is_create: 82 | create(config_file, cluster, namespace) 83 | elif is_describe: 84 | describe(connector, output, namespace) 85 | elif is_delete: 86 | delete(connector, cluster, namespace) 87 | elif is_alter: 88 | alter(config_file, cluster, namespace) 89 | else: 90 | raise_exception_for_missing_options("connectors") 91 | 92 | 93 | def list(cluster, namespace): 94 | os.system( 95 | Kubectl() 96 | .get() 97 | .kafkaconnectors() 98 | .label("strimzi.io/cluster={cluster}") 99 | .namespace(namespace) 100 | .build() 101 | .format(cluster=cluster) 102 | ) 103 | 104 | 105 | def create(config_file, cluster, namespace): 106 | with open( 107 | "{strimzi_path}/examples/connect/source-connector.yaml".format( 108 | strimzi_path=STRIMZI_PATH 109 | ).format(version=STRIMZI_VERSION) 110 | ) as file: 111 | connector_dict = yaml.full_load(file) 112 | 113 | connector_properties = get_properties_from_file(config_file) 114 | 115 | connector_dict["metadata"]["name"] = connector_properties.get( 116 | SpecialTexts.CONNECTOR_NAME 117 | ).data 118 | connector_dict["metadata"]["labels"]["strimzi.io/cluster"] = cluster 119 | 120 | connector_dict["spec"]["class"] = connector_properties.get( 121 | SpecialTexts.CONNECTOR_CLASS 122 | ).data 123 | connector_dict["spec"]["tasksMax"] = int( 124 | 
connector_properties.get(SpecialTexts.CONNECTOR_TASKS_MAX).data 125 | ) 126 | connector_dict["spec"]["config"] = {} 127 | 128 | add_properties_config_to_resource( 129 | connector_properties, 130 | connector_dict["spec"]["config"], 131 | _return_if_not_skipped, 132 | ) 133 | 134 | connector_yaml = yaml.dump(connector_dict) 135 | connector_temp_file = create_temp_file(connector_yaml) 136 | 137 | create_using_yaml(connector_temp_file.name, namespace) 138 | 139 | connector_temp_file.close() 140 | 141 | 142 | def describe(connector, output, namespace): 143 | if output is not None: 144 | os.system( 145 | Kubectl() 146 | .get() 147 | .kafkaconnectors(connector) 148 | .namespace(namespace) 149 | .output(output) 150 | .build() 151 | ) 152 | else: 153 | os.system( 154 | Kubectl().describe().kafkaconnectors(connector).namespace(namespace).build() 155 | ) 156 | 157 | 158 | def delete(connector, cluster, namespace): 159 | with open( 160 | "{strimzi_path}/examples/connect/source-connector.yaml".format( 161 | strimzi_path=STRIMZI_PATH 162 | ).format(version=STRIMZI_VERSION) 163 | ) as file: 164 | connector_dict = yaml.full_load(file) 165 | 166 | connector_dict["metadata"]["name"] = connector 167 | connector_dict["metadata"]["labels"]["strimzi.io/cluster"] = cluster 168 | 169 | connector_yaml = yaml.dump(connector_dict) 170 | connector_temp_file = create_temp_file(connector_yaml) 171 | 172 | delete_using_yaml(connector_temp_file.name, namespace) 173 | 174 | connector_temp_file.close() 175 | 176 | 177 | def alter(config_file, cluster, namespace): 178 | connector_properties = get_properties_from_file(config_file) 179 | 180 | stream = get_resource_as_stream( 181 | "kafkaconnectors", 182 | resource_name=connector_properties.get(SpecialTexts.CONNECTOR_NAME).data, 183 | namespace=namespace, 184 | ) 185 | 186 | connector_dict = yaml.full_load(stream) 187 | 188 | delete_last_applied_configuration(connector_dict) 189 | 190 | connector_dict["spec"]["class"] = connector_properties.get( 191 | SpecialTexts.CONNECTOR_CLASS 192 | ).data 193 | connector_dict["spec"]["tasksMax"] = int( 194 | connector_properties.get(SpecialTexts.CONNECTOR_TASKS_MAX).data 195 | ) 196 | 197 | connector_dict["spec"]["config"] = {} 198 | 199 | add_properties_config_to_resource( 200 | connector_properties, connector_dict["spec"]["config"], _return_if_not_skipped 201 | ) 202 | 203 | connector_yaml = yaml.dump(connector_dict) 204 | connector_temp_file = create_temp_file(connector_yaml) 205 | 206 | replace_using_yaml(connector_temp_file.name, namespace) 207 | 208 | connector_temp_file.close() 209 | 210 | 211 | def _return_if_not_skipped(property_item): 212 | if property_item[0] not in CONNECTOR_SKIPPED_PROPERTIES: 213 | return property_item 214 | else: 215 | return None 216 | -------------------------------------------------------------------------------- /kfk/commands/console.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import click 4 | 5 | from kfk.commands.main import kfk 6 | from kfk.commons import apply_client_config_from_file 7 | from kfk.constants import KAFKA_PORT 8 | from kfk.kubectl_command_builder import Kubectl 9 | 10 | 11 | @click.option("-n", "--namespace", help="Namespace to use", required=True) 12 | @click.option("-c", "--cluster", help="Cluster to use", required=True) 13 | @click.option("--from-beginning", help="Consumes messages from beginning", is_flag=True) 14 | @click.option( 15 | "--consumer.config", "consumer_config", help="Consumer config properties file." 
16 | ) 17 | @click.option("--topic", help="Topic Name", required=True) 18 | @kfk.command() 19 | def console_consumer(topic, consumer_config, from_beginning, cluster, namespace): 20 | """Reads data from Kafka topics and outputs it to standard output.""" 21 | native_command = ( 22 | "bin/kafka-console-consumer.sh --bootstrap-server" 23 | " {cluster}-kafka-brokers:{port} --topic {topic} {from_beginning}" 24 | ) 25 | pod = cluster + "-kafka-0" 26 | container = "kafka" 27 | if consumer_config is not None: 28 | native_command = apply_client_config_from_file( 29 | native_command, 30 | consumer_config, 31 | "--consumer.config", 32 | container, 33 | pod, 34 | namespace, 35 | ) 36 | os.system( 37 | Kubectl() 38 | .exec("-it", pod) 39 | .container(container) 40 | .namespace(namespace) 41 | .exec_command(native_command) 42 | .build() 43 | .format( 44 | port=KAFKA_PORT, 45 | topic=topic, 46 | cluster=cluster, 47 | from_beginning=(from_beginning and "--from-beginning" or ""), 48 | ) 49 | ) 50 | 51 | 52 | @click.option("-n", "--namespace", help="Namespace to use", required=True) 53 | @click.option("-c", "--cluster", help="Cluster to use", required=True) 54 | @click.option( 55 | "--producer.config", "producer_config", help="Producer config properties file." 56 | ) 57 | @click.option("--topic", help="Topic Name", required=True) 58 | @kfk.command() 59 | def console_producer(topic, producer_config, cluster, namespace): 60 | """Reads data from standard input and publish it to Kafka.""" 61 | native_command = ( 62 | "bin/kafka-console-producer.sh --broker-list {cluster}-kafka-brokers:{port}" 63 | " --topic {topic}" 64 | ) 65 | pod = cluster + "-kafka-0" 66 | container = "kafka" 67 | if producer_config is not None: 68 | native_command = apply_client_config_from_file( 69 | native_command, 70 | producer_config, 71 | "--producer.config", 72 | container, 73 | pod, 74 | namespace, 75 | ) 76 | os.system( 77 | Kubectl() 78 | .exec("-it", pod) 79 | .container(container) 80 | .namespace(namespace) 81 | .exec_command(native_command) 82 | .build() 83 | .format(port=KAFKA_PORT, topic=topic, cluster=cluster) 84 | ) 85 | -------------------------------------------------------------------------------- /kfk/commands/env.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import click 4 | 5 | from kfk.commands.main import kfk 6 | 7 | 8 | @kfk.command() 9 | def env(): 10 | """Prints the environment variable values for Strimzi Kafka CLI.""" 11 | 12 | click.echo( 13 | "STRIMZI_KAFKA_CLI_BASE_PATH: {}".format( 14 | os.environ.get("STRIMZI_KAFKA_CLI_BASE_PATH") 15 | ) 16 | ) 17 | click.echo( 18 | "STRIMZI_KAFKA_CLI_STRIMZI_VERSION: {}".format( 19 | os.environ.get("STRIMZI_KAFKA_CLI_STRIMZI_VERSION") 20 | ) 21 | ) 22 | click.echo( 23 | "STRIMZI_KAFKA_CLI_STRIMZI_PATH: {}".format( 24 | os.environ.get("STRIMZI_KAFKA_CLI_STRIMZI_PATH") 25 | ) 26 | ) 27 | click.echo( 28 | "STRIMZI_KAFKA_CLI_KUBECTL_VERSION: {}".format( 29 | os.environ.get("STRIMZI_KAFKA_CLI_KUBECTL_VERSION") 30 | ) 31 | ) 32 | click.echo( 33 | "STRIMZI_KAFKA_CLI_KUBECTL_PATH: {}".format( 34 | os.environ.get("STRIMZI_KAFKA_CLI_KUBECTL_PATH") 35 | ) 36 | ) 37 | -------------------------------------------------------------------------------- /kfk/commands/main.py: -------------------------------------------------------------------------------- 1 | from importlib.metadata import version 2 | 3 | import click 4 | 5 | from kfk.config import KUBECTL_VERSION, STRIMZI_VERSION 6 | 7 | version = f"""CLI Version: 
{version("strimzi-kafka-cli")} 8 | Strimzi Version: {STRIMZI_VERSION} 9 | Kubectl Version: {KUBECTL_VERSION}""" 10 | 11 | 12 | # @click.option('-v', '--version', help='Prints the version', is_flag=True) 13 | @click.version_option(version=1, message=version) 14 | @click.group(name="kfk") 15 | def kfk(): 16 | """Strimzi Kafka CLI.""" 17 | 18 | 19 | if __name__ == "__main__": 20 | kfk() 21 | -------------------------------------------------------------------------------- /kfk/commands/operator.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import click 4 | 5 | from kfk.commands.main import kfk 6 | from kfk.commons import create_temp_file, raise_exception_for_missing_options 7 | from kfk.config import STRIMZI_PATH 8 | from kfk.constants import SpecialTexts 9 | from kfk.kubernetes_commons import create_using_yaml, delete_using_yaml 10 | 11 | 12 | @click.option("-n", "--namespace", help="Namespace to use", required=True) 13 | @click.option( 14 | "--uninstall", 15 | "is_uninstall", 16 | help="Uninstalls Strimzi Kafka Operator", 17 | is_flag=True, 18 | ) 19 | @click.option( 20 | "--install", "is_install", help="Installs Strimzi Kafka Operator", is_flag=True 21 | ) 22 | @kfk.command() 23 | def operator(is_install, is_uninstall, namespace): 24 | """Installs/Uninstalls Strimzi Kafka Operator.""" 25 | 26 | for directory_name, dirs, files in os.walk( 27 | "{strimzi_path}/install/cluster-operator/".format(strimzi_path=STRIMZI_PATH) 28 | ): 29 | for file_name in files: 30 | file_path = os.path.join(directory_name, file_name) 31 | 32 | if SpecialTexts.OPERATOR_ROLE_BINDING in file_name: 33 | with open(file_path) as file: 34 | stream = file.read().replace( 35 | SpecialTexts.OPERATOR_MY_PROJECT, namespace 36 | ) 37 | temp_file = create_temp_file(stream) 38 | file_path = temp_file.name 39 | if is_install: 40 | create_using_yaml(file_path, namespace) 41 | elif is_uninstall: 42 | delete_using_yaml(file_path, namespace) 43 | else: 44 | raise_exception_for_missing_options("operator") 45 | break 46 | -------------------------------------------------------------------------------- /kfk/commands/topics.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import click 4 | import yaml 5 | 6 | from kfk.commands.main import kfk 7 | from kfk.commons import ( 8 | add_kv_config_to_resource, 9 | apply_client_config_from_file, 10 | create_temp_file, 11 | delete_last_applied_configuration, 12 | delete_resource_config, 13 | get_resource_as_stream, 14 | raise_exception_for_missing_options, 15 | ) 16 | from kfk.config import STRIMZI_PATH, STRIMZI_VERSION 17 | from kfk.constants import KAFKA_PORT 18 | from kfk.kubectl_command_builder import Kubectl 19 | from kfk.kubernetes_commons import ( 20 | create_using_yaml, 21 | delete_using_yaml, 22 | replace_using_yaml, 23 | ) 24 | from kfk.option_extensions import NotRequiredIf, RequiredIf 25 | 26 | 27 | @click.option("-n", "--namespace", help="Namespace to use", required=True) 28 | @click.option("-c", "--cluster", help="Cluster to use", required=True) 29 | @click.option( 30 | "--delete-config", 31 | help="A topic configuration override to be removed for an existing topic", 32 | multiple=True, 33 | ) 34 | @click.option( 35 | "--config", 36 | help="A topic configuration override for the topic being created or altered.", 37 | multiple=True, 38 | ) 39 | @click.option( 40 | "--alter", 41 | "is_alter", 42 | help=( 43 | "Alter the number of partitions, replica assignment, and/or 
configuration of" 44 | " the topic." 45 | ), 46 | is_flag=True, 47 | ) 48 | @click.option("--delete", "is_delete", help="Delete a topic.", is_flag=True) 49 | @click.option( 50 | "--command-config", 51 | help=( 52 | "Property file containing configs to be config property file passed to Admin" 53 | " Client." 54 | ), 55 | ) 56 | @click.option( 57 | "--native", 58 | help="List details for the given topic natively.", 59 | is_flag=True, 60 | cls=RequiredIf, 61 | options=["is_describe"], 62 | ) 63 | @click.option( 64 | "-o", 65 | "--output", 66 | help=( 67 | "Output format. One of:" 68 | " json|yaml|name|go-template|go-template-file|template|templatefile|jsonpath" 69 | "|jsonpath-file." 70 | ), 71 | ) 72 | @click.option( 73 | "--describe", "is_describe", help="List details for the given topic.", is_flag=True 74 | ) 75 | @click.option( 76 | "--replication-factor", 77 | help="The replication factor for each partition in the topic being created.", 78 | cls=RequiredIf, 79 | options=["is_create"], 80 | type=int, 81 | ) 82 | @click.option( 83 | "--partitions", 84 | help="The number of partitions for the topic being created or altered.", 85 | cls=RequiredIf, 86 | options=["is_create"], 87 | type=int, 88 | ) 89 | @click.option("--create", "is_create", help="Create a new topic.", is_flag=True) 90 | @click.option("--list", "is_list", help="List all available topics.", is_flag=True) 91 | @click.option( 92 | "--topic", help="Topic Name", required=True, cls=NotRequiredIf, options=["is_list"] 93 | ) 94 | @kfk.command() 95 | def topics( 96 | topic, 97 | is_list, 98 | is_create, 99 | partitions, 100 | replication_factor, 101 | is_describe, 102 | output, 103 | native, 104 | command_config, 105 | is_delete, 106 | is_alter, 107 | config, 108 | delete_config, 109 | cluster, 110 | namespace, 111 | ): 112 | """Creates, alters, deletes, describes Kafka topic(s).""" 113 | if is_list: 114 | list(cluster, namespace) 115 | elif is_create: 116 | create(topic, partitions, replication_factor, config, cluster, namespace) 117 | elif is_describe: 118 | describe(topic, output, native, command_config, cluster, namespace) 119 | elif is_delete: 120 | delete(topic, cluster, namespace) 121 | elif is_alter: 122 | alter( 123 | topic, 124 | partitions, 125 | replication_factor, 126 | config, 127 | delete_config, 128 | cluster, 129 | namespace, 130 | ) 131 | else: 132 | raise_exception_for_missing_options("topics") 133 | 134 | 135 | def list(cluster, namespace): 136 | os.system( 137 | Kubectl() 138 | .get() 139 | .kafkatopics() 140 | .label("strimzi.io/cluster={cluster}") 141 | .namespace(namespace) 142 | .build() 143 | .format(cluster=cluster) 144 | ) 145 | 146 | 147 | def create(topic, partitions, replication_factor, config, cluster, namespace): 148 | with open( 149 | "{strimzi_path}/examples/topic/kafka-topic.yaml".format( 150 | strimzi_path=STRIMZI_PATH 151 | ).format(version=STRIMZI_VERSION) 152 | ) as file: 153 | topic_dict = yaml.full_load(file) 154 | 155 | topic_dict["metadata"]["name"] = topic 156 | topic_dict["metadata"]["labels"]["strimzi.io/cluster"] = cluster 157 | topic_dict["spec"]["partitions"] = int(partitions) 158 | topic_dict["spec"]["replicas"] = int(replication_factor) 159 | 160 | _add_config_if_provided(config, topic_dict) 161 | 162 | topic_yaml = yaml.dump(topic_dict) 163 | topic_temp_file = create_temp_file(topic_yaml) 164 | 165 | create_using_yaml(topic_temp_file.name, namespace) 166 | 167 | topic_temp_file.close() 168 | 169 | 170 | def describe(topic, output, native, command_config, cluster, namespace): 171 | if 
output is not None: 172 | os.system( 173 | Kubectl() 174 | .get() 175 | .kafkatopics(topic) 176 | .namespace(namespace) 177 | .output(output) 178 | .build() 179 | ) 180 | else: 181 | if native: 182 | native_command = ( 183 | "bin/kafka-topics.sh --bootstrap-server {cluster}-kafka-brokers:{port}" 184 | " --describe --topic {topic}" 185 | ) 186 | pod = cluster + "-kafka-0" 187 | container = "kafka" 188 | if command_config is not None: 189 | native_command = apply_client_config_from_file( 190 | native_command, 191 | command_config, 192 | "--command-config", 193 | container, 194 | pod, 195 | namespace, 196 | ) 197 | os.system( 198 | Kubectl() 199 | .exec("-it", pod) 200 | .container(container) 201 | .namespace(namespace) 202 | .exec_command(native_command) 203 | .build() 204 | .format(port=KAFKA_PORT, topic=topic, cluster=cluster) 205 | ) 206 | else: 207 | os.system( 208 | Kubectl().describe().kafkatopics(topic).namespace(namespace).build() 209 | ) 210 | 211 | 212 | def delete(topic, cluster, namespace): 213 | with open( 214 | "{strimzi_path}/examples/topic/kafka-topic.yaml".format( 215 | strimzi_path=STRIMZI_PATH 216 | ).format(version=STRIMZI_VERSION) 217 | ) as file: 218 | topic_dict = yaml.full_load(file) 219 | 220 | topic_dict["metadata"]["name"] = topic 221 | topic_dict["metadata"]["labels"]["strimzi.io/cluster"] = cluster 222 | 223 | topic_yaml = yaml.dump(topic_dict) 224 | topic_temp_file = create_temp_file(topic_yaml) 225 | 226 | delete_using_yaml(topic_temp_file.name, namespace) 227 | 228 | topic_temp_file.close() 229 | 230 | 231 | def alter( 232 | topic, partitions, replication_factor, config, delete_config, cluster, namespace 233 | ): 234 | stream = get_resource_as_stream( 235 | "kafkatopics", resource_name=topic, namespace=namespace 236 | ) 237 | topic_dict = yaml.full_load(stream) 238 | 239 | delete_last_applied_configuration(topic_dict) 240 | 241 | if partitions is not None: 242 | topic_dict["spec"]["partitions"] = int(partitions) 243 | 244 | if replication_factor is not None: 245 | topic_dict["spec"]["replicas"] = int(replication_factor) 246 | 247 | _add_config_if_provided(config, topic_dict) 248 | 249 | if len(delete_config) > 0: 250 | if topic_dict["spec"].get("config") is not None: 251 | delete_resource_config(delete_config, topic_dict["spec"]["config"]) 252 | 253 | topic_yaml = yaml.dump(topic_dict) 254 | topic_temp_file = create_temp_file(topic_yaml) 255 | 256 | replace_using_yaml(topic_temp_file.name, namespace) 257 | 258 | topic_temp_file.close() 259 | 260 | 261 | def _add_config_if_provided(config, topic_dict): 262 | if len(config) > 0: 263 | if topic_dict["spec"].get("config") is None: 264 | topic_dict["spec"]["config"] = {} 265 | add_kv_config_to_resource(config, topic_dict["spec"]["config"]) 266 | -------------------------------------------------------------------------------- /kfk/commons.py: -------------------------------------------------------------------------------- 1 | import io 2 | import ntpath 3 | import os 4 | import tempfile 5 | from subprocess import call 6 | 7 | import click 8 | from jproperties import Properties 9 | 10 | from kfk.constants import ( 11 | COMMA, 12 | EQUALS, 13 | KAFKA_PORT, 14 | KAFKA_SECURE_PORT, 15 | KAFKA_SSL, 16 | NEW_LINE, 17 | SEMICOLON, 18 | SPACE, 19 | ) 20 | from kfk.kubectl_command_builder import Kubectl 21 | from kfk.utils import convert_string_to_type, get_list_by_split_string 22 | 23 | # TODO: Message string to messages.py 24 | 25 | 26 | def raise_exception_for_missing_options(command_str): 27 | raise click.ClickException( 
28 | f"Missing options: kfk {command_str} [OPTIONS] \nTry 'kfk {command_str} --help'" 29 | " for help." 30 | ) 31 | 32 | 33 | def print_cluster_resource_not_found_msg(cluster, namespace): 34 | click.echo(f"No resource found in Kafka cluster: {cluster}, namespace: {namespace}") 35 | 36 | 37 | def print_resource_not_found_msg(namespace): 38 | click.echo(f"No resource found in namespace: {namespace}") 39 | 40 | 41 | def delete_last_applied_configuration(resource_dict): 42 | if "annotations" in resource_dict["metadata"]: 43 | resource_dict["metadata"]["annotations"].pop( 44 | "kubectl.kubernetes.io/last-applied-configuration", None 45 | ) 46 | 47 | 48 | def add_kv_config_to_resource(config, dict_part, *converters): 49 | if type(config) is tuple or type(config) is list: 50 | for config_str in config: 51 | for converter in converters: 52 | config_str = converter(config_str) 53 | config_list = get_kv_config_list(config_str) 54 | dict_part[config_list[0]] = convert_string_to_type(config_list[1]) 55 | else: 56 | for converter in converters: 57 | config = converter(config) 58 | config_list = get_kv_config_list(config) 59 | dict_part[config_list[0]] = convert_string_to_type(config_list[1]) 60 | 61 | 62 | def add_properties_config_to_resource(properties, dict_part, *converters): 63 | for property_item in properties.items(): 64 | for converter in converters: 65 | property_item = converter(property_item) 66 | if property_item is not None: 67 | dict_part[property_item[0]] = convert_string_to_type( 68 | property_item[1].data 69 | ) 70 | 71 | 72 | def get_kv_config_list(config_str): 73 | return get_list_by_split_string(config_str, EQUALS) 74 | 75 | 76 | def get_config_list(config_str): 77 | if config_str is None: 78 | return list() 79 | else: 80 | return get_list_by_split_string(config_str, COMMA) 81 | 82 | 83 | def delete_resource_config(config, dict_part, *converters): 84 | if type(config) is tuple or type(config) is list: 85 | for config_str in config: 86 | for converter in converters: 87 | config_str = converter(config_str) 88 | dict_part.pop(config_str, None) 89 | else: 90 | dict_part.pop(config, None) 91 | 92 | 93 | def resource_exists( 94 | resource_type=None, resource_name=None, cluster=None, namespace=None 95 | ): 96 | command = Kubectl().get().resource(resource_type).namespace(namespace) 97 | 98 | if cluster is not None: 99 | command = command.label(f"strimzi.io/cluster={cluster}") 100 | 101 | return resource_name in os.popen(command.build()).read() 102 | 103 | 104 | def get_resource_yaml( 105 | resource_type=None, resource_name=None, cluster=None, namespace=None 106 | ): 107 | command = ( 108 | Kubectl() 109 | .get() 110 | .resource(resource_type, resource_name) 111 | .namespace(namespace) 112 | .output("yaml") 113 | ) 114 | 115 | if cluster is not None: 116 | command = command.label(f"strimzi.io/cluster={cluster}") 117 | 118 | return os.popen(command.build()).read() 119 | 120 | 121 | def get_resource_as_stream( 122 | resource_type=None, resource_name=None, cluster=None, namespace=None 123 | ): 124 | resource_yaml = get_resource_yaml(resource_type, resource_name, cluster, namespace) 125 | 126 | if not resource_yaml: 127 | raise click.exceptions.Exit(1) 128 | 129 | return io.StringIO(resource_yaml) 130 | 131 | 132 | def create_temp_file(stream): 133 | temp_file = tempfile.NamedTemporaryFile(mode="w+") 134 | temp_file.write(stream) 135 | temp_file.flush() 136 | return temp_file 137 | 138 | 139 | def open_file_in_system_editor(file): 140 | call([os.environ.get("EDITOR", "vim"), file]) 141 | 142 | 
143 | def transfer_file_to_container( 144 | source_file_path, dest_file_path, container, pod, namespace 145 | ): 146 | os.system( 147 | Kubectl() 148 | .cp(source_file_path, f"{namespace}/{pod}:" + dest_file_path) 149 | .container(container) 150 | .build() 151 | ) 152 | 153 | 154 | def apply_client_config_from_file( 155 | native_command, config_file_path, config_file_flag, container, pod, namespace 156 | ): 157 | port = KAFKA_PORT 158 | delete_file_command = "" 159 | with open(config_file_path) as file: 160 | temp_file = create_temp_file(file.read()) 161 | lines = [] 162 | with open(temp_file.name) as temp_file: 163 | for cnt, producer_property in enumerate(temp_file): 164 | producer_property = producer_property.strip() 165 | if "security.protocol" in producer_property: 166 | producer_property_list = get_kv_config_list(producer_property) 167 | if producer_property_list[1] == KAFKA_SSL: 168 | port = KAFKA_SECURE_PORT 169 | if ( 170 | "ssl.truststore.location" in producer_property 171 | or "ssl.keystore.location" in producer_property 172 | ): 173 | producer_property_list = get_kv_config_list(producer_property) 174 | file_path = producer_property_list[1] 175 | new_file_path = "/tmp/" + ntpath.basename(file_path) 176 | transfer_file_to_container( 177 | file_path, new_file_path, container, pod, namespace 178 | ) 179 | producer_property = producer_property_list[0] + "=" + new_file_path 180 | delete_file_command = ( 181 | delete_file_command 182 | + "rm -rf" 183 | + SPACE 184 | + new_file_path 185 | + SEMICOLON 186 | ) 187 | lines.append(producer_property) 188 | with open(temp_file.name, "w") as temp_file: 189 | for line in lines: 190 | temp_file.write(line + NEW_LINE) 191 | new_config_file_path = "/tmp/" + ntpath.basename(config_file_path) 192 | transfer_file_to_container( 193 | temp_file.name, new_config_file_path, container, pod, namespace 194 | ) 195 | delete_file_command = ( 196 | delete_file_command + "rm -rf" + SPACE + new_config_file_path + SEMICOLON 197 | ) 198 | native_command = ( 199 | native_command + SPACE + config_file_flag + SPACE + new_config_file_path 200 | ) 201 | temp_file.close() 202 | return ( 203 | native_command.format_map(SafeDict(port=port)) + SEMICOLON + delete_file_command 204 | ) 205 | 206 | 207 | def get_properties_from_file(file): 208 | properties = Properties() 209 | properties.load(file.read()) 210 | return properties 211 | 212 | 213 | class SafeDict(dict): 214 | def __missing__(self, key): 215 | return "{" + key + "}" 216 | -------------------------------------------------------------------------------- /kfk/config.py: -------------------------------------------------------------------------------- 1 | import os 2 | import platform 3 | import sys 4 | from pathlib import Path 5 | 6 | STRIMZI_VERSION = "0.41.0" 7 | KUBECTL_VERSION = "v1.29.3" 8 | 9 | 10 | def _get_processor_type(): 11 | if _is_64_bit(): 12 | if "arm" in platform.uname().machine or "aarch" in platform.uname().machine: 13 | return "arm64" 14 | else: 15 | return "amd64" 16 | else: 17 | if "arm" in platform.uname().machine: 18 | return "arm" 19 | else: 20 | return "386" 21 | 22 | 23 | def _is_64_bit(): 24 | return sys.maxsize > 2**32 25 | 26 | 27 | BASE_FOLDER = ".strimzi-kafka-cli" 28 | BASE_PATH = ( 29 | (str(Path.home()) + "/" + BASE_FOLDER) 30 | if os.environ.get("STRIMZI_KAFKA_CLI_BASE_PATH") is None 31 | else os.environ.get("STRIMZI_KAFKA_CLI_BASE_PATH") 32 | ) 33 | STRIMZI_PATH = ( 34 | (BASE_PATH + f"/strimzi-{STRIMZI_VERSION}") 35 | if os.environ.get("STRIMZI_KAFKA_CLI_STRIMZI_PATH") is None 36 
| else os.environ.get("STRIMZI_KAFKA_CLI_STRIMZI_PATH") 37 | ) 38 | STRIMZI_RELEASE_URL = ( 39 | f"https://github.com/strimzi/strimzi-kafka-operator/releases/" 40 | f"download/{STRIMZI_VERSION}/strimzi-{STRIMZI_VERSION}.tar.gz" 41 | ) 42 | 43 | KUBECTL = "kubectl" if platform.system().lower() != "windows" else "kubectl.exe" 44 | KUBECTL_PATH = ( 45 | (BASE_PATH + "/" + KUBECTL) 46 | if os.environ.get("STRIMZI_KAFKA_CLI_KUBECTL_PATH") is None 47 | else os.environ.get("STRIMZI_KAFKA_CLI_KUBECTL_PATH") 48 | ) 49 | PROCESSOR_TYPE = _get_processor_type() 50 | KUBECTL_RELEASE_URL = ( 51 | f"https://storage.googleapis.com/kubernetes-release/release/" 52 | f"{KUBECTL_VERSION}/bin/{platform.system().lower()}/" 53 | f"{PROCESSOR_TYPE}/{KUBECTL}" 54 | ) 55 | -------------------------------------------------------------------------------- /kfk/constants.py: -------------------------------------------------------------------------------- 1 | SPACE = " " 2 | COLON = ":" 3 | SEMICOLON = ";" 4 | AMPERSAND = "&" 5 | COMMA = "," 6 | EQUALS = "=" 7 | NEW_LINE = "\n" 8 | KAFKA_PORT = "9092" 9 | KAFKA_SECURE_PORT = "9093" 10 | KAFKA_SSL = "SSL" 11 | COMMON_NAME_PREFIX = "CN=" 12 | BROKER_TMP_FOLDER_PATH = "/tmp" 13 | CONNECT_OUTPUT_TYPE_DOCKER = "docker" 14 | EXTENSION_TAR_GZ = ".tar.gz" 15 | EXTENSION_JAR = ".jar" 16 | EXTENSION_ZIP = ".zip" 17 | TRUE = "true" 18 | 19 | 20 | class SpecialTexts: 21 | BROKER_CONFIG_FILE_USER_CONFIG_HEADER = ( 22 | "\n##########\n# User provided configuration\n##########\n" 23 | ) 24 | CONNECT_BOOTSTRAP_SERVERS = "bootstrap.servers" 25 | CONNECT_IMAGE = "image" 26 | CONNECT_PLUGIN_URL = "plugin.url" 27 | CONNECT_PLUGIN_PATH = "plugin.path" 28 | CONNECTOR_NAME = "name" 29 | CONNECTOR_TASKS_MAX = "tasks.max" 30 | CONNECTOR_CLASS = "connector.class" 31 | OPERATOR_ROLE_BINDING = "RoleBinding" 32 | OPERATOR_MY_PROJECT = "myproject" 33 | -------------------------------------------------------------------------------- /kfk/kubectl_command_builder.py: -------------------------------------------------------------------------------- 1 | from kfk.config import KUBECTL_PATH 2 | from kfk.constants import SPACE 3 | 4 | 5 | class Kubectl: 6 | def __init__(self): 7 | self.cmd_str = KUBECTL_PATH 8 | 9 | def version(self, *vals): 10 | self.cmd_str = self.cmd_str + SPACE + "version" 11 | for val in vals: 12 | self.cmd_str = self.cmd_str + SPACE + val 13 | return self 14 | 15 | def get(self): 16 | self.cmd_str = self.cmd_str + SPACE + "get" 17 | return self 18 | 19 | def describe(self): 20 | self.cmd_str = self.cmd_str + SPACE + "describe" 21 | return self 22 | 23 | def edit(self): 24 | self.cmd_str = self.cmd_str + SPACE + "edit" 25 | return self 26 | 27 | def exec(self, flag, pod_name): 28 | self.cmd_str = self.cmd_str + SPACE + "exec" + SPACE + flag + SPACE + pod_name 29 | return self 30 | 31 | def exec_command(self, command): 32 | self.cmd_str = ( 33 | self.cmd_str 34 | + SPACE 35 | + "--" 36 | + SPACE 37 | + "bash -c" 38 | + SPACE 39 | + '"' 40 | + command 41 | + '"' 42 | ) 43 | return self 44 | 45 | def cp(self, source_path, destination_path): 46 | self.cmd_str = ( 47 | self.cmd_str + SPACE + "cp" + SPACE + source_path + SPACE + destination_path 48 | ) 49 | return self 50 | 51 | def kafkas(self, *vals): 52 | return self.resource("kafkas", *vals) 53 | 54 | def kafkaconnects(self, *vals): 55 | return self.resource("kafkaconnects", *vals) 56 | 57 | def kafkaconnectors(self, *vals): 58 | return self.resource("kafkaconnectors", *vals) 59 | 60 | def kafkatopics(self, *vals): 61 | return 
self.resource("kafkatopics", *vals) 62 | 63 | def kafkausers(self, *vals): 64 | return self.resource("kafkausers", *vals) 65 | 66 | def configmap(self, *vals): 67 | return self.resource("configmap", *vals) 68 | 69 | def secret(self, *vals): 70 | return self.resource("secret", *vals) 71 | 72 | def resource(self, resource_name, *vals): 73 | self.cmd_str = self.cmd_str + SPACE + resource_name 74 | for val in vals: 75 | self.cmd_str = self.cmd_str + SPACE + val 76 | return self 77 | 78 | def label(self, val): 79 | self.cmd_str = self.cmd_str + SPACE + "-l" + SPACE + val 80 | return self 81 | 82 | def namespace(self, *vals): 83 | if len(vals) > 0 and vals[0]: 84 | self.cmd_str = self.cmd_str + SPACE + "-n" + SPACE + vals[0] 85 | else: 86 | self.cmd_str = self.cmd_str + SPACE + "--all-namespaces" 87 | return self 88 | 89 | def container(self, val): 90 | self.cmd_str = self.cmd_str + SPACE + "-c" + SPACE + val 91 | return self 92 | 93 | def output(self, val): 94 | self.cmd_str = self.cmd_str + SPACE + "-o" + SPACE + val 95 | return self 96 | 97 | def from_file(self, val): 98 | self.cmd_str = self.cmd_str + SPACE + "-f" + SPACE + val 99 | return self 100 | 101 | def build(self): 102 | return self.cmd_str 103 | -------------------------------------------------------------------------------- /kfk/kubernetes_commons.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import json 3 | import re 4 | import sys 5 | from os import path 6 | 7 | import yaml 8 | from kubernetes import client, config 9 | 10 | config.load_kube_config() 11 | api_client = client.ApiClient() 12 | 13 | 14 | def yaml_object_argument_filter(func): 15 | def inner(k8s_api, yml_object, kind, **kwargs): 16 | if kind != "custom_object": 17 | kwargs.pop("version") 18 | kwargs.pop("group") 19 | kwargs.pop("plural") 20 | return func(k8s_api, yml_object, kind, **kwargs) 21 | 22 | return inner 23 | 24 | 25 | def delete_object(name, resource_type, namespace): 26 | k8s_api = client.CoreV1Api(api_client) 27 | _delete_object(k8s_api, name, resource_type, namespace=namespace) 28 | print(f"{resource_type.capitalize()} `{name}` deleted.") 29 | 30 | 31 | def create_registry_secret( 32 | name: str, 33 | registry: str, 34 | username: str, 35 | password: str, 36 | ): 37 | core_api = client.CoreV1Api(api_client) 38 | 39 | auth = base64.b64encode(f"{username}:{password}".encode("utf-8")).decode("utf-8") 40 | 41 | docker_config_dict = { 42 | "auths": { 43 | registry: { 44 | "username": username, 45 | "password": password, 46 | "email": "", 47 | "auth": auth, 48 | } 49 | } 50 | } 51 | 52 | docker_config = base64.b64encode( 53 | json.dumps(docker_config_dict).encode("utf-8") 54 | ).decode("utf-8") 55 | 56 | core_api.create_namespaced_secret( 57 | namespace="default", 58 | body=client.V1Secret( 59 | metadata=client.V1ObjectMeta( 60 | name=name, 61 | ), 62 | type="kubernetes.io/dockerconfigjson", 63 | data={".dockerconfigjson": docker_config}, 64 | ), 65 | ) 66 | 67 | print(f"Registry Secret `{name}` created.") 68 | 69 | 70 | def create_using_yaml(file_path, namespace): 71 | _operate_using_yaml( 72 | api_client, 73 | file_path, 74 | "create", 75 | yaml_objects=None, 76 | verbose=True, 77 | namespace=namespace, 78 | ) 79 | 80 | 81 | def delete_using_yaml(file_path, namespace): 82 | _operate_using_yaml( 83 | api_client, 84 | file_path, 85 | "delete", 86 | yaml_objects=None, 87 | verbose=True, 88 | namespace=namespace, 89 | ) 90 | 91 | 92 | def replace_using_yaml(file_path, namespace): 93 | 
_operate_using_yaml( 94 | api_client, 95 | file_path, 96 | "replace", 97 | yaml_objects=None, 98 | verbose=True, 99 | namespace=namespace, 100 | ) 101 | 102 | 103 | def _operate_using_yaml( 104 | k8s_client, 105 | yaml_file=None, 106 | operation=None, 107 | yaml_objects=None, 108 | verbose=False, 109 | namespace="default", 110 | **kwargs, 111 | ): 112 | def _operate_with(objects): 113 | failures = [] 114 | k8s_objects = [] 115 | for yml_object in objects: 116 | if yml_object is None: 117 | continue 118 | try: 119 | created = _operate_using_dict( 120 | k8s_client, 121 | yml_object, 122 | operation, 123 | verbose, 124 | namespace=namespace, 125 | **kwargs, 126 | ) 127 | k8s_objects.append(created) 128 | except FailToExecuteError as failure: 129 | failures.extend(failure.api_exceptions) 130 | if failures: 131 | raise FailToExecuteError(failures) 132 | return k8s_objects 133 | 134 | if yaml_objects: 135 | yml_object_all = yaml_objects 136 | return _operate_with(yml_object_all) 137 | elif yaml_file: 138 | with open(path.abspath(yaml_file)) as f: 139 | yml_object_all = yaml.safe_load_all(f) 140 | return _operate_with(yml_object_all) 141 | else: 142 | raise ValueError( 143 | "One of `yaml_file` or `yaml_objects` arguments must be provided" 144 | ) 145 | 146 | 147 | def _operate_using_dict( 148 | k8s_client, yml_object, operation, verbose, namespace="default", **kwargs 149 | ): 150 | api_exceptions = [] 151 | if "List" in yml_object["kind"]: 152 | kind = yml_object["kind"].replace("List", "") 153 | for yml_doc in yml_object["items"]: 154 | if kind != "": 155 | yml_doc["apiVersion"] = yml_object["apiVersion"] 156 | yml_doc["kind"] = kind 157 | try: 158 | _operate_using_dict_single_object( 159 | k8s_client, 160 | yml_doc, 161 | operation, 162 | verbose, 163 | namespace=namespace, 164 | **kwargs, 165 | ) 166 | except client.rest.ApiException as api_exception: 167 | api_exceptions.append(api_exception) 168 | else: 169 | try: 170 | _operate_using_dict_single_object( 171 | k8s_client, 172 | yml_object, 173 | operation, 174 | verbose, 175 | namespace=namespace, 176 | **kwargs, 177 | ) 178 | except client.rest.ApiException as api_exception: 179 | api_exceptions.append(api_exception) 180 | 181 | if api_exceptions: 182 | raise FailToExecuteError(api_exceptions) 183 | 184 | 185 | def _operate_using_dict_single_object( 186 | k8s_client, yml_object, operation, verbose=False, namespace="default", **kwargs 187 | ): 188 | object_type = "" 189 | # get group and version from apiVersion 190 | group, _, version = yml_object["apiVersion"].partition("/") 191 | if version == "": 192 | version = group 193 | group = "core" 194 | # Take care for the case e.g. 
api_type is "apiextensions.k8s.io" 195 | group_prefix = "".join(group.rsplit(".k8s.io", 1)) 196 | # convert group name from DNS subdomain format to 197 | # python class name convention 198 | group_prefix = "".join(word.capitalize() for word in group_prefix.split(".")) 199 | func = "{0}{1}Api".format(group_prefix, version.capitalize()) 200 | 201 | try: 202 | k8s_api = getattr(client, func)(k8s_client) 203 | except AttributeError: 204 | func = "CustomObjectsApi" 205 | k8s_api = getattr(client, func)(k8s_client) 206 | object_type = "custom_object" 207 | 208 | kind = yml_object["kind"] 209 | kind_snake_case = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", kind) 210 | object_type_from_kind = re.sub( 211 | "([a-z0-9])([A-Z])", r"\1_\2", kind_snake_case 212 | ).lower() 213 | name = yml_object["metadata"]["name"] 214 | 215 | if not object_type: 216 | object_type = object_type_from_kind 217 | 218 | getattr(sys.modules[__name__], f"_{operation}_using_yaml_object")( 219 | k8s_api, 220 | yml_object, 221 | object_type, 222 | version=version, 223 | group=group, 224 | plural=kind.lower() + "s", 225 | namespace=namespace, 226 | ) 227 | if verbose: 228 | msg = f"{kind} `{name}` {operation}d." 229 | print(msg) 230 | 231 | 232 | @yaml_object_argument_filter 233 | def _create_using_yaml_object(k8s_api, yml_object, object_type, **kwargs): 234 | if hasattr(k8s_api, f"create_namespaced_{object_type}"): 235 | if "namespace" in yml_object["metadata"]: 236 | namespace = yml_object["metadata"]["namespace"] 237 | kwargs["namespace"] = namespace 238 | resp = getattr(k8s_api, f"create_namespaced_{object_type}")( 239 | body=yml_object, **kwargs 240 | ) 241 | else: 242 | kwargs.pop("namespace", None) 243 | resp = getattr(k8s_api, f"create_{object_type}")(body=yml_object, **kwargs) 244 | return resp 245 | 246 | 247 | @yaml_object_argument_filter 248 | def _delete_using_yaml_object(k8s_api, yml_object, object_type, **kwargs): 249 | if "namespace" in yml_object["metadata"]: 250 | namespace = yml_object["metadata"]["namespace"] 251 | kwargs["namespace"] = namespace 252 | name = yml_object["metadata"]["name"] 253 | _delete_object(k8s_api, name, object_type, **kwargs) 254 | 255 | 256 | @yaml_object_argument_filter 257 | def _replace_using_yaml_object(k8s_api, yml_object, object_type, **kwargs): 258 | if hasattr(k8s_api, f"replace_namespaced_{object_type}"): 259 | if "namespace" in yml_object["metadata"]: 260 | namespace = yml_object["metadata"]["namespace"] 261 | kwargs["namespace"] = namespace 262 | resp = getattr(k8s_api, f"replace_namespaced_{object_type}")( 263 | body=yml_object, **kwargs 264 | ) 265 | else: 266 | kwargs.pop("namespace", None) 267 | resp = getattr(k8s_api, f"replace_{object_type}")(body=yml_object, **kwargs) 268 | return resp 269 | 270 | 271 | def _delete_object(k8s_api, name, object_type, delete_options_version="V1", **kwargs): 272 | try: 273 | if hasattr(k8s_api, f"delete_namespaced_{object_type}"): 274 | resp = getattr(k8s_api, f"delete_namespaced_{object_type}")( 275 | name=name, 276 | body=getattr(client, f"{delete_options_version}DeleteOptions")( 277 | propagation_policy="Background", grace_period_seconds=5 278 | ), 279 | **kwargs, 280 | ) 281 | else: 282 | kwargs.pop("namespace", None) 283 | resp = getattr(k8s_api, f"delete_{object_type}")( 284 | name=name, 285 | body=getattr(client, f"{delete_options_version}DeleteOptions")( 286 | propagation_policy="Background", grace_period_seconds=5 287 | ), 288 | **kwargs, 289 | ) 290 | return resp 291 | except client.rest.ApiException as api_exception: 292 | if 
api_exception.reason != "Not Found": 293 | raise api_exception 294 | 295 | 296 | class FailToExecuteError(Exception): 297 | """An exception class raised when an error occurs while handling 298 | a YAML file during creation or deletion of a resource.""" 299 | 300 | def __init__(self, api_exceptions): 301 | self.api_exceptions = api_exceptions 302 | 303 | def __str__(self): 304 | msg = "" 305 | for api_exception in self.api_exceptions: 306 | msg += "Error from server ({0}):{1}".format( 307 | api_exception.reason, api_exception.body 308 | ) 309 | return msg 310 | -------------------------------------------------------------------------------- /kfk/main.py: -------------------------------------------------------------------------------- 1 | from kfk.commands.acls import acls 2 | from kfk.commands.clusters import clusters 3 | from kfk.commands.configs import configs 4 | from kfk.commands.connect.clusters import clusters 5 | from kfk.commands.connect.connectors import connectors 6 | from kfk.commands.console import console_consumer, console_producer 7 | from kfk.commands.env import env 8 | from kfk.commands.main import kfk 9 | from kfk.commands.operator import operator 10 | from kfk.commands.topics import topics 11 | from kfk.commands.users import users 12 | from kfk.setup import setup 13 | 14 | setup() 15 | -------------------------------------------------------------------------------- /kfk/messages.py: -------------------------------------------------------------------------------- 1 | class Messages: 2 | USER_PROVIDED_CONFIG_HEADER = ( 3 | "All user provided configs for brokers in the cluster are:" 4 | ) 5 | CLUSTER_CREATE_CONFIRMATION = ( 6 | "Are you sure you want to create the cluster with the saved configuration?" 7 | ) 8 | CLUSTER_DELETE_CONFIRMATION = "Are you sure you want to delete the cluster?" 
9 | IMAGE_REGISTRY_USER_NAME = "Image registry username" 10 | IMAGE_REGISTRY_PASSWORD = "Image registry password" 11 | 12 | 13 | class Errors: 14 | CONFIG_FILE_SHOULD_BE_PROVIDED = ( 15 | "A configuration file should be provided for connect cluster" 16 | ) 17 | NOT_A_VALID_URL = "Not a valid URL" 18 | NO_PLUGIN_TYPE_DETECTED = "No plugin type detected" 19 | -------------------------------------------------------------------------------- /kfk/option_extensions.py: -------------------------------------------------------------------------------- 1 | import click 2 | 3 | 4 | class NotRequiredIf(click.Option): 5 | # TODO: Refactor here 6 | 7 | def __init__(self, *args, **kwargs): 8 | self.options = kwargs.pop("options") 9 | assert self.options, "'options' parameter required" 10 | kwargs["help"] = ( 11 | kwargs.get("help", "") 12 | + " This argument is mutually exclusive with %s" % self.options 13 | ).strip() 14 | super(NotRequiredIf, self).__init__(*args, **kwargs) 15 | 16 | def handle_parse_result(self, ctx, opts, args): 17 | control_options_exist = False 18 | 19 | for options in self.options: 20 | control_options_exist = options in opts 21 | if control_options_exist is True: 22 | break 23 | 24 | if control_options_exist: 25 | self.required = None 26 | 27 | return super(NotRequiredIf, self).handle_parse_result(ctx, opts, args) 28 | 29 | 30 | class RequiredIf(click.Option): 31 | # TODO: Refactor here 32 | 33 | def __init__(self, *args, **kwargs): 34 | self.options = kwargs.pop("options") 35 | assert self.options, "'options' parameter required" 36 | kwargs["help"] = ( 37 | kwargs.get("help", "") 38 | + " This argument is mutually inclusive with %s" % self.options 39 | ).strip() 40 | super(RequiredIf, self).__init__(*args, **kwargs) 41 | 42 | def handle_parse_result(self, ctx, opts, args): 43 | control_options_exist = False 44 | 45 | for options in self.options: 46 | control_options_exist = options in opts 47 | if control_options_exist is True: 48 | break 49 | 50 | if control_options_exist: 51 | self.required = True 52 | else: 53 | self.required = None 54 | 55 | return super(RequiredIf, self).handle_parse_result(ctx, opts, args) 56 | -------------------------------------------------------------------------------- /kfk/setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | import ssl 3 | import stat 4 | import subprocess 5 | import tarfile 6 | from pathlib import Path 7 | 8 | import wget 9 | 10 | from kfk.config import ( 11 | BASE_PATH, 12 | KUBECTL_PATH, 13 | KUBECTL_RELEASE_URL, 14 | KUBECTL_VERSION, 15 | STRIMZI_PATH, 16 | STRIMZI_RELEASE_URL, 17 | STRIMZI_VERSION, 18 | ) 19 | from kfk.kubectl_command_builder import Kubectl 20 | 21 | ssl._create_default_https_context = ssl._create_unverified_context 22 | 23 | 24 | def setup(): 25 | download_kubectl_if_not_exists() 26 | update_kubectl_if_new_version_exists() 27 | download_strimzi_if_not_exists() 28 | 29 | 30 | def download_kubectl_if_not_exists(): 31 | if not os.path.exists(KUBECTL_PATH): 32 | print(f"Creating Strimzi Kafka CLI Dependencies folder: {BASE_PATH}\n") 33 | Path(BASE_PATH).mkdir(exist_ok=True) 34 | 35 | _download_kubectl() 36 | 37 | 38 | def update_kubectl_if_new_version_exists(): 39 | if ( 40 | os.path.exists(KUBECTL_PATH) 41 | and os.environ.get("STRIMZI_KAFKA_CLI_KUBECTL_VERSION") is None 42 | and os.environ.get("STRIMZI_KAFKA_CLI_KUBECTL_PATH") is None 43 | and KUBECTL_VERSION 44 | not in subprocess.check_output( 45 | Kubectl().version("--client=true").build(), 46 | 
shell=True, 47 | stderr=subprocess.STDOUT, 48 | ).decode("utf-8") 49 | ): 50 | os.rename(KUBECTL_PATH, KUBECTL_PATH + "_old") 51 | _download_kubectl() 52 | 53 | 54 | def _download_kubectl(): 55 | print(f"Downloading dependency: kubectl {KUBECTL_VERSION}...\n") 56 | wget.download(KUBECTL_RELEASE_URL, KUBECTL_PATH) 57 | print("\nDownload successfully completed!\n") 58 | current_stat = os.stat(KUBECTL_PATH) 59 | os.chmod(KUBECTL_PATH, current_stat.st_mode | stat.S_IEXEC) 60 | 61 | 62 | def download_strimzi_if_not_exists(): 63 | if not os.path.exists(STRIMZI_PATH): 64 | strimzi_tarfile_path = STRIMZI_PATH + ".tar.gz" 65 | 66 | print(f"Creating Strimzi Kafka CLI Dependencies folder: {BASE_PATH}\n") 67 | Path(BASE_PATH).mkdir(exist_ok=True) 68 | 69 | print(f"Downloading dependency: Strimzi {STRIMZI_VERSION}...\n") 70 | wget.download(STRIMZI_RELEASE_URL, strimzi_tarfile_path) 71 | print("\nDownload successfully completed!\n") 72 | print(f"Extracting Strimzi {STRIMZI_VERSION}...\n") 73 | tar = tarfile.open(strimzi_tarfile_path) 74 | tar.extractall(path=BASE_PATH) 75 | tar.close() 76 | os.remove(strimzi_tarfile_path) 77 | -------------------------------------------------------------------------------- /kfk/utils.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | 4 | def convert_string_to_type(str_val): 5 | str_val = str(str_val) 6 | if str_val.isdigit(): 7 | return int(str_val) 8 | elif is_number(str_val): 9 | return float(str_val) 10 | elif is_bool(str_val): 11 | return convert_string_to_boolean(str_val) 12 | else: 13 | return str_val 14 | 15 | 16 | def is_number(str_val): 17 | try: 18 | float(str_val) 19 | except ValueError: 20 | return False 21 | return True 22 | 23 | 24 | def is_bool(str_val): 25 | if str_val == "true" or str_val == "false": 26 | return True 27 | else: 28 | return False 29 | 30 | 31 | def convert_string_to_boolean(str_val): 32 | if str_val == "true": 33 | return True 34 | else: 35 | return False 36 | 37 | 38 | def snake_to_camel_case(snake_str): 39 | components = snake_str.split("_") 40 | return components[0] + "".join(x.title() for x in components[1:]) 41 | 42 | 43 | def get_list_by_split_string(str_val, split_char): 44 | # TODO: exception here 45 | return str_val.split(split_char) 46 | 47 | 48 | def is_valid_url(url): 49 | regex = re.compile( 50 | r"^(?:http|ftp)s?://" 51 | r"(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+" 52 | r"(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|localhost|" 53 | r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})" 54 | r"(?::\d+)?" 
55 | r"(?:/?|[/?]\S+)$", 56 | re.IGNORECASE, 57 | ) 58 | 59 | return re.match(regex, url) is not None 60 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "strimzi-kafka-cli" 3 | version = "0.1.0-alpha79" 4 | description = "Command Line Interface for Strimzi Kafka Operator" 5 | authors = [{ name = "Aykut Bulgu", email = "aykut@systemcraftsman.com" }] 6 | readme = "README.md" 7 | license = {text = "Apache-2.0"} 8 | requires-python = ">=3.9,<3.14" 9 | keywords = ["kafka", "strimzi", "cli", "operator", "kubernetes", "k8s", "openshift", "apache-kafka"] 10 | 11 | classifiers=[ 12 | 'Intended Audience :: Developers', 13 | 'Topic :: Software Development', 14 | 'License :: OSI Approved :: Apache Software License', 15 | 'Programming Language :: Python :: 3', 16 | 'Programming Language :: Python :: 3.9', 17 | 'Programming Language :: Python :: 3.10', 18 | 'Programming Language :: Python :: 3.11', 19 | 'Programming Language :: Python :: 3.12', 20 | 'Programming Language :: Python :: 3.13', 21 | 'Programming Language :: Python :: 3 :: Only', 22 | 'Operating System :: Microsoft :: Windows', 23 | 'Operating System :: POSIX :: Linux', 24 | 'Operating System :: POSIX', 25 | 'Operating System :: Unix', 26 | 'Operating System :: MacOS' 27 | ] 28 | 29 | dependencies = [ 30 | "click == 8.0.4", 31 | "pyyaml == 6.0.1", 32 | "wget == 3.2", 33 | "jproperties == 2.1.1", 34 | "kubernetes == 28.1.0", 35 | ] 36 | 37 | [project.optional-dependencies] 38 | dev = [ 39 | "twine", 40 | "flake8", 41 | "flake8-pyproject", 42 | "pytest", 43 | "wheel", 44 | "build", 45 | "isort", 46 | "black", 47 | "mypy", 48 | "pre-commit<4", 49 | "bandit", 50 | "autoflake" 51 | ] 52 | 53 | [tool.urls] 54 | Tracker = "https://github.com/systemcraftsman/strimzi-kafka-cli/issues" 55 | Homepage = "https://github.com/systemcraftsman/strimzi-kafka-cli" 56 | Repository = "https://github.com/systemcraftsman/strimzi-kafka-cli" 57 | Documentation = "https://github.com/systemcraftsman/strimzi-kafka-cli/blob/main/README.md" 58 | 59 | 60 | [tool.setuptools] 61 | package-dir = {"kfk" = "kfk"} 62 | include-package-data = true 63 | license-files = ["LICENSE"] 64 | zip-safe = true 65 | 66 | 67 | [tool.flake8] 68 | exclude = ".git,venv,.venv,env,.env,__pycache__,docs,dist" 69 | max-complexity = 10 70 | max-line-length = 88 71 | extend-ignore = """ 72 | W503, 73 | E203, 74 | E701, 75 | C901, 76 | """ 77 | per-file-ignores = """ 78 | ./kfk/main.py: F401, F811 79 | """ 80 | 81 | [tool.isort] 82 | line_length = 88 83 | profile = "black" 84 | 85 | [tool.bandit] 86 | target = ["tests", "kfk"] 87 | tests = ["B201", "B301"] 88 | 89 | [tool.autoflake] 90 | check = true 91 | imports = ["kfk"] 92 | 93 | 94 | [tool.black] 95 | target-version = ["py39", "py310", "py311", "py312", "py313"] 96 | line-length = 88 97 | include = '\.pyi?$' 98 | exclude = ''' 99 | /( 100 | \.git 101 | | \.hg 102 | | \.mypy_cache 103 | | \.tox 104 | | \.venv 105 | | _build 106 | | buck-out 107 | | build 108 | | dist 109 | | docs 110 | )/ 111 | ''' 112 | 113 | 114 | [build-system] 115 | requires = ["setuptools"] 116 | build-backend = "setuptools.build_meta" 117 | 118 | [project.scripts] 119 | kfk = "kfk.main:kfk" 120 | 121 | [tool.pytest.ini_options] 122 | addopts = [ 123 | "--import-mode=importlib", 124 | ] 125 | -------------------------------------------------------------------------------- /tests/files/client.properties: 
-------------------------------------------------------------------------------- 1 | security.protocol=SSL 2 | ssl.truststore.location=~/Desktop/truststore.jks 3 | ssl.truststore.password=123456 4 | ssl.keystore.location=~/Desktop/user.p12 5 | ssl.keystore.password=123456 6 | -------------------------------------------------------------------------------- /tests/files/connect.properties: -------------------------------------------------------------------------------- 1 | bootstrap.servers=my-cluster-kafka-bootstrap:9092 2 | key.converter=org.apache.kafka.connect.json.JsonConverter 3 | value.converter=org.apache.kafka.connect.json.JsonConverter 4 | key.converter.schemas.enable=true 5 | value.converter.schemas.enable=true 6 | offset.storage.topic=connect-cluster-offsets 7 | config.storage.topic=connect-cluster-configs 8 | status.storage.topic=connect-cluster-status 9 | config.storage.replication.factor=1 10 | offset.storage.replication.factor=1 11 | status.storage.replication.factor=1 12 | 13 | image=quay.io/systemcraftsman/test-connect-cluster:latest 14 | plugin.url=https://repo1.maven.org/maven2/io/debezium/debezium-connector-postgres/1.3.1.Final/debezium-connector-postgres-1.3.1.Final-plugin.tar.gz,https://github.com/jcustenborder/kafka-connect-twitter/releases/download/0.2.26/kafka-connect-twitter-0.2.26.tar.gz 15 | -------------------------------------------------------------------------------- /tests/files/connect_with_invalid_url.properties: -------------------------------------------------------------------------------- 1 | bootstrap.servers=my-cluster-kafka-bootstrap:9092 2 | key.converter=org.apache.kafka.connect.json.JsonConverter 3 | value.converter=org.apache.kafka.connect.json.JsonConverter 4 | key.converter.schemas.enable=true 5 | value.converter.schemas.enable=true 6 | offset.storage.topic=connect-cluster-offsets 7 | config.storage.topic=connect-cluster-configs 8 | status.storage.topic=connect-cluster-status 9 | config.storage.replication.factor=1 10 | offset.storage.replication.factor=1 11 | status.storage.replication.factor=1 12 | 13 | image=quay.io/systemcraftsman/test-connect-cluster:latest 14 | plugin.url=https://repo1.maven.org/maven2/io/debezium/debezium-connector-postgres/1.3.1.Final/debezium-connector-postgres-1.3.1.Final-plugin.tar.gz,notavalidurl 15 | -------------------------------------------------------------------------------- /tests/files/connect_with_only_image.properties: -------------------------------------------------------------------------------- 1 | bootstrap.servers=my-cluster-kafka-bootstrap:9092 2 | key.converter=org.apache.kafka.connect.json.JsonConverter 3 | value.converter=org.apache.kafka.connect.json.JsonConverter 4 | key.converter.schemas.enable=true 5 | value.converter.schemas.enable=true 6 | offset.storage.topic=connect-cluster-offsets 7 | config.storage.topic=connect-cluster-configs 8 | status.storage.topic=connect-cluster-status 9 | config.storage.replication.factor=1 10 | offset.storage.replication.factor=1 11 | status.storage.replication.factor=1 12 | 13 | image=quay.io/systemcraftsman/test-connect-cluster:latest 14 | -------------------------------------------------------------------------------- /tests/files/connect_with_zip_jar_plugins.properties: -------------------------------------------------------------------------------- 1 | bootstrap.servers=my-cluster-kafka-bootstrap:9092 2 | key.converter=org.apache.kafka.connect.json.JsonConverter 3 | value.converter=org.apache.kafka.connect.json.JsonConverter 4 | 
key.converter.schemas.enable=true 5 | value.converter.schemas.enable=true 6 | offset.storage.topic=connect-cluster-offsets 7 | config.storage.topic=connect-cluster-configs 8 | status.storage.topic=connect-cluster-status 9 | config.storage.replication.factor=1 10 | offset.storage.replication.factor=1 11 | status.storage.replication.factor=1 12 | 13 | image=quay.io/systemcraftsman/test-connect-cluster:latest 14 | plugin.url=https://test.com/file.zip,https://test.com/file.jar 15 | -------------------------------------------------------------------------------- /tests/files/file-stream-connector.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | name=local-file-source 17 | connector.class=FileStreamSource 18 | tasks.max=1 19 | file=test.txt 20 | topic=connect-test 21 | -------------------------------------------------------------------------------- /tests/files/twitter-connector.properties: -------------------------------------------------------------------------------- 1 | name=twitter-source-connector 2 | tasks.max=1 3 | connector.class=com.github.jcustenborder.kafka.connect.twitter.TwitterSourceConnector 4 | 5 | # Set these required values 6 | process.deletes=false 7 | filter.keywords=kafka 8 | kafka.status.topic=connect-twitter-status 9 | kafka.delete.topic=connect-twitter-delete 10 | # put your own credentials here - don't share with anyone 11 | twitter.oauth.consumerKey=aconsumerkey 12 | twitter.oauth.consumerSecret=aconsumersecret 13 | twitter.oauth.accessToken=anaccesstoken 14 | twitter.oauth.accessTokenSecret=anaccesstokensecret 15 | -------------------------------------------------------------------------------- /tests/files/twitter_connector_with_config_change.properties: -------------------------------------------------------------------------------- 1 | name=twitter-source-connector 2 | tasks.max=2 3 | connector.class=com.github.jcustenborder.kafka.connect.twitter.TwitterSourceConnector 4 | 5 | process.deletes=true 6 | test.config=test 7 | -------------------------------------------------------------------------------- /tests/files/yaml/kafka-config.yaml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | name: my-cluster-kafka-config 5 | namespace: kafka 6 | ownerReferences: 7 | - apiVersion: kafka.strimzi.io/v1beta1 8 | kind: Kafka 9 | name: my-cluster 10 | uid: 54436ba3-51c7-44aa-a117-c495e4e5e334 11 | controller: false 12 | blockOwnerDeletion: false 13 | labels: 14 | app.kubernetes.io/instance: my-cluster 15 | app.kubernetes.io/managed-by: strimzi-cluster-operator 16 | app.kubernetes.io/name: kafka 17 | 
app.kubernetes.io/part-of: strimzi-my-cluster 18 | strimzi.io/cluster: my-cluster 19 | strimzi.io/kind: Kafka 20 | strimzi.io/name: strimzi 21 | data: 22 | advertised-hostnames.config: >- 23 | PLAIN_9092_0://my-cluster-kafka-0.my-cluster-kafka-brokers.kafka.svc 24 | PLAIN_9092_1://my-cluster-kafka-1.my-cluster-kafka-brokers.kafka.svc 25 | PLAIN_9092_2://my-cluster-kafka-2.my-cluster-kafka-brokers.kafka.svc 26 | TLS_9093_0://my-cluster-kafka-0.my-cluster-kafka-brokers.kafka.svc 27 | TLS_9093_1://my-cluster-kafka-1.my-cluster-kafka-brokers.kafka.svc 28 | TLS_9093_2://my-cluster-kafka-2.my-cluster-kafka-brokers.kafka.svc 29 | advertised-ports.config: >- 30 | PLAIN_9092_0://9092 PLAIN_9092_1://9092 PLAIN_9092_2://9092 31 | TLS_9093_0://9093 TLS_9093_1://9093 TLS_9093_2://9093 32 | listeners.config: PLAIN_9092 TLS_9093 33 | log4j.properties: > 34 | # Do not change this generated file. Logging can be configured in the 35 | corresponding Kubernetes resource. 36 | 37 | log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender 38 | 39 | log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout 40 | 41 | log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} %p %m (%c) 42 | [%t]%n 43 | 44 | kafka.root.logger.level=INFO 45 | 46 | log4j.rootLogger=${kafka.root.logger.level}, CONSOLE 47 | 48 | log4j.logger.org.I0Itec.zkclient.ZkClient=INFO 49 | 50 | log4j.logger.org.apache.zookeeper=INFO 51 | 52 | log4j.logger.kafka=INFO 53 | 54 | log4j.logger.org.apache.kafka=INFO 55 | 56 | log4j.logger.kafka.request.logger=WARN, CONSOLE 57 | 58 | log4j.logger.kafka.network.Processor=OFF 59 | 60 | log4j.logger.kafka.server.KafkaApis=OFF 61 | 62 | log4j.logger.kafka.network.RequestChannel$=WARN 63 | 64 | log4j.logger.kafka.controller=TRACE 65 | 66 | log4j.logger.kafka.log.LogCleaner=INFO 67 | 68 | log4j.logger.state.change.logger=TRACE 69 | 70 | log4j.logger.kafka.authorizer.logger=INFO 71 | server.config: >- 72 | ############################## 73 | 74 | ############################## 75 | 76 | # This file is automatically generated by the Strimzi Cluster Operator 77 | 78 | # Any changes to this file will be ignored and overwritten! 
79 | 80 | ############################## 81 | 82 | ############################## 83 | 84 | 85 | ########## 86 | 87 | # Broker ID 88 | 89 | ########## 90 | 91 | broker.id=${STRIMZI_BROKER_ID} 92 | 93 | 94 | ########## 95 | 96 | # Zookeeper 97 | 98 | ########## 99 | 100 | zookeeper.connect=my-cluster-zookeeper-client:2181 101 | 102 | zookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty 103 | 104 | zookeeper.ssl.client.enable=true 105 | 106 | zookeeper.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12 107 | 108 | zookeeper.ssl.keystore.password=${CERTS_STORE_PASSWORD} 109 | 110 | zookeeper.ssl.keystore.type=PKCS12 111 | 112 | zookeeper.ssl.truststore.location=/tmp/kafka/cluster.truststore.p12 113 | 114 | zookeeper.ssl.truststore.password=${CERTS_STORE_PASSWORD} 115 | 116 | zookeeper.ssl.truststore.type=PKCS12 117 | 118 | 119 | ########## 120 | 121 | # Kafka message logs configuration 122 | 123 | ########## 124 | 125 | log.dirs=/var/lib/kafka/data/kafka-log${STRIMZI_BROKER_ID} 126 | 127 | 128 | ########## 129 | 130 | # Replication listener 131 | 132 | ########## 133 | 134 | listener.name.replication-9091.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12 135 | 136 | listener.name.replication-9091.ssl.keystore.password=${CERTS_STORE_PASSWORD} 137 | 138 | listener.name.replication-9091.ssl.keystore.type=PKCS12 139 | 140 | listener.name.replication-9091.ssl.truststore.location=/tmp/kafka/cluster.truststore.p12 141 | 142 | listener.name.replication-9091.ssl.truststore.password=${CERTS_STORE_PASSWORD} 143 | 144 | listener.name.replication-9091.ssl.truststore.type=PKCS12 145 | 146 | listener.name.replication-9091.ssl.client.auth=required 147 | 148 | 149 | ########## 150 | 151 | # Listener configuration: PLAIN-9092 152 | 153 | ########## 154 | 155 | 156 | ########## 157 | 158 | # Listener configuration: TLS-9093 159 | 160 | ########## 161 | 162 | listener.name.tls-9093.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12 163 | 164 | listener.name.tls-9093.ssl.keystore.password=${CERTS_STORE_PASSWORD} 165 | 166 | listener.name.tls-9093.ssl.keystore.type=PKCS12 167 | 168 | 169 | 170 | ########## 171 | 172 | # Common listener configuration 173 | 174 | ########## 175 | 176 | listeners=REPLICATION-9091://0.0.0.0:9091,PLAIN-9092://0.0.0.0:9092,TLS-9093://0.0.0.0:9093 177 | 178 | advertised.listeners=REPLICATION-9091://my-cluster-kafka-${STRIMZI_BROKER_ID}.my-cluster-kafka-brokers.kafka.svc:9091,PLAIN-9092://${STRIMZI_PLAIN_9092_ADVERTISED_HOSTNAME}:${STRIMZI_PLAIN_9092_ADVERTISED_PORT},TLS-9093://${STRIMZI_TLS_9093_ADVERTISED_HOSTNAME}:${STRIMZI_TLS_9093_ADVERTISED_PORT} 179 | 180 | listener.security.protocol.map=REPLICATION-9091:SSL,PLAIN-9092:PLAINTEXT,TLS-9093:SSL 181 | 182 | inter.broker.listener.name=REPLICATION-9091 183 | 184 | sasl.enabled.mechanisms= 185 | 186 | ssl.secure.random.implementation=SHA1PRNG 187 | 188 | ssl.endpoint.identification.algorithm=HTTPS 189 | 190 | 191 | ########## 192 | 193 | # User provided configuration 194 | 195 | ########## 196 | 197 | log.message.format.version=2.6 198 | 199 | offsets.topic.replication.factor=3 200 | 201 | transaction.state.log.min.isr=2 202 | 203 | transaction.state.log.replication.factor=3 204 | -------------------------------------------------------------------------------- /tests/files/yaml/kafka-connect-connector-file-stream.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: KafkaConnector 3 | metadata: 4 | labels: 5 | strimzi.io/cluster: 
my-connect-cluster 6 | name: local-file-source 7 | spec: 8 | class: FileStreamSource 9 | config: 10 | file: test.txt 11 | topic: connect-test 12 | tasksMax: 1 13 | -------------------------------------------------------------------------------- /tests/files/yaml/kafka-connect-connector-twitter.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: KafkaConnector 3 | metadata: 4 | labels: 5 | strimzi.io/cluster: my-connect-cluster 6 | name: twitter-source-connector 7 | spec: 8 | class: com.github.jcustenborder.kafka.connect.twitter.TwitterSourceConnector 9 | config: 10 | filter.keywords: kafka 11 | kafka.delete.topic: connect-twitter-delete 12 | kafka.status.topic: connect-twitter-status 13 | process.deletes: false 14 | twitter.oauth.accessToken: anaccesstoken 15 | twitter.oauth.accessTokenSecret: anaccesstokensecret 16 | twitter.oauth.consumerKey: aconsumerkey 17 | twitter.oauth.consumerSecret: aconsumersecret 18 | tasksMax: 1 19 | -------------------------------------------------------------------------------- /tests/files/yaml/kafka-connect-connector-twitter_with_config_change.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: KafkaConnector 3 | metadata: 4 | labels: 5 | strimzi.io/cluster: my-connect-cluster 6 | name: twitter-source-connector 7 | spec: 8 | class: com.github.jcustenborder.kafka.connect.twitter.TwitterSourceConnector 9 | config: 10 | process.deletes: true 11 | test.config: test 12 | tasksMax: 2 13 | -------------------------------------------------------------------------------- /tests/files/yaml/kafka-connect.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: KafkaConnect 3 | metadata: 4 | annotations: 5 | strimzi.io/use-connector-resources: 'true' 6 | name: my-connect-cluster 7 | spec: 8 | bootstrapServers: my-cluster-kafka-bootstrap:9092 9 | build: 10 | output: 11 | image: quay.io/systemcraftsman/test-connect-cluster:latest 12 | pushSecret: my-connect-cluster-push-secret 13 | type: docker 14 | plugins: 15 | - artifacts: 16 | - type: tgz 17 | url: https://repo1.maven.org/maven2/io/debezium/debezium-connector-postgres/1.3.1.Final/debezium-connector-postgres-1.3.1.Final-plugin.tar.gz 18 | name: connector-1 19 | - artifacts: 20 | - type: tgz 21 | url: https://github.com/jcustenborder/kafka-connect-twitter/releases/download/0.2.26/kafka-connect-twitter-0.2.26.tar.gz 22 | name: connector-2 23 | config: 24 | config.storage.replication.factor: 1 25 | config.storage.topic: connect-cluster-configs 26 | key.converter: org.apache.kafka.connect.json.JsonConverter 27 | key.converter.schemas.enable: true 28 | offset.storage.replication.factor: 1 29 | offset.storage.topic: connect-cluster-offsets 30 | status.storage.replication.factor: 1 31 | status.storage.topic: connect-cluster-status 32 | value.converter: org.apache.kafka.connect.json.JsonConverter 33 | value.converter.schemas.enable: true 34 | replicas: 1 35 | version: 3.7.0 36 | -------------------------------------------------------------------------------- /tests/files/yaml/kafka-connect_with_image.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: KafkaConnect 3 | metadata: 4 | annotations: 5 | strimzi.io/use-connector-resources: 'true' 6 | name: my-connect-cluster 7 | spec: 8 | 
bootstrapServers: my-cluster-kafka-bootstrap:9092 9 | config: 10 | config.storage.replication.factor: 1 11 | config.storage.topic: connect-cluster-configs 12 | key.converter: org.apache.kafka.connect.json.JsonConverter 13 | key.converter.schemas.enable: true 14 | offset.storage.replication.factor: 1 15 | offset.storage.topic: connect-cluster-offsets 16 | status.storage.replication.factor: 1 17 | status.storage.topic: connect-cluster-status 18 | value.converter: org.apache.kafka.connect.json.JsonConverter 19 | value.converter.schemas.enable: true 20 | image: quay.io/systemcraftsman/test-connect-cluster:latest 21 | replicas: 1 22 | version: 3.7.0 23 | -------------------------------------------------------------------------------- /tests/files/yaml/kafka-connect_with_three_replicas.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: KafkaConnect 3 | metadata: 4 | annotations: 5 | strimzi.io/use-connector-resources: 'true' 6 | name: my-connect-cluster 7 | spec: 8 | bootstrapServers: my-cluster-kafka-bootstrap:9092 9 | build: 10 | output: 11 | image: quay.io/systemcraftsman/test-connect-cluster:latest 12 | pushSecret: my-connect-cluster-push-secret 13 | type: docker 14 | plugins: 15 | - artifacts: 16 | - type: tgz 17 | url: https://repo1.maven.org/maven2/io/debezium/debezium-connector-postgres/1.3.1.Final/debezium-connector-postgres-1.3.1.Final-plugin.tar.gz 18 | name: connector-1 19 | - artifacts: 20 | - type: tgz 21 | url: https://github.com/jcustenborder/kafka-connect-twitter/releases/download/0.2.26/kafka-connect-twitter-0.2.26.tar.gz 22 | name: connector-2 23 | config: 24 | config.storage.replication.factor: 1 25 | config.storage.topic: connect-cluster-configs 26 | key.converter: org.apache.kafka.connect.json.JsonConverter 27 | key.converter.schemas.enable: true 28 | offset.storage.replication.factor: 1 29 | offset.storage.topic: connect-cluster-offsets 30 | status.storage.replication.factor: 1 31 | status.storage.topic: connect-cluster-status 32 | value.converter: org.apache.kafka.connect.json.JsonConverter 33 | value.converter.schemas.enable: true 34 | replicas: 3 35 | version: 3.7.0 36 | -------------------------------------------------------------------------------- /tests/files/yaml/kafka-connect_with_zip_jar_plugins.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: KafkaConnect 3 | metadata: 4 | annotations: 5 | strimzi.io/use-connector-resources: 'true' 6 | name: my-connect-cluster 7 | spec: 8 | bootstrapServers: my-cluster-kafka-bootstrap:9092 9 | build: 10 | output: 11 | image: quay.io/systemcraftsman/test-connect-cluster:latest 12 | pushSecret: my-connect-cluster-push-secret 13 | type: docker 14 | plugins: 15 | - artifacts: 16 | - type: zip 17 | url: https://test.com/file.zip 18 | name: connector-1 19 | - artifacts: 20 | - type: jar 21 | url: https://test.com/file.jar 22 | name: connector-2 23 | config: 24 | config.storage.replication.factor: 1 25 | config.storage.topic: connect-cluster-configs 26 | key.converter: org.apache.kafka.connect.json.JsonConverter 27 | key.converter.schemas.enable: true 28 | offset.storage.replication.factor: 1 29 | offset.storage.topic: connect-cluster-offsets 30 | status.storage.replication.factor: 1 31 | status.storage.topic: connect-cluster-status 32 | value.converter: org.apache.kafka.connect.json.JsonConverter 33 | value.converter.schemas.enable: true 34 | replicas: 1 35 | 
version: 3.7.0 36 | -------------------------------------------------------------------------------- /tests/files/yaml/kafka-ephemeral.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: Kafka 3 | metadata: 4 | name: my-cluster 5 | spec: 6 | kafka: 7 | version: 3.7.0 8 | replicas: 3 9 | listeners: 10 | - name: plain 11 | port: 9092 12 | type: internal 13 | tls: false 14 | - name: tls 15 | port: 9093 16 | type: internal 17 | tls: true 18 | config: 19 | offsets.topic.replication.factor: 3 20 | transaction.state.log.replication.factor: 3 21 | transaction.state.log.min.isr: 2 22 | default.replication.factor: 3 23 | inter.broker.protocol.version: '3.7' 24 | min.insync.replicas: 2 25 | storage: 26 | type: ephemeral 27 | zookeeper: 28 | replicas: 3 29 | storage: 30 | type: ephemeral 31 | entityOperator: 32 | topicOperator: {} 33 | userOperator: {} 34 | -------------------------------------------------------------------------------- /tests/files/yaml/kafka-ephemeral_name_updated.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: Kafka 3 | metadata: 4 | name: my-cluster-with-new-name 5 | spec: 6 | entityOperator: 7 | topicOperator: {} 8 | userOperator: {} 9 | kafka: 10 | config: 11 | default.replication.factor: 3 12 | inter.broker.protocol.version: '3.7' 13 | min.insync.replicas: 2 14 | offsets.topic.replication.factor: 3 15 | transaction.state.log.min.isr: 2 16 | transaction.state.log.replication.factor: 3 17 | listeners: 18 | - name: plain 19 | port: 9092 20 | tls: false 21 | type: internal 22 | - name: tls 23 | port: 9093 24 | tls: true 25 | type: internal 26 | replicas: 3 27 | storage: 28 | type: ephemeral 29 | version: 3.7.0 30 | zookeeper: 31 | replicas: 3 32 | storage: 33 | type: ephemeral 34 | -------------------------------------------------------------------------------- /tests/files/yaml/kafka-ephemeral_two_additional_configs_deleted.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: Kafka 3 | metadata: 4 | name: my-cluster 5 | spec: 6 | entityOperator: 7 | topicOperator: {} 8 | userOperator: {} 9 | kafka: 10 | config: 11 | default.replication.factor: 3 12 | inter.broker.protocol.version: '3.7' 13 | min.insync.replicas: 2 14 | offsets.topic.replication.factor: 3 15 | transaction.state.log.min.isr: 2 16 | transaction.state.log.replication.factor: 3 17 | listeners: 18 | - name: plain 19 | port: 9092 20 | tls: false 21 | type: internal 22 | - name: tls 23 | port: 9093 24 | tls: true 25 | type: internal 26 | replicas: 3 27 | storage: 28 | type: ephemeral 29 | version: 3.7.0 30 | zookeeper: 31 | replicas: 3 32 | storage: 33 | type: ephemeral 34 | -------------------------------------------------------------------------------- /tests/files/yaml/kafka-ephemeral_with_one_additional_config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: Kafka 3 | metadata: 4 | name: my-cluster 5 | spec: 6 | entityOperator: 7 | topicOperator: {} 8 | userOperator: {} 9 | kafka: 10 | config: 11 | default.replication.factor: 3 12 | inter.broker.protocol.version: '3.7' 13 | min.insync.replicas: 2 14 | offsets.topic.replication.factor: 3 15 | transaction.state.log.min.isr: 2 16 | transaction.state.log.replication.factor: 3 17 | unclean.leader.election.enable: true 18 | 
listeners: 19 | - name: plain 20 | port: 9092 21 | tls: false 22 | type: internal 23 | - name: tls 24 | port: 9093 25 | tls: true 26 | type: internal 27 | replicas: 3 28 | storage: 29 | type: ephemeral 30 | version: 3.7.0 31 | zookeeper: 32 | replicas: 3 33 | storage: 34 | type: ephemeral 35 | -------------------------------------------------------------------------------- /tests/files/yaml/kafka-ephemeral_with_one_replica.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: Kafka 3 | metadata: 4 | name: my-cluster-with-new-name 5 | spec: 6 | entityOperator: 7 | topicOperator: {} 8 | userOperator: {} 9 | kafka: 10 | config: 11 | default.replication.factor: 1 12 | inter.broker.protocol.version: '3.7' 13 | min.insync.replicas: 1 14 | offsets.topic.replication.factor: 1 15 | transaction.state.log.min.isr: 1 16 | transaction.state.log.replication.factor: 1 17 | listeners: 18 | - name: plain 19 | port: 9092 20 | tls: false 21 | type: internal 22 | - name: tls 23 | port: 9093 24 | tls: true 25 | type: internal 26 | replicas: 1 27 | storage: 28 | type: ephemeral 29 | version: 3.7.0 30 | zookeeper: 31 | replicas: 3 32 | storage: 33 | type: ephemeral 34 | -------------------------------------------------------------------------------- /tests/files/yaml/kafka-ephemeral_with_one_replica_one_zk_replica.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: Kafka 3 | metadata: 4 | name: my-cluster-with-new-name 5 | spec: 6 | entityOperator: 7 | topicOperator: {} 8 | userOperator: {} 9 | kafka: 10 | config: 11 | default.replication.factor: 1 12 | inter.broker.protocol.version: '3.7' 13 | min.insync.replicas: 1 14 | offsets.topic.replication.factor: 1 15 | transaction.state.log.min.isr: 1 16 | transaction.state.log.replication.factor: 1 17 | listeners: 18 | - name: plain 19 | port: 9092 20 | tls: false 21 | type: internal 22 | - name: tls 23 | port: 9093 24 | tls: true 25 | type: internal 26 | replicas: 1 27 | storage: 28 | type: ephemeral 29 | version: 3.7.0 30 | zookeeper: 31 | replicas: 1 32 | storage: 33 | type: ephemeral 34 | -------------------------------------------------------------------------------- /tests/files/yaml/kafka-ephemeral_with_two_additional_configs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: Kafka 3 | metadata: 4 | name: my-cluster 5 | spec: 6 | entityOperator: 7 | topicOperator: {} 8 | userOperator: {} 9 | kafka: 10 | config: 11 | default.replication.factor: 3 12 | inter.broker.protocol.version: '3.7' 13 | log.retention.hours: 168 14 | min.insync.replicas: 2 15 | offsets.topic.replication.factor: 3 16 | transaction.state.log.min.isr: 2 17 | transaction.state.log.replication.factor: 3 18 | unclean.leader.election.enable: true 19 | listeners: 20 | - name: plain 21 | port: 9092 22 | tls: false 23 | type: internal 24 | - name: tls 25 | port: 9093 26 | tls: true 27 | type: internal 28 | replicas: 3 29 | storage: 30 | type: ephemeral 31 | version: 3.7.0 32 | zookeeper: 33 | replicas: 3 34 | storage: 35 | type: ephemeral 36 | -------------------------------------------------------------------------------- /tests/files/yaml/kafka-ephemeral_with_two_replicas.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: Kafka 3 | metadata: 4 | name: 
my-cluster-with-new-name 5 | spec: 6 | entityOperator: 7 | topicOperator: {} 8 | userOperator: {} 9 | kafka: 10 | config: 11 | default.replication.factor: 2 12 | inter.broker.protocol.version: '3.7' 13 | min.insync.replicas: 1 14 | offsets.topic.replication.factor: 2 15 | transaction.state.log.min.isr: 1 16 | transaction.state.log.replication.factor: 2 17 | listeners: 18 | - name: plain 19 | port: 9092 20 | tls: false 21 | type: internal 22 | - name: tls 23 | port: 9093 24 | tls: true 25 | type: internal 26 | replicas: 2 27 | storage: 28 | type: ephemeral 29 | version: 3.7.0 30 | zookeeper: 31 | replicas: 3 32 | storage: 33 | type: ephemeral 34 | -------------------------------------------------------------------------------- /tests/files/yaml/kubeconfig: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | clusters: 3 | - cluster: 4 | certificate-authority-data: testauthdata 5 | server: localhost:6443 6 | name: DEV 7 | contexts: 8 | - context: 9 | cluster: DEV 10 | user: testuser 11 | name: testcontext 12 | current-context: testcontext 13 | kind: Config 14 | preferences: {} 15 | users: 16 | - name: testuser 17 | user: 18 | client-certificate-data: testcertdata 19 | client-key-data: testkeydata 20 | -------------------------------------------------------------------------------- /tests/files/yaml/topic.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: KafkaTopic 3 | metadata: 4 | labels: 5 | strimzi.io/cluster: my-cluster 6 | name: my-topic 7 | spec: 8 | config: 9 | retention.ms: 7200000 10 | segment.bytes: 1073741824 11 | partitions: 12 12 | replicas: 3 13 | -------------------------------------------------------------------------------- /tests/files/yaml/topic_with_one_config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: KafkaTopic 3 | metadata: 4 | labels: 5 | strimzi.io/cluster: my-cluster 6 | name: my-topic 7 | spec: 8 | config: 9 | min.insync.replicas: 3 10 | retention.ms: 7200000 11 | segment.bytes: 1073741824 12 | partitions: 24 13 | replicas: 3 14 | -------------------------------------------------------------------------------- /tests/files/yaml/topic_with_two_configs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: KafkaTopic 3 | metadata: 4 | labels: 5 | strimzi.io/cluster: my-cluster 6 | name: my-topic 7 | spec: 8 | config: 9 | cleanup.policy: compact 10 | min.insync.replicas: 3 11 | retention.ms: 7200000 12 | segment.bytes: 1073741824 13 | partitions: 24 14 | replicas: 3 15 | -------------------------------------------------------------------------------- /tests/files/yaml/topic_without_config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: KafkaTopic 3 | metadata: 4 | labels: 5 | strimzi.io/cluster: my-cluster 6 | name: my-topic 7 | spec: 8 | config: 9 | retention.ms: 7200000 10 | segment.bytes: 1073741824 11 | partitions: 24 12 | replicas: 3 13 | -------------------------------------------------------------------------------- /tests/files/yaml/user_with_authentication_scram.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: KafkaUser 3 | metadata: 4 | labels: 5 | strimzi.io/cluster: 
my-cluster 6 | name: my-user 7 | spec: 8 | authentication: 9 | type: scram-sha-512 10 | -------------------------------------------------------------------------------- /tests/files/yaml/user_with_authentication_tls.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: KafkaUser 3 | metadata: 4 | labels: 5 | strimzi.io/cluster: my-cluster 6 | name: my-user 7 | spec: 8 | authentication: 9 | type: tls 10 | -------------------------------------------------------------------------------- /tests/files/yaml/user_with_authorization_with_one_topic_acl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: KafkaUser 3 | metadata: 4 | labels: 5 | strimzi.io/cluster: my-cluster 6 | name: my-user 7 | spec: 8 | authentication: 9 | type: tls 10 | authorization: 11 | acls: 12 | - host: '*' 13 | operation: Read 14 | resource: 15 | name: my-topic 16 | patternType: literal 17 | type: topic 18 | type: allow 19 | type: simple 20 | -------------------------------------------------------------------------------- /tests/files/yaml/user_with_authorization_with_one_topic_and_one_group_acls.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: KafkaUser 3 | metadata: 4 | labels: 5 | strimzi.io/cluster: my-cluster 6 | name: my-user 7 | spec: 8 | authentication: 9 | type: tls 10 | authorization: 11 | acls: 12 | - host: '*' 13 | operation: Read 14 | resource: 15 | name: my-topic 16 | patternType: literal 17 | type: topic 18 | type: allow 19 | - host: '*' 20 | operation: Read 21 | resource: 22 | name: my-group 23 | patternType: literal 24 | type: group 25 | type: allow 26 | type: simple 27 | -------------------------------------------------------------------------------- /tests/files/yaml/user_with_authorization_with_three_topic_acls.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: KafkaUser 3 | metadata: 4 | labels: 5 | strimzi.io/cluster: my-cluster 6 | name: my-user 7 | spec: 8 | authentication: 9 | type: tls 10 | authorization: 11 | acls: 12 | - host: '*' 13 | operation: Read 14 | resource: 15 | name: my-topic 16 | patternType: literal 17 | type: topic 18 | type: allow 19 | - host: '*' 20 | operation: Write 21 | resource: 22 | name: my-topic 23 | patternType: literal 24 | type: topic 25 | type: allow 26 | - host: '*' 27 | operation: Describe 28 | resource: 29 | name: my-topic 30 | patternType: literal 31 | type: topic 32 | type: allow 33 | type: simple 34 | -------------------------------------------------------------------------------- /tests/files/yaml/user_with_authorization_with_two_topic_acls.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: KafkaUser 3 | metadata: 4 | labels: 5 | strimzi.io/cluster: my-cluster 6 | name: my-user 7 | spec: 8 | authentication: 9 | type: tls 10 | authorization: 11 | acls: 12 | - host: '*' 13 | operation: Read 14 | resource: 15 | name: my-topic 16 | patternType: literal 17 | type: topic 18 | type: allow 19 | - host: '*' 20 | operation: Write 21 | resource: 22 | name: my-topic 23 | patternType: literal 24 | type: topic 25 | type: allow 26 | type: simple 27 | -------------------------------------------------------------------------------- 
/tests/files/yaml/user_with_authorization_with_two_topic_and_one_group_acls.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: KafkaUser 3 | metadata: 4 | labels: 5 | strimzi.io/cluster: my-cluster 6 | name: my-user 7 | spec: 8 | authentication: 9 | type: tls 10 | authorization: 11 | acls: 12 | - host: '*' 13 | operation: Read 14 | resource: 15 | name: my-topic 16 | patternType: literal 17 | type: topic 18 | type: allow 19 | - host: '*' 20 | operation: Write 21 | resource: 22 | name: my-topic 23 | patternType: literal 24 | type: topic 25 | type: allow 26 | - host: '*' 27 | operation: Read 28 | resource: 29 | name: my-group 30 | patternType: literal 31 | type: group 32 | type: allow 33 | type: simple 34 | -------------------------------------------------------------------------------- /tests/files/yaml/user_with_one_quota.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: KafkaUser 3 | metadata: 4 | labels: 5 | strimzi.io/cluster: my-cluster 6 | name: my-user 7 | spec: 8 | authentication: 9 | type: scram-sha-512 10 | quotas: 11 | requestPercentage: 55 12 | -------------------------------------------------------------------------------- /tests/files/yaml/user_with_quotas_empty.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: KafkaUser 3 | metadata: 4 | labels: 5 | strimzi.io/cluster: my-cluster 6 | name: my-user 7 | spec: 8 | authentication: 9 | type: scram-sha-512 10 | quotas: {} 11 | -------------------------------------------------------------------------------- /tests/files/yaml/user_with_two_quotas.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: KafkaUser 3 | metadata: 4 | labels: 5 | strimzi.io/cluster: my-cluster 6 | name: my-user 7 | spec: 8 | authentication: 9 | type: scram-sha-512 10 | quotas: 11 | consumerByteRate: 2097152 12 | requestPercentage: 55 13 | -------------------------------------------------------------------------------- /tests/test_config.py: -------------------------------------------------------------------------------- 1 | from collections import namedtuple 2 | from unittest import TestCase, mock 3 | 4 | from kfk.config import _get_processor_type 5 | 6 | 7 | def os_uname_mock(p_name_machine): 8 | OSUname = namedtuple("OSUname", ["machine"]) 9 | return OSUname(machine=p_name_machine) 10 | 11 | 12 | class TestKfkConfig(TestCase): 13 | @mock.patch("kfk.config._is_64_bit") 14 | @mock.patch("kfk.config.platform.uname") 15 | def test_processor_type_armhf_32bit(self, mock_os_uname, mock_is_64_bit): 16 | mock_os_uname.return_value = os_uname_mock("armhf") 17 | mock_is_64_bit.return_value = False 18 | assert _get_processor_type() == "arm" 19 | 20 | @mock.patch("kfk.config._is_64_bit") 21 | @mock.patch("kfk.config.platform.uname") 22 | def test_processor_type_armhf_64bit(self, mock_os_uname, mock_is_64_bit): 23 | mock_os_uname.return_value = os_uname_mock("armhf") 24 | mock_is_64_bit.return_value = True 25 | assert _get_processor_type() == "arm64" 26 | 27 | @mock.patch("kfk.config._is_64_bit") 28 | @mock.patch("kfk.config.platform.uname") 29 | def test_processor_type_aarch64_64bit(self, mock_os_uname, mock_is_64_bit): 30 | mock_os_uname.return_value = os_uname_mock("aarch64") 31 | mock_is_64_bit.return_value = True 32 | assert 
_get_processor_type() == "arm64" 33 | 34 | @mock.patch("kfk.config._is_64_bit") 35 | @mock.patch("kfk.config.os.uname") 36 | def test_processor_type_x86_64_64bit(self, mock_os_uname, mock_is_64_bit): 37 | mock_os_uname.return_value = os_uname_mock("x86_64") 38 | mock_is_64_bit.return_value = True 39 | assert _get_processor_type() == "amd64" 40 | 41 | @mock.patch("kfk.config._is_64_bit") 42 | @mock.patch("kfk.config.os.uname") 43 | def test_processor_type_x86_32bit(self, mock_os_uname, mock_is_64_bit): 44 | mock_os_uname.return_value = os_uname_mock("x86") 45 | mock_is_64_bit.return_value = False 46 | assert _get_processor_type() == "386" 47 | -------------------------------------------------------------------------------- /tests/test_connect_connectors_command.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase, mock 2 | 3 | from click.testing import CliRunner 4 | 5 | from kfk.commands.connect.connectors import connect 6 | from kfk.kubectl_command_builder import Kubectl 7 | 8 | 9 | class TestKfkConnectors(TestCase): 10 | def setUp(self): 11 | self.runner = CliRunner() 12 | self.cluster = "my-connect-cluster" 13 | self.namespace = "kafka" 14 | self.connector_config_file = "tests/files/twitter-connector.properties" 15 | self.connector = "twitter-source-connector" 16 | 17 | def test_no_option(self): 18 | result = self.runner.invoke( 19 | connect, 20 | [ 21 | "connectors", 22 | self.connector_config_file, 23 | "-c", 24 | self.cluster, 25 | "-n", 26 | self.namespace, 27 | ], 28 | ) 29 | assert result.exit_code == 1 30 | assert "Missing options: kfk connectors" in result.output 31 | 32 | @mock.patch("kfk.commands.connect.connectors.os") 33 | def test_list_connectors(self, mock_os): 34 | result = self.runner.invoke( 35 | connect, ["connectors", "--list", "-c", self.cluster, "-n", self.namespace] 36 | ) 37 | assert result.exit_code == 0 38 | mock_os.system.assert_called_with( 39 | Kubectl() 40 | .get() 41 | .kafkaconnectors() 42 | .label("strimzi.io/cluster={cluster}") 43 | .namespace(self.namespace) 44 | .build() 45 | .format(cluster=self.cluster) 46 | ) 47 | 48 | @mock.patch("kfk.commands.connect.connectors.os") 49 | def test_describe_connector(self, mock_os): 50 | result = self.runner.invoke( 51 | connect, 52 | [ 53 | "connectors", 54 | "--describe", 55 | "--connector", 56 | self.connector, 57 | "-c", 58 | self.cluster, 59 | "-n", 60 | self.namespace, 61 | ], 62 | ) 63 | assert result.exit_code == 0 64 | mock_os.system.assert_called_with( 65 | Kubectl() 66 | .describe() 67 | .kafkaconnectors(self.connector) 68 | .namespace(self.namespace) 69 | .build() 70 | ) 71 | 72 | @mock.patch("kfk.commands.connect.connectors.os") 73 | def test_describe_connector_output_yaml(self, mock_os): 74 | result = self.runner.invoke( 75 | connect, 76 | [ 77 | "connectors", 78 | "--describe", 79 | "--connector", 80 | self.connector, 81 | "-c", 82 | self.cluster, 83 | "-n", 84 | self.namespace, 85 | "-o", 86 | "yaml", 87 | ], 88 | ) 89 | assert result.exit_code == 0 90 | mock_os.system.assert_called_with( 91 | Kubectl() 92 | .get() 93 | .kafkaconnectors(self.connector) 94 | .namespace(self.namespace) 95 | .output("yaml") 96 | .build() 97 | ) 98 | 99 | @mock.patch("kfk.commands.connect.connectors.delete_using_yaml") 100 | def test_delete_connector(self, mock_delete_using_yaml): 101 | result = self.runner.invoke( 102 | connect, 103 | [ 104 | "connectors", 105 | "--delete", 106 | "--connector", 107 | self.connector, 108 | "-c", 109 | self.cluster, 110 | 
"-n", 111 | self.namespace, 112 | ], 113 | ) 114 | 115 | assert result.exit_code == 0 116 | 117 | mock_delete_using_yaml.assert_called_once() 118 | 119 | def test_create_connector_without_config_file(self): 120 | result = self.runner.invoke( 121 | connect, 122 | ["connectors", "--create", "-c", self.cluster, "-n", self.namespace], 123 | ) 124 | 125 | assert result.exit_code == 2 126 | 127 | @mock.patch("kfk.commands.connect.connectors.create_temp_file") 128 | @mock.patch("kfk.commands.connect.connectors.create_using_yaml") 129 | def test_create_connector(self, mock_create_using_yaml, mock_create_temp_file): 130 | result = self.runner.invoke( 131 | connect, 132 | [ 133 | "connectors", 134 | "--create", 135 | self.connector_config_file, 136 | "-c", 137 | self.cluster, 138 | "-n", 139 | self.namespace, 140 | ], 141 | ) 142 | 143 | assert result.exit_code == 0 144 | 145 | with open("tests/files/yaml/kafka-connect-connector-twitter.yaml") as file: 146 | expected_connector_yaml = file.read() 147 | result_connector_yaml = mock_create_temp_file.call_args[0][0] 148 | assert expected_connector_yaml == result_connector_yaml 149 | 150 | mock_create_using_yaml.assert_called_once() 151 | 152 | def test_alter_connector_without_config_file(self): 153 | result = self.runner.invoke( 154 | connect, ["connectors", "--alter", "-c", self.cluster, "-n", self.namespace] 155 | ) 156 | 157 | assert result.exit_code == 2 158 | 159 | @mock.patch("kfk.commands.connect.connectors.create_temp_file") 160 | @mock.patch("kfk.commons.get_resource_yaml") 161 | @mock.patch("kfk.commands.connect.connectors.replace_using_yaml") 162 | def test_alter_connector( 163 | self, mock_replace_using_yaml, mock_get_resource_yaml, mock_create_temp_file 164 | ): 165 | with open("tests/files/yaml/kafka-connect-connector-twitter.yaml") as file: 166 | connector_yaml = file.read() 167 | mock_get_resource_yaml.return_value = connector_yaml 168 | 169 | result = self.runner.invoke( 170 | connect, 171 | [ 172 | "connectors", 173 | "--alter", 174 | "tests/files/twitter_connector_with_config_change.properties", 175 | "-c", 176 | self.cluster, 177 | "-n", 178 | self.namespace, 179 | ], 180 | ) 181 | 182 | assert result.exit_code == 0 183 | 184 | with open( 185 | "tests/files/yaml/kafka-connect-connector-" 186 | "twitter_with_config_change.yaml" 187 | ) as file: 188 | expected_connector_yaml = file.read() 189 | result_connector_yaml = mock_create_temp_file.call_args[0][0] 190 | assert expected_connector_yaml == result_connector_yaml 191 | 192 | mock_replace_using_yaml.assert_called_once() 193 | -------------------------------------------------------------------------------- /tests/test_console_command.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase, mock 2 | 3 | from click.testing import CliRunner 4 | 5 | from kfk.commands.console import kfk 6 | from kfk.kubectl_command_builder import Kubectl 7 | 8 | 9 | class TestKfkConsole(TestCase): 10 | def setUp(self): 11 | self.runner = CliRunner() 12 | self.cluster = "my-cluster" 13 | self.namespace = "kafka" 14 | self.topic = "my-topic" 15 | 16 | @mock.patch("kfk.commands.console.os") 17 | def test_console_consumer(self, mock_os): 18 | result = self.runner.invoke( 19 | kfk, 20 | [ 21 | "console-consumer", 22 | "--topic", 23 | self.topic, 24 | "-c", 25 | self.cluster, 26 | "-n", 27 | self.namespace, 28 | ], 29 | ) 30 | assert result.exit_code == 0 31 | 32 | native_command = ( 33 | "bin/kafka-console-consumer.sh --bootstrap-server" 34 | " 
my-cluster-kafka-brokers:9092 --topic {topic} " 35 | ) 36 | mock_os.system.assert_called_with( 37 | Kubectl() 38 | .exec("-it", "{cluster}-kafka-0") 39 | .container("kafka") 40 | .namespace(self.namespace) 41 | .exec_command(native_command) 42 | .build() 43 | .format(cluster=self.cluster, topic=self.topic) 44 | ) 45 | 46 | @mock.patch("kfk.commons.transfer_file_to_container") 47 | @mock.patch("kfk.commands.console.os") 48 | def test_console_consumer_with_consumer_config( 49 | self, mock_os, mock_transfer_file_to_container 50 | ): 51 | result = self.runner.invoke( 52 | kfk, 53 | [ 54 | "console-consumer", 55 | "--topic", 56 | self.topic, 57 | "--consumer.config", 58 | "tests/files/client.properties", 59 | "-c", 60 | self.cluster, 61 | "-n", 62 | self.namespace, 63 | ], 64 | ) 65 | assert result.exit_code == 0 66 | 67 | native_command = ( 68 | "bin/kafka-console-consumer.sh --bootstrap-server" 69 | " {cluster}-kafka-brokers:9093 --topic {topic} --consumer.config" 70 | " /tmp/client.properties;rm -rf /tmp/truststore.jks;rm -rf /tmp/user.p12;rm" 71 | " -rf /tmp/client.properties;" 72 | ) 73 | mock_os.system.assert_called_with( 74 | Kubectl() 75 | .exec("-it", "{cluster}-kafka-0") 76 | .container("kafka") 77 | .namespace(self.namespace) 78 | .exec_command(native_command) 79 | .build() 80 | .format(cluster=self.cluster, topic=self.topic) 81 | ) 82 | 83 | @mock.patch("kfk.commands.console.os") 84 | def test_console_consumer_with_from_beginning(self, mock_os): 85 | from_beginning = True 86 | result = self.runner.invoke( 87 | kfk, 88 | [ 89 | "console-consumer", 90 | "--topic", 91 | self.topic, 92 | "-c", 93 | self.cluster, 94 | "-n", 95 | self.namespace, 96 | "--from-beginning", 97 | ], 98 | ) 99 | assert result.exit_code == 0 100 | 101 | native_command = ( 102 | "bin/kafka-console-consumer.sh --bootstrap-server" 103 | " my-cluster-kafka-brokers:9092 --topic {topic} {from_beginning}" 104 | ) 105 | mock_os.system.assert_called_with( 106 | Kubectl() 107 | .exec("-it", "{cluster}-kafka-0") 108 | .container("kafka") 109 | .namespace(self.namespace) 110 | .exec_command(native_command) 111 | .build() 112 | .format( 113 | cluster=self.cluster, 114 | topic=self.topic, 115 | from_beginning=(from_beginning and "--from-beginning" or ""), 116 | ) 117 | ) 118 | 119 | @mock.patch("kfk.commands.console.os") 120 | def test_console_producer(self, mock_os): 121 | result = self.runner.invoke( 122 | kfk, 123 | [ 124 | "console-producer", 125 | "--topic", 126 | self.topic, 127 | "-c", 128 | self.cluster, 129 | "-n", 130 | self.namespace, 131 | ], 132 | ) 133 | assert result.exit_code == 0 134 | native_command = ( 135 | "bin/kafka-console-producer.sh --broker-list my-cluster-kafka-brokers:9092" 136 | " --topic {topic}" 137 | ) 138 | mock_os.system.assert_called_with( 139 | Kubectl() 140 | .exec("-it", "{cluster}-kafka-0") 141 | .container("kafka") 142 | .namespace(self.namespace) 143 | .exec_command(native_command) 144 | .build() 145 | .format(cluster=self.cluster, topic=self.topic) 146 | ) 147 | 148 | @mock.patch("kfk.commons.transfer_file_to_container") 149 | @mock.patch("kfk.commands.console.os") 150 | def test_console_producer_with_producer_config( 151 | self, mock_os, mock_transfer_file_to_container 152 | ): 153 | result = self.runner.invoke( 154 | kfk, 155 | [ 156 | "console-producer", 157 | "--topic", 158 | self.topic, 159 | "--producer.config", 160 | "tests/files/client.properties", 161 | "-c", 162 | self.cluster, 163 | "-n", 164 | self.namespace, 165 | ], 166 | ) 167 | assert result.exit_code == 0 168 | 
native_command = ( 169 | "bin/kafka-console-producer.sh --broker-list {cluster}-kafka-brokers:9093" 170 | " --topic {topic} --producer.config /tmp/client.properties;rm -rf" 171 | " /tmp/truststore.jks;rm -rf /tmp/user.p12;rm -rf /tmp/client.properties;" 172 | ) 173 | 174 | mock_os.system.assert_called_with( 175 | Kubectl() 176 | .exec("-it", "{cluster}-kafka-0") 177 | .container("kafka") 178 | .namespace(self.namespace) 179 | .exec_command(native_command) 180 | .build() 181 | .format(cluster=self.cluster, topic=self.topic) 182 | ) 183 | -------------------------------------------------------------------------------- /tests/test_kfk.py: -------------------------------------------------------------------------------- 1 | from importlib.metadata import version 2 | from unittest import TestCase, mock 3 | 4 | from click.testing import CliRunner 5 | 6 | from kfk.config import KUBECTL_VERSION, STRIMZI_VERSION 7 | 8 | 9 | class TestKfk(TestCase): 10 | def setUp(self): 11 | self.runner = CliRunner() 12 | 13 | @mock.patch("kfk.setup.setup") 14 | def test_kfk(self, mock_setup): 15 | from kfk.main import kfk 16 | 17 | result = self.runner.invoke(kfk) 18 | assert result.exit_code == 0 19 | mock_setup.assert_called() 20 | 21 | @mock.patch("kfk.setup.setup") 22 | def test_kfk_version(self, mock_setup): 23 | from kfk.main import kfk 24 | 25 | result = self.runner.invoke(kfk, ["--version"]) 26 | assert result.exit_code == 0 27 | cli_version = version("strimzi-kafka-cli") 28 | expected_version = f"""CLI Version: {cli_version} 29 | Strimzi Version: {STRIMZI_VERSION} 30 | Kubectl Version: {KUBECTL_VERSION} 31 | """ 32 | 33 | assert result.output == expected_version 34 | -------------------------------------------------------------------------------- /tests/test_kubectl_cmd_builder.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | 3 | from kfk.config import KUBECTL_PATH 4 | from kfk.kubectl_command_builder import Kubectl 5 | 6 | 7 | class TestKubectl(TestCase): 8 | def test_get(self): 9 | self.assertEqual( 10 | Kubectl().get().build(), "{kubectl} get".format(kubectl=KUBECTL_PATH) 11 | ) 12 | 13 | def test_describe(self): 14 | self.assertEqual( 15 | Kubectl().describe().build(), 16 | "{kubectl} describe".format(kubectl=KUBECTL_PATH), 17 | ) 18 | 19 | def test_exec(self): 20 | self.assertEqual( 21 | Kubectl() 22 | .exec("-it", "test-pod") 23 | .container("test-container") 24 | .namespace("test-namespace") 25 | .exec_command("echo 'test'") 26 | .build(), 27 | "{kubectl} exec -it test-pod -c test-container -n test-namespace -- bash -c" 28 | " \"echo 'test'\"".format(kubectl=KUBECTL_PATH), 29 | ) 30 | 31 | def test_kafkas_all(self): 32 | self.assertEqual( 33 | Kubectl().get().kafkas().build(), 34 | "{kubectl} get kafkas".format(kubectl=KUBECTL_PATH), 35 | ) 36 | 37 | def test_kafkas_specific(self): 38 | self.assertEqual( 39 | Kubectl().get().kafkas("my-cluster").build(), 40 | "{kubectl} get kafkas my-cluster".format(kubectl=KUBECTL_PATH), 41 | ) 42 | 43 | def test_kafkatopics_all(self): 44 | self.assertEqual( 45 | Kubectl().get().kafkatopics().build(), 46 | "{kubectl} get kafkatopics".format(kubectl=KUBECTL_PATH), 47 | ) 48 | 49 | def test_kafkatopics_specific(self): 50 | self.assertEqual( 51 | Kubectl().get().kafkatopics("my-topic").build(), 52 | "{kubectl} get kafkatopics my-topic".format(kubectl=KUBECTL_PATH), 53 | ) 54 | 55 | def test_kafkausers_all(self): 56 | self.assertEqual( 57 | Kubectl().get().kafkausers().build(), 58 | "{kubectl} 
get kafkausers".format(kubectl=KUBECTL_PATH), 59 | ) 60 | 61 | def test_kafkausers_specific(self): 62 | self.assertEqual( 63 | Kubectl().get().kafkausers("my-user").build(), 64 | "{kubectl} get kafkausers my-user".format(kubectl=KUBECTL_PATH), 65 | ) 66 | 67 | def test_configmap_yaml(self): 68 | self.assertEqual( 69 | Kubectl().get().configmap("my-cluster-kafka-config").output("yaml").build(), 70 | "{kubectl} get configmap my-cluster-kafka-config -o yaml".format( 71 | kubectl=KUBECTL_PATH 72 | ), 73 | ) 74 | 75 | def test_label(self): 76 | self.assertEqual( 77 | Kubectl().get().kafkatopics().label("app=test").build(), 78 | "{kubectl} get kafkatopics -l app=test".format(kubectl=KUBECTL_PATH), 79 | ) 80 | 81 | def test_labels(self): 82 | self.assertEqual( 83 | Kubectl().get().kafkatopics().label("app=test, included=true").build(), 84 | "{kubectl} get kafkatopics -l app=test, included=true".format( 85 | kubectl=KUBECTL_PATH 86 | ), 87 | ) 88 | 89 | def test_namespace(self): 90 | self.assertEqual( 91 | Kubectl().get().kafkatopics().namespace("test").build(), 92 | "{kubectl} get kafkatopics -n test".format(kubectl=KUBECTL_PATH), 93 | ) 94 | 95 | def test_namespace_all(self): 96 | self.assertEqual( 97 | Kubectl().get().kafkatopics().namespace().build(), 98 | "{kubectl} get kafkatopics --all-namespaces".format(kubectl=KUBECTL_PATH), 99 | ) 100 | 101 | def test_label_namespace(self): 102 | self.assertEqual( 103 | Kubectl().get().kafkatopics().label("app=test").namespace("test").build(), 104 | "{kubectl} get kafkatopics -l app=test -n test".format( 105 | kubectl=KUBECTL_PATH 106 | ), 107 | ) 108 | 109 | def test_cluster(self): 110 | self.assertEqual( 111 | Kubectl().get().kafkas().container("test-container").build(), 112 | "{kubectl} get kafkas -c test-container".format(kubectl=KUBECTL_PATH), 113 | ) 114 | 115 | def test_output(self): 116 | self.assertEqual( 117 | Kubectl().get().kafkas().output("yaml").build(), 118 | "{kubectl} get kafkas -o yaml".format(kubectl=KUBECTL_PATH), 119 | ) 120 | -------------------------------------------------------------------------------- /tests/test_operator_command.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase, mock 2 | 3 | from click.testing import CliRunner 4 | 5 | from kfk.commands.operator import kfk 6 | 7 | 8 | class TestKfkOperator(TestCase): 9 | def setUp(self): 10 | self.runner = CliRunner() 11 | self.cluster = "my-cluster" 12 | self.namespace = "kafka" 13 | self.installation_file_count = 28 14 | 15 | @mock.patch("kfk.commands.operator.create_using_yaml") 16 | def test_install_strimzi(self, mock_create_using_yaml): 17 | result = self.runner.invoke( 18 | kfk, ["operator", "--install", "-n", self.namespace] 19 | ) 20 | assert result.exit_code == 0 21 | assert mock_create_using_yaml.call_count == self.installation_file_count 22 | 23 | @mock.patch("kfk.commands.operator.delete_using_yaml") 24 | def test_uninstall_strimzi(self, mock_delete_using_yaml): 25 | result = self.runner.invoke( 26 | kfk, ["operator", "--uninstall", "-n", self.namespace] 27 | ) 28 | assert result.exit_code == 0 29 | assert mock_delete_using_yaml.call_count == self.installation_file_count 30 | -------------------------------------------------------------------------------- /tests/test_setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tempfile 3 | from unittest import TestCase, mock 4 | 5 | from click.testing import CliRunner 6 | 7 | from kfk.config 
import KUBECTL, STRIMZI_VERSION 8 | from kfk.setup import setup 9 | 10 | 11 | class TestSetup(TestCase): 12 | def setUp(self): 13 | self.runner = CliRunner() 14 | 15 | @mock.patch("kfk.setup.os.rename") 16 | @mock.patch("kfk.setup.KUBECTL_PATH", tempfile.mkdtemp() + "/" + KUBECTL) 17 | @mock.patch("kfk.setup._download_kubectl") 18 | def test_main(self, mock_download_kubectl, mock_rename): 19 | setup() 20 | assert mock_download_kubectl.call_count == 1 21 | 22 | @mock.patch("kfk.setup.os.rename") 23 | @mock.patch("kfk.setup.KUBECTL_PATH", tempfile.mkdtemp() + "/" + KUBECTL) 24 | @mock.patch("kfk.setup._download_kubectl") 25 | def test_download_kubectl_if_not_exists(self, mock_download_kubectl, mock_rename): 26 | setup() 27 | assert mock_download_kubectl.call_count == 1 28 | 29 | @mock.patch("kfk.setup.os.rename") 30 | @mock.patch("kfk.setup.KUBECTL_VERSION", "x.x.x") 31 | @mock.patch("kfk.setup._download_kubectl") 32 | def test_update_kubectl_if_new_version_exists( 33 | self, mock_download_kubectl, mock_rename 34 | ): 35 | setup() 36 | assert mock_download_kubectl.call_count == 1 37 | 38 | @mock.patch("kfk.setup.os.rename") 39 | @mock.patch("kfk.setup.KUBECTL_VERSION", "x.x.x") 40 | @mock.patch("kfk.setup._download_kubectl") 41 | def test_update_kubectl_if_new_version_exists_with_custom_kubectl_path( 42 | self, mock_download_kubectl, mock_rename 43 | ): 44 | os.environ["STRIMZI_KAFKA_CLI_KUBECTL_PATH"] = tempfile.mkdtemp() 45 | setup() 46 | assert mock_download_kubectl.call_count == 0 47 | 48 | @mock.patch("kfk.setup.os.rename") 49 | @mock.patch("kfk.setup.KUBECTL_VERSION", "x.x.x") 50 | @mock.patch("kfk.setup._download_kubectl") 51 | def test_update_kubectl_if_new_version_exists_with_custom_kubectl_version( 52 | self, mock_download_kubectl, mock_rename 53 | ): 54 | os.environ["STRIMZI_KAFKA_CLI_KUBECTL_VERSION"] = "x.x.y" 55 | setup() 56 | assert mock_download_kubectl.call_count == 0 57 | 58 | @mock.patch("kfk.setup.os.rename") 59 | @mock.patch("kfk.setup.os.remove") 60 | @mock.patch("kfk.setup.tarfile") 61 | @mock.patch("kfk.setup.print") 62 | @mock.patch("kfk.setup.wget.download") 63 | @mock.patch("kfk.setup.STRIMZI_PATH", tempfile.mkdtemp() + "/strimzi-x.x.x") 64 | def test_download_strimzi_if_not_exists( 65 | self, mock_wget_download, mock_print, mock_tarfile, mock_remove, mock_rename 66 | ): 67 | setup() 68 | mock_print.assert_called_with( 69 | "Extracting Strimzi {version}...\n".format(version=STRIMZI_VERSION) 70 | ) 71 | -------------------------------------------------------------------------------- /tests/test_utils.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | 3 | from kfk.utils import convert_string_to_type, is_valid_url, snake_to_camel_case 4 | 5 | 6 | class TestUtils(TestCase): 7 | def test_convert_string_to_int(self): 8 | val_str = "12" 9 | self.assertEqual(convert_string_to_type(val_str), 12) 10 | 11 | def test_convert_string_to_float(self): 12 | val_str = "12.5" 13 | self.assertEqual(convert_string_to_type(val_str), 12.5) 14 | 15 | def test_convert_string_to_boolean(self): 16 | val_str = "true" 17 | self.assertEqual(convert_string_to_type(val_str), True) 18 | val_str = "false" 19 | self.assertEqual(convert_string_to_type(val_str), False) 20 | 21 | def test_convert_string_to_str(self): 22 | val_str = "test" 23 | self.assertEqual(convert_string_to_type(val_str), "test") 24 | 25 | def test_snake_to_camel_case(self): 26 | val_str = "this_is_the_test_string" 27 | 
self.assertEqual(snake_to_camel_case(val_str), "thisIsTheTestString") 28 | 29 | def test_is_valid_url(self): 30 | not_valid_url_str = "not a valid url" 31 | valid_url_str = "https://www.systemcraftsman.com" 32 | not_valid_url_str2 = "systemcraftsman.com" 33 | not_valid_url_str3 = "systemcraftsman.com" 34 | 35 | self.assertFalse(is_valid_url(not_valid_url_str)) 36 | self.assertTrue(is_valid_url(valid_url_str)) 37 | self.assertFalse(is_valid_url(not_valid_url_str2)) 38 | self.assertFalse(is_valid_url(not_valid_url_str3)) 39 | -------------------------------------------------------------------------------- /website/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Strimzi Kafka CLI 6 | 7 | 8 | Under Construction. Please visit the following link: 9 | 10 | https://github.com/systemcraftsman/strimzi-kafka-cli 11 | 12 | 13 | --------------------------------------------------------------------------------