├── .github └── workflows │ ├── generate_docs.yml │ ├── main.yml │ └── python-publish.yml ├── .gitignore ├── LICENSE ├── README.md ├── docs └── index.html ├── examples ├── docker-compose.yml ├── oauth_example.py ├── schema │ └── protobuf │ │ ├── user.proto │ │ ├── user_helper.py │ │ └── user_pb2.py ├── test.robot ├── test_adminclient.robot ├── test_avro.robot ├── test_oauth.robot └── test_protobuf.robot ├── setup.py └── src └── ConfluentKafkaLibrary ├── __init__.py ├── admin_client.py ├── consumer.py ├── producer.py ├── serialization.py └── version.py /.github/workflows/generate_docs.yml: -------------------------------------------------------------------------------- 1 | name: Generate Docs 2 | 3 | on: 4 | workflow_run: 5 | workflows: [Upload Python Package] 6 | types: 7 | - completed 8 | # Allows you to run this workflow manually from the Actions tab 9 | workflow_dispatch: 10 | 11 | jobs: 12 | deploy: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v4 16 | # This latest python way can't find the librdkafka.h files 17 | # - name: Set up Python 18 | # uses: actions/setup-python@v4 19 | # with: 20 | # python-version: '3.x' 21 | - name: Install requirements 22 | run: pip install .[all] 23 | - name: Generate keyword documentation 24 | run: python3 -m robot.libdoc -f html src/ConfluentKafkaLibrary docs/index.html 25 | - uses: stefanzweifel/git-auto-commit-action@v5 26 | with: 27 | file_pattern: docs/index.html 28 | commit_message: Add keyword documentation 29 | push_options: '--force' 30 | - name: Deploy 🚀 31 | uses: JamesIves/github-pages-deploy-action@v4.5.0 32 | with: 33 | branch: gh-pages # The branch the action should deploy to. 34 | folder: docs # The folder the action should deploy. 35 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | # This is a basic workflow to help you get started with Actions 2 | 3 | name: CI 4 | 5 | # Controls when the workflow will run 6 | on: 7 | # Triggers the workflow on push or pull request events but only for the master branch 8 | push: 9 | branches: [ master ] 10 | pull_request: 11 | branches: [ master ] 12 | 13 | # Allows you to run this workflow manually from the Actions tab 14 | workflow_dispatch: 15 | 16 | # A workflow run is made up of one or more jobs that can run sequentially or in parallel 17 | jobs: 18 | # This workflow contains a single job called "build" 19 | build: 20 | # The type of runner that the job will run on 21 | runs-on: ubuntu-latest 22 | 23 | # Steps represent a sequence of tasks that will be executed as part of the job 24 | steps: 25 | # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it 26 | - uses: actions/checkout@v4 27 | 28 | - name: Spin up kafka 29 | run: cd examples && docker compose up -d && cd .. 
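      # The compose stack (zookeeper, broker, schema-registry, rest-proxy) is started
      # first so it can become healthy while the Python requirements are installed;
      # the "Wait for services" step below polls the container health status before
      # the Robot tests are executed.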
30 | - name: Upgrade pip, setuptools, and wheel 31 | run: python3 -m pip install --upgrade pip setuptools wheel 32 | - name: Install python requirements 33 | run: pip install --force-reinstall .[all] 34 | - name: Check for broken dependencies 35 | run: pip check 36 | - name: Wait for services 37 | run: while [ -n "$(docker container ls -a | grep starting)" ]; do sleep 2; done; 38 | - name: Docker inspect 39 | run: docker inspect --format "{{json .State.Health.Status }}" $(docker compose -f examples/docker-compose.yml ps -q) 40 | - name: Show python version 41 | run: python3 --version 42 | - name: Execute tests 43 | run: python3 -m robot -d ./docs examples/ 44 | - name: Archive test log 45 | if: ${{ always() }} 46 | uses: actions/upload-artifact@v4 47 | with: 48 | name: log.html 49 | path: docs/log.html 50 | -------------------------------------------------------------------------------- /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow will upload a Python Package using Twine when a release is created 2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries 3 | 4 | name: Upload Python Package 5 | 6 | on: 7 | release: 8 | types: [created] 9 | 10 | jobs: 11 | deploy: 12 | 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | - uses: actions/checkout@v4 17 | - name: Set up Python 18 | uses: actions/setup-python@v5 19 | with: 20 | python-version: '3.x' 21 | - name: Install pypa/build 22 | run: >- 23 | python -m 24 | pip install 25 | build 26 | --user 27 | - name: Build a binary wheel and a source tarball 28 | run: >- 29 | python -m 30 | build 31 | --sdist 32 | --wheel 33 | --outdir dist/ 34 | . 35 | - name: Publish distribution 📦 to PyPI 36 | if: startsWith(github.ref, 'refs/tags') 37 | uses: pypa/gh-action-pypi-publish@release/v1 38 | with: 39 | password: ${{ secrets.PYPI_API_TOKEN }} 40 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode/ 2 | 3 | # Robot generated files 4 | log.html 5 | report.html 6 | output.xml 7 | 8 | # Byte-compiled / optimized / DLL files 9 | __pycache__/ 10 | *.py[cod] 11 | *$py.class 12 | 13 | # C extensions 14 | *.so 15 | 16 | # Distribution / packaging 17 | .Python 18 | build/ 19 | develop-eggs/ 20 | dist/ 21 | downloads/ 22 | eggs/ 23 | .eggs/ 24 | lib/ 25 | lib64/ 26 | parts/ 27 | sdist/ 28 | var/ 29 | wheels/ 30 | pip-wheel-metadata/ 31 | share/python-wheels/ 32 | *.egg-info/ 33 | .installed.cfg 34 | *.egg 35 | MANIFEST 36 | 37 | # PyInstaller 38 | # Usually these files are written by a python script from a template 39 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
40 | *.manifest 41 | *.spec 42 | 43 | # Installer logs 44 | pip-log.txt 45 | pip-delete-this-directory.txt 46 | 47 | # Unit test / coverage reports 48 | htmlcov/ 49 | .tox/ 50 | .nox/ 51 | .coverage 52 | .coverage.* 53 | .cache 54 | nosetests.xml 55 | coverage.xml 56 | *.cover 57 | .hypothesis/ 58 | .pytest_cache/ 59 | 60 | # Translations 61 | *.mo 62 | *.pot 63 | 64 | # Django stuff: 65 | *.log 66 | local_settings.py 67 | db.sqlite3 68 | db.sqlite3-journal 69 | 70 | # Flask stuff: 71 | instance/ 72 | .webassets-cache 73 | 74 | # Scrapy stuff: 75 | .scrapy 76 | 77 | # Sphinx documentation 78 | docs/_build/ 79 | 80 | # PyBuilder 81 | target/ 82 | 83 | # Jupyter Notebook 84 | .ipynb_checkpoints 85 | 86 | # IPython 87 | profile_default/ 88 | ipython_config.py 89 | 90 | # pyenv 91 | .python-version 92 | 93 | # pipenv 94 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 95 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 96 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 97 | # install all needed dependencies. 98 | #Pipfile.lock 99 | 100 | # celery beat schedule file 101 | celerybeat-schedule 102 | 103 | # SageMath parsed files 104 | *.sage.py 105 | 106 | # Environments 107 | .env 108 | .venv 109 | env/ 110 | venv/ 111 | ENV/ 112 | env.bak/ 113 | venv.bak/ 114 | 115 | # Spyder project settings 116 | .spyderproject 117 | .spyproject 118 | 119 | # Rope project settings 120 | .ropeproject 121 | 122 | # mkdocs documentation 123 | /site 124 | 125 | # mypy 126 | .mypy_cache/ 127 | .dmypy.json 128 | dmypy.json 129 | 130 | # Pyre type checker 131 | .pyre/ -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2022 Robert Karasek 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Robot Framework - ConfluentKafkaLibrary 2 | 3 | ConfluentKafkaLibrary library is a wrapper for the [confluent-kafka-python](https://github.com/confluentinc/confluent-kafka-python). 4 | 5 | ConfluentKafkaLibrary is compatible with the latest version of confluent-kafka-python, where the library versions have a 1:1 correspondence (e.g., ConfluentKafkaLibrary 1.3.0 corresponds to confluent-kafka-python 1.3.0). 
Bug fixes and updates are denoted by a trailing hyphen, such as `1.3.0-1`. 6 | 7 | ## Documentation 8 | 9 | The keyword documentation for ConfluentKafkaLibrary can be found [here](https://robooo.github.io/robotframework-ConfluentKafkaLibrary/) 10 | 11 | To generate the documentation, use the following command: 12 | 13 | ``` 14 | python -m robot.libdoc -f html src/ConfluentKafkaLibrary docs/index.html 15 | ``` 16 | 17 | ## Installation 18 | 19 | To install the library, run the following command: 20 | 21 | ``` 22 | pip install robotframework-confluentkafkalibrary 23 | ``` 24 | 25 | Extra packages: 26 | * `[avro]` = `['fastavro >= 1.3.2', 'avro >= 1.11.1']` 27 | * `[json]` = `['jsonschema >= 3.2.0', 'pyrsistent >= 0.20.0']` 28 | * `[protobuf]` = `['protobuf >= 4.22.0', 'googleapis-common-protos >= 1.66.0']` 29 | * `[schemaregistry]` = `['httpx>=0.26', 'cachetools >= 5.5.0', 'attrs >= 24.3.0']` 30 | 31 | * To install all dependencies use `[all]` extension like: 32 | 33 | ``` 34 | pip install robotframework-confluentkafkalibrary[all] 35 | ``` 36 | 37 | ## Usage 38 | 39 | In most cases, you can refer to the [confluent-kafka-python documentation](https://docs.confluent.io/platform/current/clients/confluent-kafka-python/html/index.html) for guidance. Every keyword in ConfluentKafkaLibrary is designed to match the corresponding Python functions. If you are unsure about the pre-configured keywords, please visit the [robotframework-ConfluentKafkaLibrary documentation](https://robooo.github.io/robotframework-ConfluentKafkaLibrary/). The Kafka team maintains the up-to-date documentation for configuration properties and their values [here](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md). 40 | 41 | * You can find basic usage examples in the [./examples/test.robot](./examples/test.robot) 42 | * For more complex examples, such as handling byte data from a topic, using multiple consumers, or running threaded avro consumers, please refer to the [documentation](https://robooo.github.io/robotframework-ConfluentKafkaLibrary/#Examples). 43 | 44 | ## Testing 45 | 46 | * The library is tested using black-box tests written in Robot Framework. 47 | * You can find the test files in the [examples/ directory](./examples) directory. 48 | * For testing, a dockerized enterprise Kafka platform with schema registry support and REST proxy is used. The platform is deployed and tested for each pull request and merge to the master branch. 49 | * See [docker-compose.yml](./examples/docker-compose.yml) file with the necessary configuration. 50 | * Tests are divided into the following files: 51 | * test.robot - Basic tests to verify functionality of the Consumer and Producer. 52 | * test_adminclient.robot - Verifications of admin client functionality. 53 | * test_avro.robot - Verifications of avro and serializers functionality. 54 | * Not executable example of oauth usage can be found [here](https://github.com/robooo/robotframework-ConfluentKafkaLibrary/blob/master/examples/test_oauth.robot#L14) 55 | * Update of deployment https://github.com/robooo/robotframework-ConfluentKafkaLibrary/issues/21 is required. 56 | * The core testing logic involves producing data to Kafka, connecting one consumer in a thread, and working with the results in specific test cases. 
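
As a quick orientation, the snippet below is a minimal sketch of that flow, built from the same keywords used in [./examples/test.robot](./examples/test.robot); the topic name, test case name, and wait times are illustrative only:

```
*** Settings ***
Library    ConfluentKafkaLibrary

*** Test Cases ***
Produce And Read Back Via Threaded Consumer
    ${thread}=         Start Consumer Threaded    topics=test
    ${producer_id}=    Create Producer
    Produce            group_id=${producer_id}    topic=test    value=Hello    partition=${0}
    ${remaining}=      Flush    ${producer_id}
    Should Be Equal As Integers    ${remaining}    0
    Sleep              1s    # give the threaded consumer time to poll the new message
    ${messages}=       Get Messages From Thread    ${thread}    decode_format=utf-8
    Log                ${messages}
    [Teardown]         Stop Consumer Threaded    ${thread}
```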
57 | 58 | ## Known Limitations: 59 | * Unable to install robotframework-confluentkafkalibrary on Amazon EC2 graviton instance type 60 | * see the [steps to resolve](https://github.com/robooo/robotframework-ConfluentKafkaLibrary/issues/33#issuecomment-1464644752) 61 | -------------------------------------------------------------------------------- /examples/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.6' 2 | services: 3 | zookeeper: 4 | image: confluentinc/cp-zookeeper:7.3.3 5 | hostname: zookeeper 6 | container_name: zookeeper 7 | ports: 8 | - '2181:2181' 9 | environment: 10 | ZOOKEEPER_CLIENT_PORT: 2181 11 | healthcheck: 12 | test: ['CMD-SHELL', 'nc -zv localhost 2181 && exit 0 || exit 1'] 13 | 14 | broker: 15 | image: confluentinc/cp-enterprise-kafka:7.3.3 16 | hostname: broker 17 | container_name: broker 18 | depends_on: 19 | - zookeeper 20 | ports: 21 | - '9092:9092' 22 | - '29092:29092' 23 | healthcheck: 24 | test: ['CMD-SHELL', 'nc -zv localhost 9092 && exit 0 || exit 1'] 25 | environment: 26 | KAFKA_BROKER_ID: 1 27 | KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' 28 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT 29 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker:29092,PLAINTEXT_HOST://localhost:9092 30 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 31 | KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 32 | 33 | schema-registry: 34 | image: confluentinc/cp-schema-registry:7.3.3 35 | hostname: schema-registry 36 | container_name: schema-registry 37 | depends_on: 38 | - zookeeper 39 | - broker 40 | ports: 41 | - '8081:8081' 42 | healthcheck: 43 | test: ['CMD-SHELL', 'nc -zv localhost 8081 && exit 0 || exit 1'] 44 | environment: 45 | SCHEMA_REGISTRY_HOST_NAME: schema-registry 46 | SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: 'broker:29092' 47 | SCHEMA_REGISTRY_LISTENERS: 'http://0.0.0.0:8081' 48 | SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181' 49 | 50 | rest-proxy: 51 | image: confluentinc/cp-kafka-rest:7.3.3 52 | depends_on: 53 | - zookeeper 54 | - broker 55 | - schema-registry 56 | ports: 57 | - 8082:8082 58 | healthcheck: 59 | test: ['CMD-SHELL', 'nc -zv localhost 8082 && exit 0 || exit 1'] 60 | hostname: rest-proxy 61 | container_name: rest-proxy 62 | environment: 63 | KAFKA_REST_HOST_NAME: rest-proxy 64 | KAFKA_REST_BOOTSTRAP_SERVERS: 'broker:29092' 65 | KAFKA_REST_LISTENERS: 'http://0.0.0.0:8082' 66 | KAFKA_REST_SCHEMA_REGISTRY_URL: 'http://schema-registry:8081' 67 | -------------------------------------------------------------------------------- /examples/oauth_example.py: -------------------------------------------------------------------------------- 1 | import time 2 | import functools 3 | from robot.libraries.BuiltIn import BuiltIn 4 | 5 | 6 | def oauth_cb(oauth_config): 7 | BuiltIn().set_global_variable("${SEEN_RF_OAUTH_CB}", True) 8 | return 'token', time.time() + 300.0 9 | 10 | def get_token(oauth_config): 11 | return functools.partial(oauth_cb, oauth_config) 12 | -------------------------------------------------------------------------------- /examples/schema/protobuf/user.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package example.protobuf; 3 | 4 | message User { 5 | string name = 1; 6 | int32 number = 2; 7 | } 8 | -------------------------------------------------------------------------------- /examples/schema/protobuf/user_helper.py: 
-------------------------------------------------------------------------------- 1 | from user_pb2 import User 2 | from robot.api.deco import keyword 3 | 4 | @keyword("Get Type") 5 | def get_type(): 6 | return User 7 | 8 | @keyword("Create User") 9 | def create_user(name, number: int, serialize :bool=False): 10 | new_user = User(name = name, number = number) 11 | 12 | if serialize: 13 | return new_user.SerializeToString() 14 | 15 | return new_user 16 | -------------------------------------------------------------------------------- /examples/schema/protobuf/user_pb2.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Generated by the protocol buffer compiler. DO NOT EDIT! 3 | # source: user.proto 4 | """Generated protocol buffer code.""" 5 | from google.protobuf.internal import builder as _builder 6 | from google.protobuf import descriptor as _descriptor 7 | from google.protobuf import descriptor_pool as _descriptor_pool 8 | from google.protobuf import symbol_database as _symbol_database 9 | # @@protoc_insertion_point(imports) 10 | 11 | _sym_db = _symbol_database.Default() 12 | 13 | 14 | 15 | 16 | DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\nuser.proto\x12\x10\x65xample.protobuf\"$\n\x04User\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x02 \x01(\x05\x62\x06proto3') 17 | 18 | _globals = globals() 19 | _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) 20 | _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'user_pb2', _globals) 21 | if _descriptor._USE_C_DESCRIPTORS == False: 22 | 23 | DESCRIPTOR._options = None 24 | _globals['_USER']._serialized_start=32 25 | _globals['_USER']._serialized_end=68 26 | # @@protoc_insertion_point(module_scope) 27 | -------------------------------------------------------------------------------- /examples/test.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Library ConfluentKafkaLibrary 3 | Library Collections 4 | 5 | Suite Setup Starting Test 6 | Suite Teardown Stop Thread 7 | 8 | *** Test Cases *** 9 | Verify Topics 10 | ${group_id}= Create Consumer auto_offset_reset=earliest 11 | ${topics}= List Topics ${group_id} 12 | Dictionary Should Contain Key ${topics} ${TEST_TOPIC} 13 | 14 | ${topics_thread}= List Topics ${THREADED_GROUPID} 15 | Dictionary Should Contain Key ${topics} ${TEST_TOPIC} 16 | [Teardown] Close Consumer ${group_id} 17 | 18 | Basic Consumer 19 | ${group_id}= Create Consumer auto_offset_reset=earliest 20 | Subscribe Topic group_id=${group_id} topics=${TEST_TOPIC} 21 | ${messages}= Poll group_id=${group_id} max_records=3 decode_format=utf8 22 | ${data}= Create List Hello World {'test': 1} 23 | Lists Should Be Equal ${messages} ${data} 24 | [Teardown] Basic Teardown ${group_id} 25 | 26 | Produce Without Value 27 | ${topic_name}= Set Variable topicwithoutvaluee 28 | Produce group_id=${PRODUCER_ID} topic=${topic_name} 29 | Wait Until Keyword Succeeds 10x 0.5s All Messages Are Delivered ${PRODUCER_ID} 30 | ${group_id}= Create Consumer auto_offset_reset=earliest 31 | Subscribe Topic group_id=${group_id} topics=${topic_name} 32 | ${messages}= Poll group_id=${group_id} max_records=1 33 | Should Be Equal As Strings ${messages} [None] 34 | [Teardown] Basic Teardown ${group_id} 35 | 36 | Verify Position 37 | ${group_id}= Create Consumer 38 | ${tp}= Create Topic Partition ${TEST_TOPIC} ${P_ID} ${OFFSET_END} 39 | Assign To Topic Partition ${group_id} ${tp} 40 | Sleep 5sec # Need to 
wait for an assignment 41 | ${position}= Get Position group_id=${group_id} topic_partitions=${tp} 42 | ${position_before}= Set Variable ${position[0].offset} 43 | 44 | Produce group_id=${PRODUCER_ID} topic=${TEST_TOPIC} value=Dummy partition=${P_ID} 45 | Wait Until Keyword Succeeds 10x 0.5s All Messages Are Delivered ${PRODUCER_ID} 46 | ${position}= Get Position group_id=${group_id} topic_partitions=${tp} 47 | ${position_after_produce}= Set Variable ${position[0].offset} 48 | Should Be Equal As Integers ${position_before} ${position_after_produce} 49 | 50 | ${messages}= Poll group_id=${group_id} max_records=1 decode_format=utf8 51 | ${position}= Get Position group_id=${group_id} topic_partitions=${tp} 52 | ${position_after_poll_1}= Set Variable ${position[0].offset} 53 | Should Not Be Equal As Integers ${position_after_poll_1} ${position_after_produce} 54 | 55 | Produce group_id=${PRODUCER_ID} topic=${TEST_TOPIC} value=Dummy partition=${P_ID} 56 | Wait Until Keyword Succeeds 10x 0.5s All Messages Are Delivered ${PRODUCER_ID} 57 | ${messages}= Poll group_id=${group_id} max_records=1 decode_format=utf8 58 | ${position}= Get Position group_id=${group_id} topic_partitions=${tp} 59 | ${position_after_poll_2}= Set Variable ${position[0].offset} 60 | Should Be Equal As Integers ${position_after_poll_1 + 1} ${position_after_poll_2} 61 | [Teardown] Basic Teardown ${group_id} 62 | 63 | Consumer With Assignment To Last Message After Get Of Watermark Offsets 64 | ${group_id}= Create Consumer 65 | ${tp}= Create Topic Partition ${TEST_TOPIC} ${P_ID} 66 | ${offset}= Get Watermark Offsets ${group_id} ${tp} 67 | ${tp}= Create Topic Partition ${TEST_TOPIC} ${P_ID} ${offset[1]} 68 | Assign To Topic Partition ${group_id} ${tp} 69 | Prepare Data 70 | ${messages}= Poll group_id=${group_id} max_records=6 decode_format=utf8 71 | Lists Should Be Equal ${TEST_DATA} ${messages} 72 | [Teardown] Basic Teardown ${group_id} 73 | 74 | Consumer With Assignment To OFFSET_END 75 | ${group_id}= Create Consumer 76 | ${tp}= Create Topic Partition ${TEST_TOPIC} ${P_ID} ${OFFSET_END} 77 | Assign To Topic Partition ${group_id} ${tp} 78 | # Need to wait for an async assignment, be aware the Is Assigned could return True but 79 | # that doesn't mean assignment is completed 80 | Sleep 5sec 81 | Prepare Data 82 | ${messages}= Poll group_id=${group_id} poll_attempts=30 max_records=6 timeout=5 decode_format=utf8 83 | Lists Should Be Equal ${TEST_DATA} ${messages} 84 | [Teardown] Unassign Teardown ${group_id} 85 | 86 | Verify Test And Threaded Consumer 87 | [Setup] Clear Messages From Thread ${MAIN_THREAD} 88 | ${group_id}= Create Consumer 89 | Subscribe Topic group_id=${group_id} topics=${TEST_TOPIC} 90 | ${messages}= Poll group_id=${group_id} 91 | Prepare Data 92 | ${thread_messages}= Get Messages From Thread ${MAIN_THREAD} decode_format=utf-8 93 | ${messages}= Poll group_id=${group_id} max_records=6 decode_format=utf8 94 | Lists Should Be Equal ${thread_messages} ${messages} 95 | [Teardown] Run Keywords Basic Teardown ${group_id} AND 96 | ... 
Clear Messages From Thread ${MAIN_THREAD} 97 | 98 | Verify Clean Of Threaded Consumer Messages 99 | [Setup] Prepare Data 100 | ${thread_messages1}= Get Messages From Thread ${MAIN_THREAD} decode_format=utf-8 101 | Clear Messages From Thread ${MAIN_THREAD} 102 | ${thread_messages2}= Get Messages From Thread ${MAIN_THREAD} 103 | Lists Should Be Equal ${TEST_DATA} ${thread_messages1} 104 | Should Be Empty ${thread_messages2} 105 | [Teardown] Clear Messages From Thread ${MAIN_THREAD} 106 | 107 | Remove And Publish New Messages From Threaded Consumer 108 | [Setup] Prepare Data 109 | ${thread_messages1}= Get Messages From Thread ${MAIN_THREAD} decode_format=utf-8 110 | Clear Messages From Thread ${MAIN_THREAD} 111 | Produce group_id=${PRODUCER_ID} topic=${TEST_TOPIC} value=After partition=${P_ID} 112 | Produce group_id=${PRODUCER_ID} topic=${TEST_TOPIC} value=Clear partition=${P_ID} 113 | Wait Until Keyword Succeeds 10x 0.5s All Messages Are Delivered ${PRODUCER_ID} 114 | Sleep 1sec # if next command is polling messages in thread we need to wait a second 115 | 116 | ${thread_messages2}= Get Messages From Thread ${MAIN_THREAD} decode_format=utf-8 117 | ${data}= Create List After Clear 118 | Should Be Equal ${data} ${thread_messages2} 119 | 120 | Produce group_id=${PRODUCER_ID} topic=${TEST_TOPIC} value=LAST partition=${P_ID} 121 | Wait Until Keyword Succeeds 10x 0.5s All Messages Are Delivered ${PRODUCER_ID} 122 | Sleep 1sec 123 | Append To List ${data} LAST 124 | ${thread_messages2}= Get Messages From Thread ${MAIN_THREAD} decode_format=utf-8 125 | Should Be Equal ${TEST_DATA} ${thread_messages1} 126 | Should Be Equal ${data} ${thread_messages2} 127 | [Teardown] Clear Messages From Thread ${MAIN_THREAD} 128 | 129 | Purge Test 130 | ${producer_id}= Create Producer message.timeout.ms=${30000} 131 | Produce group_id=${producer_id} topic=${TEST_TOPIC} value=After partition=${P_ID} 132 | Produce group_id=${producer_id} topic=${TEST_TOPIC} value=Clear partition=${P_ID} 133 | 134 | Purge group_id=${producer_id} in_queue=${False} 135 | ${count}= Flush ${producer_id} timeout=${0} 136 | Should Be Equal As Integers 2 ${count} 137 | Purge group_id=${producer_id} 138 | ${count}= Flush ${producer_id} timeout=${0} 139 | Should Be Equal As Integers 0 ${count} 140 | 141 | Offsets Test 142 | ${group_id}= Create Consumer enable.auto.offset.store=${False} 143 | Subscribe Topic group_id=${group_id} topics=${TEST_TOPIC} 144 | ${tp}= Create Topic Partition ${TEST_TOPIC} ${P_ID} ${OFFSET_BEGINNING} 145 | ${offsets}= Create List ${tp} 146 | Run Keyword And Expect Error KafkaException: * Store Offsets group_id=${group_id} offsets=${offsets} 147 | Assign To Topic Partition ${group_id} ${tp} 148 | Sleep 5sec 149 | Store Offsets group_id=${group_id} offsets=${offsets} 150 | [Teardown] Unassign Teardown ${group_id} 151 | 152 | *** Keywords *** 153 | Starting Test 154 | Set Suite Variable ${TEST_TOPIC} test 155 | ${thread}= Start Consumer Threaded topics=${TEST_TOPIC} 156 | ${gid}= Get Thread Group Id ${thread} 157 | Log ${gid} 158 | Set Suite Variable ${THREADED_GROUPID} ${gid} 159 | Set Suite Variable ${MAIN_THREAD} ${thread} 160 | ${producer_group_id}= Create Producer 161 | Set Suite Variable ${PRODUCER_ID} ${producer_group_id} 162 | 163 | Set Suite Variable ${P_ID} ${0} 164 | Prepare Data 165 | 166 | ${topics}= List Topics ${producer_group_id} 167 | ${partitions}= Get Topic Partitions ${topics['${TEST_TOPIC}']} 168 | ${partition_id}= Set Variable ${partitions[0].id} 169 | Set Suite Variable ${P_ID} ${partition_id} 170 | 
${tp}= Create Topic Partition ${TEST_TOPIC} ${partition_id} ${OFFSET_BEGINNING} 171 | 172 | ${data}= Create List Hello World {'test': 1} {'test': 2} {'test': 3} {'test': 4} 173 | Set Suite Variable ${TEST_DATA} ${data} 174 | Prepare Data 175 | 176 | Prepare Data 177 | Produce group_id=${PRODUCER_ID} topic=${TEST_TOPIC} value=Hello partition=${P_ID} 178 | Produce group_id=${PRODUCER_ID} topic=${TEST_TOPIC} value=World partition=${P_ID} 179 | Produce group_id=${PRODUCER_ID} topic=${TEST_TOPIC} value={'test': 1} partition=${P_ID} 180 | Produce group_id=${PRODUCER_ID} topic=${TEST_TOPIC} value={'test': 2} partition=${P_ID} 181 | Produce group_id=${PRODUCER_ID} topic=${TEST_TOPIC} value={'test': 3} partition=${P_ID} 182 | Produce group_id=${PRODUCER_ID} topic=${TEST_TOPIC} value={'test': 4} partition=${P_ID} 183 | Wait Until Keyword Succeeds 10x 0.5s All Messages Are Delivered ${PRODUCER_ID} 184 | Sleep 1sec # if next command is polling messages in thread we need to wait a second 185 | 186 | All Messages Are Delivered 187 | [Arguments] ${producer_id} 188 | ${count}= Flush ${producer_id} 189 | Log Reaming messages to be delivered: ${count} 190 | Should Be Equal As Integers ${count} 0 191 | 192 | Basic Teardown 193 | [Arguments] ${group_id} 194 | Unsubscribe ${group_id} 195 | Close Consumer ${group_id} 196 | ${groups}= Create List ${group_id} 197 | ${admin_client_id}= Create Admin Client 198 | ${resp}= Delete Groups ${admin_client_id} group_ids=${groups} 199 | Log ${resp} 200 | 201 | Unassign Teardown 202 | [Arguments] ${group_id} 203 | Unassign ${group_id} 204 | Close Consumer ${group_id} 205 | ${groups}= Create List ${group_id} 206 | ${admin_client_id}= Create Admin Client 207 | ${resp}= Delete Groups ${admin_client_id} group_ids=${groups} 208 | Log ${resp} 209 | 210 | Stop Thread 211 | ${resp}= Stop Consumer Threaded ${MAIN_THREAD} 212 | Log ${resp} -------------------------------------------------------------------------------- /examples/test_adminclient.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Library ConfluentKafkaLibrary 3 | Library Collections 4 | 5 | 6 | *** Test Cases *** 7 | AdminClient Topic Creation 8 | ${topic_names}= Create List admintesting1 admintesting2 admintesting3 9 | ${topics}= Create List 10 | FOR ${topic} IN @{topic_names} 11 | ${topic}= New Topic ${topic} num_partitions=${1} replication_factor=${1} 12 | Append To List ${topics} ${topic} 13 | END 14 | 15 | ${admin_client_id}= Create Admin Client 16 | ${results}= Create Topics group_id=${admin_client_id} new_topics=${topics} 17 | Log ${results} 18 | ${topics}= List Topics ${admin_client_id} 19 | FOR ${topic} IN @{topic_names} 20 | List Should Contain Value ${topics} ${topic} 21 | END 22 | [Teardown] Delete Topics ${admin_client_id} ${topic_names} 23 | 24 | AdminClient List Consumer Groups 25 | ${producer_group_id}= Create Producer 26 | Produce ${producer_group_id} topic=adminlisttest value=Hello partition=${0} 27 | Wait Until Keyword Succeeds 10x 0.5s All Messages Are Delivered ${producer_group_id} 28 | 29 | ${group_id}= Create Consumer auto_offset_reset=earliest 30 | Subscribe Topic ${group_id} topics=adminlisttest 31 | Sleep 2s # Wait for subscription 32 | 33 | ${admin_client_id}= Create Admin Client 34 | ${states}= Create List ${CONSUMER_GROUP_STATE_STABLE} 35 | ${groups}= List Groups ${admin_client_id} states=${states} 36 | Log ${groups} 37 | Log ${groups.valid} 38 | FOR ${group} IN @{groups.valid} 39 | Log ${group.group_id} 40 | IF "${group_id}" == 
"${group.group_id}" 41 | Log ${group.group_id} 42 | Log ${group.state} 43 | Pass Execution "Consumer found in list" 44 | END 45 | END 46 | Fail 47 | [Teardown] Basic Teardown ${group_id} 48 | 49 | AdminClient Describe Consumer Groups 50 | [Documentation] Finish the test with memebers + verification 51 | ${producer_group_id}= Create Producer 52 | Produce ${producer_group_id} topic=admindescribetest value=Hello partition=${0} 53 | Wait Until Keyword Succeeds 10x 0.5s All Messages Are Delivered ${producer_group_id} 54 | 55 | ${group_id}= Create Consumer auto_offset_reset=earliest 56 | Subscribe Topic ${group_id} topics=admindescribetest 57 | Sleep 2s # Wait for subscription 58 | ${group2_id}= Create Consumer auto_offset_reset=earliest 59 | Subscribe Topic ${group2_id} topics=admindescribetest 60 | Sleep 2s # Wait for subscription 61 | ${groups}= Create List ${group_id} ${group2_id} 62 | 63 | ${admin_client_id}= Create Admin Client 64 | ${described_groups}= Describe Groups ${admin_client_id} group_ids=${groups} 65 | Log ${described_groups} 66 | 67 | FOR ${member} IN @{described_groups["${group_id}"].members} 68 | Log ${member} 69 | END 70 | Log ${described_groups["${group_id}"].state} 71 | Log ${described_groups["${group2_id}"].state} 72 | 73 | [Teardown] Run Keywords Basic Teardown ${group_id} AND 74 | ... Basic Teardown ${group2_id} 75 | 76 | AdminClient Delete Consumer Groups 77 | ${producer_group_id}= Create Producer 78 | Produce ${producer_group_id} topic=admindeltest value=Hello partition=${0} 79 | Wait Until Keyword Succeeds 10x 0.5s All Messages Are Delivered ${producer_group_id} 80 | 81 | ${group_id}= Create Consumer auto_offset_reset=earliest 82 | Subscribe Topic ${group_id} topics=admindeltest 83 | Sleep 2s # Wait for subscription 84 | ${group2_id}= Create Consumer auto_offset_reset=earliest 85 | Subscribe Topic ${group2_id} topics=admindeltest 86 | Sleep 2s # Wait for subscription 87 | ${groups}= Create List ${group2_id} 88 | ${messages}= Poll group_id=${group2_id} max_records=5 89 | Sleep 1s 90 | Unsubscribe ${group2_id} 91 | Close Consumer ${group2_id} 92 | 93 | ${admin_client_id}= Create Admin Client 94 | ${deletion}= Delete Groups ${admin_client_id} group_ids=${groups} 95 | Should Be Equal ${deletion["${group2_id}"]} ${None} 96 | 97 | ${current_groups}= List Groups ${admin_client_id} 98 | Log ${current_groups.valid} 99 | FOR ${group} IN @{current_groups.valid} 100 | Log ${group.group_id} 101 | IF "${group_id}" == "${group.group_id}" 102 | Log ${group.group_id} 103 | Log ${group.state} 104 | Log "Consumer found in list" 105 | END 106 | IF "${group2_id}" == "${group.group_id}" 107 | Log ${group.group_id} 108 | Log ${group.state} 109 | Fail "Group 1 consumer was not removed!" 
110 | END 111 | END 112 | [Teardown] Basic Teardown ${group_id} 113 | 114 | AdminClient New Partitions 115 | ${topic_name}= Set Variable admin_testing_partition 116 | ${topic}= New Topic ${topic_name} num_partitions=${1} replication_factor=${1} 117 | ${admin_client_id}= Create Admin Client 118 | Create Topics group_id=${admin_client_id} new_topics=${topic} 119 | 120 | ${new_parts}= New Partitions ${topic_name} new_total_count=${2} 121 | ${resp}= Create Partitions group_id=${admin_client_id} new_partitions=${new_parts} 122 | Log ${resp} 123 | [Teardown] Delete Topics ${admin_client_id} ${topic_name} 124 | 125 | AdminClient Describe Configs 126 | ${resource}= Config Resource ${ADMIN_RESOURCE_BROKER} 1 127 | Log ${resource.name} 128 | ${admin_client_id}= Create Admin Client 129 | ${config}= Describe Configs ${admin_client_id} ${resource} 130 | Log ${config} 131 | 132 | Should Not Be Empty ${config} 133 | ${name}= Set Variable ${config["${resource.name}"]['offsets.commit.timeout.ms'].name} 134 | ${value}= Set Variable ${config["${resource.name}"]['offsets.commit.timeout.ms'].value} 135 | Should Be Equal As Strings ${name} offsets.commit.timeout.ms 136 | Should Be Equal As Integers ${value} ${5000} 137 | 138 | AdminClient Alter Configs 139 | ${data}= Create Dictionary log.retention.ms=${54321} # DotDict 140 | ${data}= Convert To Dictionary ${data} # dict 141 | ${resource}= Config Resource ${ADMIN_RESOURCE_BROKER} 1 set_config=${data} 142 | ${admin_client_id}= Create Admin Client 143 | 144 | ${resp}= Alter Configs ${admin_client_id} ${resource} 145 | Log ${resp} 146 | Sleep 1s 147 | ${config}= Describe Configs ${admin_client_id} ${resource} 148 | Should Be Equal As Integers ${54321} ${config["${resource.name}"]['log.retention.ms'].value} 149 | 150 | AdminClient Describe Topics 151 | ${topic_names}= Create List admintesting1 admintesting2 admintesting3 152 | ${topics}= Create List 153 | FOR ${topic} IN @{topic_names} 154 | ${topic}= New Topic ${topic} num_partitions=${1} replication_factor=${1} 155 | Append To List ${topics} ${topic} 156 | END 157 | 158 | ${admin_client_id}= Create Admin Client 159 | ${results}= Create Topics group_id=${admin_client_id} new_topics=${topics} 160 | Log ${results} 161 | 162 | ${results}= Describe Topics ${admin_client_id} ${topic_names} 163 | Log ${results} 164 | FOR ${topic} IN @{topic_names} 165 | ${status}= Evaluate len("${results["${topic}"].topic_id}") > 0 166 | Should Be True ${status} 167 | END 168 | [Teardown] Delete Topics ${admin_client_id} ${topic_names} 169 | 170 | AdminClient Describe Cluster 171 | ${admin_client_id}= Create Admin Client 172 | ${cluster}= Describe Cluster ${admin_client_id} 173 | Should Not Be Empty ${cluster.cluster_id} 174 | 175 | *** Keywords *** 176 | All Messages Are Delivered 177 | [Arguments] ${producer_id} 178 | ${count}= Flush ${producer_id} 179 | Log Reaming messages to be delivered: ${count} 180 | Should Be Equal As Integers ${count} 0 181 | 182 | Basic Teardown 183 | [Arguments] ${group_id} 184 | Unsubscribe ${group_id} 185 | Close Consumer ${group_id} 186 | ${groups}= Create List ${group_id} 187 | ${admin_client_id}= Create Admin Client 188 | Delete Groups ${admin_client_id} group_ids=${groups} -------------------------------------------------------------------------------- /examples/test_avro.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Library ConfluentKafkaLibrary 3 | Library Collections 4 | Library String 5 | 6 | Suite Setup Starting Test 7 | 8 | 9 | 
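# The test below round-trips a dictionary through the Avro serializer and
# deserializer: the value is serialized against the registered schema on Produce
# and deserialized back to a dictionary on Poll, so the consumed messages can be
# compared directly with ${TEST_DATA} prepared in the suite setup.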
*** Test Cases *** 10 | Avro Producer Consumer With Serializers 11 | ${schema_registry_conf}= Create Dictionary url=http://127.0.0.1:8081 12 | ${schema_registry_client}= Get Schema Registry Client ${schema_registry_conf} 13 | ${schema_str}= Set Variable {"namespace": "example.avro","type": "record","name": "User","fields": [{"name": "name","type": "string"},{"name": "number","type": ["int","null"]}]} 14 | ${avro_serializer}= Get Avro Serializer ${schema_str} ${schema_registry_client} 15 | ${avro_deserializer}= Get Avro Deserializer ${schema_str} ${schema_registry_client} 16 | ${string_serializer}= Get String Serializer 17 | ${string_deserializer}= Get String Deserializer 18 | 19 | ${producer_id}= Create Producer key_serializer=${string_serializer} value_serializer=${avro_serializer} serializing=${True} 20 | ${value}= Create Dictionary name=Robot number=${10} 21 | Produce group_id=${producer_id} topic=avro_testing1 partition=${0} value=${value} key=${KEY} 22 | Wait Until Keyword Succeeds 10x 0.5s All Messages Are Delivered ${producer_id} 23 | 24 | ${consumer_group_id}= Create Consumer auto_offset_reset=latest key_deserializer=${string_deserializer} value_deserializer=${avro_deserializer} deserializing=${True} 25 | Subscribe Topic group_id=${consumer_group_id} topics=avro_testing1 26 | Poll group_id=${consumer_group_id} # Dummy poll when using offset reset 'latest' 27 | 28 | Produce group_id=${producer_id} topic=avro_testing1 value=${value} partition=${0} key=${KEY} 29 | Wait Until Keyword Succeeds 10x 0.5s All Messages Are Delivered ${producer_id} 30 | ${messages}= Poll group_id=${consumer_group_id} 31 | Should Be Equal ${messages} ${TEST_DATA} 32 | [Teardown] Basic Teardown ${consumer_group_id} 33 | 34 | 35 | *** Keywords *** 36 | Starting Test 37 | Set Suite Variable @{TEST_TOPIC} avro_testing1 38 | Set Suite Variable ${KEY} 568a68fd-2785-44cc-8997-1295c3755d28 39 | 40 | ${value}= Create Dictionary name=Robot number=${10} 41 | ${data}= Create List ${value} 42 | Set Suite Variable ${TEST_DATA} ${data} 43 | 44 | All Messages Are Delivered 45 | [Arguments] ${producer_id} 46 | ${count}= Flush ${producer_id} 47 | Log Reaming messages to be delivered: ${count} 48 | Should Be Equal As Integers ${count} 0 49 | 50 | Basic Teardown 51 | [Arguments] ${group_id} 52 | Unsubscribe ${group_id} 53 | Close Consumer ${group_id} 54 | ${groups}= Create List ${group_id} 55 | ${admin_client_id}= Create Admin Client 56 | ${resp}= Delete Groups ${admin_client_id} group_ids=${groups} 57 | Log ${resp} -------------------------------------------------------------------------------- /examples/test_oauth.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Library ConfluentKafkaLibrary 3 | Library oauth_example 4 | Library Collections 5 | Library String 6 | 7 | 8 | *** Variables *** 9 | ${SEEN_RF_OAUTH_CB} ${False} 10 | 11 | 12 | *** Test Cases *** 13 | Example Oauth 14 | [Documentation] Example of how to use OAUTH with library and call functools 15 | ... via get_token function. For better handling there could be 16 | ... some global variable which can be set inside of python lib. 17 | ... Not executable right now, needs update env (issue #21). 
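    # Note: librdkafka calls oauth_cb with the value of sasl.oauthbearer.config and
    # expects a (token, expiry) tuple in return, where expiry is an absolute unix
    # timestamp in seconds (see examples/oauth_example.py for the callback used here).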
18 | 19 | Skip 20 | 21 | ${string_serializer}= Get String Serializer 22 | ${value_serializer}= Get String Serializer 23 | 24 | # This returns: functools.partial(, 'configuration') 25 | ${fun}= oauth_example.get_token configuration 26 | 27 | ${producer_id}= Create Producer key_serializer=${string_serializer} value_serializer=${value_serializer} legacy=${False} security.protocol=sasl_plaintext sasl.mechanisms=OAUTHBEARER oauth_cb=${fun} 28 | 29 | #... 30 | -------------------------------------------------------------------------------- /examples/test_protobuf.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Library ConfluentKafkaLibrary 3 | Library schema/protobuf/user_helper.py 4 | 5 | *** Test Cases *** 6 | Protobuf Producer With Serializer 7 | ${schema_registry_conf}= Create Dictionary url=http://127.0.0.1:8081 8 | ${schema_registry_client}= Get Schema Registry Client ${schema_registry_conf} 9 | ${msg_type}= Get Type 10 | ${protobuf_serializer}= Get Protobuf Serializer ${msg_type} ${schema_registry_client} 11 | ${protobuf_deserializer}= Get Protobuf Deserializer ${msg_type} 12 | ${string_serializer}= Get String Serializer 13 | 14 | ${producer_id}= Create Producer key_serializer=${string_serializer} value_serializer=${protobuf_serializer} serializing=${True} 15 | ${value}= Create User Robot 10 16 | Produce group_id=${producer_id} topic=protobuf_testing1 key=bd232464-e3d3-425d-93b7-5789dc7273c1 value=${value} 17 | Wait Until Keyword Succeeds 10x 0.5s All Messages Are Delivered ${producer_id} 18 | 19 | Protobuf Producer Consumer With Serializer 20 | ${schema_registry_conf}= Create Dictionary url=http://127.0.0.1:8081 21 | ${schema_registry_client}= Get Schema Registry Client ${schema_registry_conf} 22 | ${msg_type}= Get Type 23 | ${protobuf_serializer}= Get Protobuf Serializer ${msg_type} ${schema_registry_client} 24 | ${protobuf_deserializer}= Get Protobuf Deserializer ${msg_type} 25 | ${string_serializer}= Get String Serializer 26 | ${string_deserializer}= Get String Deserializer 27 | 28 | ${producer_id}= Create Producer key_serializer=${string_serializer} value_serializer=${protobuf_serializer} serializing=${True} 29 | ${value}= Create User Robot 10 30 | Produce group_id=${producer_id} topic=protobuf_testing2 key=f01df0c6-ec0b-49e9-835f-d766a9e8036f value=${value} 31 | Wait Until Keyword Succeeds 10x 0.5s All Messages Are Delivered ${producer_id} 32 | 33 | ${consumer_group_id}= Create Consumer auto_offset_reset=earliest key_deserializer=${string_deserializer} value_deserializer=${protobuf_deserializer} deserializing=${True} 34 | Subscribe Topic group_id=${consumer_group_id} topics=protobuf_testing2 35 | ${messages}= Poll group_id=${consumer_group_id} 36 | Length Should Be ${messages} 1 37 | Should Be Equal ${messages[0]} ${value} 38 | [Teardown] Basic Teardown ${consumer_group_id} 39 | 40 | *** Keywords *** 41 | All Messages Are Delivered 42 | [Arguments] ${producer_id} 43 | ${count}= Flush ${producer_id} 44 | Log Reaming messages to be delivered: ${count} 45 | Should Be Equal As Integers ${count} 0 46 | 47 | Basic Teardown 48 | [Arguments] ${group_id} 49 | Unsubscribe ${group_id} 50 | Close Consumer ${group_id} 51 | ${groups}= Create List ${group_id} 52 | ${admin_client_id}= Create Admin Client 53 | ${resp}= Delete Groups ${admin_client_id} group_ids=${groups} 54 | Log ${resp} 55 | -------------------------------------------------------------------------------- /setup.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from os.path import join, dirname 4 | from setuptools import setup 5 | 6 | filename=join(dirname(__file__), 'src', 'ConfluentKafkaLibrary', 'version.py') 7 | exec(compile(open(filename).read(),filename, 'exec')) 8 | 9 | DESCRIPTION = """ 10 | Confluent Kafka wrapped in Robot Framework. 11 | """[1:-1] 12 | 13 | AVRO_REQUIRES = ['fastavro >= 1.3.2', 'avro >= 1.11.1'] 14 | JSON_REQUIRES = ['jsonschema >= 3.2.0', 'pyrsistent >= 0.20.0'] 15 | PROTO_REQUIRES = ['protobuf >= 4.22.0', 'googleapis-common-protos >= 1.66.0'] 16 | SCHEMA_REGISTRY_REQUIRES = ['httpx>=0.26', 'cachetools >= 5.5.0', 'attrs >= 24.3.0'] 17 | ALL = AVRO_REQUIRES + JSON_REQUIRES + PROTO_REQUIRES + SCHEMA_REGISTRY_REQUIRES 18 | setup(name = 'robotframework-confluentkafkalibrary', 19 | version = VERSION, 20 | description = 'Confluent Kafka library for Robot Framework', 21 | long_description = DESCRIPTION, 22 | author = 'Robert Karasek', 23 | author_email = '', 24 | url = 'https://github.com/robooo/robotframework-ConfluentKafkaLibrary', 25 | license = 'Apache License 2.0', 26 | keywords = 'robotframework confluent kafka', 27 | platforms = 'any', 28 | classifiers = [ 29 | "License :: OSI Approved :: Apache Software License", 30 | "Operating System :: OS Independent", 31 | "Programming Language :: Python", 32 | "Topic :: Software Development :: Testing" 33 | ], 34 | install_requires = [ 35 | 'robotframework >= 3.2.1', 36 | 'confluent-kafka == 2.8.0', 37 | 'requests >= 2.25.1', 38 | ], 39 | extras_require={ 40 | 'all': ALL, 41 | 'avro': AVRO_REQUIRES, 42 | 'json': JSON_REQUIRES, 43 | 'protobuf': PROTO_REQUIRES, 44 | 'schemaregistry': SCHEMA_REGISTRY_REQUIRES, 45 | }, 46 | package_dir = {'' : 'src'}, 47 | packages = ['ConfluentKafkaLibrary'], 48 | ) 49 | -------------------------------------------------------------------------------- /src/ConfluentKafkaLibrary/__init__.py: -------------------------------------------------------------------------------- 1 | import confluent_kafka 2 | from confluent_kafka import ConsumerGroupState 3 | from confluent_kafka.schema_registry import SchemaRegistryClient 4 | from confluent_kafka.admin import AdminClient, NewTopic, NewPartitions, ConfigResource 5 | from robot.libraries.BuiltIn import BuiltIn, RobotNotRunningError 6 | from .consumer import KafkaConsumer 7 | from .producer import KafkaProducer 8 | from .admin_client import KafkaAdminClient 9 | from .version import VERSION 10 | 11 | IMPORTS = KafkaConsumer, KafkaProducer, KafkaAdminClient 12 | try: 13 | from .serialization import Serializer, Deserializer 14 | IMPORTS += Serializer, Deserializer 15 | except ImportError: 16 | pass 17 | 18 | #class ConfluentKafkaLibrary(KafkaConsumer, KafkaProducer, Serializer, Deserializer): 19 | class ConfluentKafkaLibrary(*IMPORTS): 20 | """ConfluentKafkaLibrary is a Robot Framework library which wraps up 21 | [https://github.com/confluentinc/confluent-kafka-python | confluent-kafka-python]. 22 | Library supports more functionality like running more clients based on `group_id` 23 | or running them in threaded mode during the tests, decoding of gathered data etc. (`See` `Examples`). 24 | 25 | This document explains how to use keywords provided by ConfluentKafkaLibrary. 26 | For information about installation, support, and more, please visit the 27 | [https://github.com/robooo/robotframework-ConfluentKafkaLibrary | project github page]. 
28 | For more information about Robot Framework, see http://robotframework.org. 29 | 30 | == Examples == 31 | See [https://github.com/robooo/robotframework-ConfluentKafkaLibrary/tree/master/examples | repo examples]. 32 | 33 | *Basic Consumer with predefined group_id* 34 | 35 | | ${group_id}= | `Create Consumer` | group_id=mygroup | # if group_id is not defined uuid4() is generated | 36 | | `Subscribe Topic` | group_id=${group_id} | topics=test_topic | 37 | | ${result}= | `Poll` | group_id=${group_id} | max_records=5 | 38 | | `Log` | ${result} | 39 | | `Unsubscribe` | ${group_id} | 40 | | `Close Consumer` | ${group_id} | 41 | 42 | *More Consumers* 43 | 44 | | ${group_id_1}= | `Create Consumer` | 45 | | `Subscribe Topic` | group_id=${group_id_1} | topics=topic1 | 46 | | ${group_id_2}= | `Create Consumer` | 47 | | `Subscribe Topic` | group_id=${group_id_2} | topics=topic2 | 48 | | ${result_1}= | `Poll` | group_id=${group_id_1} | max_records=5 | 49 | | ${result_2}= | `Poll` | group_id=${group_id_2} | max_records=2 | 50 | | `Unsubscribe` | ${group_id_1} | 51 | | `Unsubscribe` | ${group_id_2} | 52 | | `Close Consumer` | ${group_id_1} | 53 | | `Close Consumer` | ${group_id_2} | 54 | 55 | *Handle Byte Data From Topic* 56 | 57 | | ${messages}= | Poll | group_id=${group_id} | max_records=3 | decode_format=utf_8 | 58 | | ${json} | Convert String to JSON | ${messages}[0] | 59 | | ${jsonValue} | Get value from JSON | ${json} | $.key | 60 | 61 | """ 62 | 63 | ROBOT_LIBRARY_VERSION = VERSION 64 | ROBOT_LIBRARY_SCOPE = 'GLOBAL' 65 | 66 | def __init__(self): 67 | KafkaConsumer.__init__(self) 68 | KafkaProducer.__init__(self) 69 | KafkaAdminClient.__init__(self) 70 | self._set_globals_variables_if_robot_running() 71 | 72 | def _set_globals_variables_if_robot_running(self): 73 | try: 74 | BuiltIn().set_global_variable('${OFFSET_BEGINNING}', confluent_kafka.OFFSET_BEGINNING) 75 | BuiltIn().set_global_variable('${OFFSET_END}', confluent_kafka.OFFSET_END) 76 | BuiltIn().set_global_variable('${OFFSET_STORED}', confluent_kafka.OFFSET_STORED) 77 | BuiltIn().set_global_variable('${OFFSET_INVALID}', confluent_kafka.OFFSET_INVALID) 78 | BuiltIn().set_global_variable('${ADMIN_RESOURCE_BROKER}', confluent_kafka.admin.RESOURCE_BROKER) 79 | BuiltIn().set_global_variable('${ADMIN_RESOURCE_GROUP}', confluent_kafka.admin.RESOURCE_GROUP) 80 | BuiltIn().set_global_variable('${ADMIN_RESOURCE_TOPIC}', confluent_kafka.admin.RESOURCE_TOPIC) 81 | 82 | BuiltIn().set_global_variable('${CONSUMER_GROUP_STATE_UNKNOWN}', confluent_kafka.ConsumerGroupState.UNKNOWN) 83 | BuiltIn().set_global_variable('${CONSUMER_GROUP_STATE_PREPARING_REBALANCING}', confluent_kafka.ConsumerGroupState.PREPARING_REBALANCING) 84 | BuiltIn().set_global_variable('${CONSUMER_GROUP_STATE_COMPLETING_REBALANCING}', confluent_kafka.ConsumerGroupState.COMPLETING_REBALANCING) 85 | BuiltIn().set_global_variable('${CONSUMER_GROUP_STATE_STABLE}', confluent_kafka.ConsumerGroupState.STABLE) 86 | BuiltIn().set_global_variable('${CONSUMER_GROUP_STATE_DEAD}', confluent_kafka.ConsumerGroupState.DEAD) 87 | BuiltIn().set_global_variable('${CONSUMER_GROUP_STATE_EMPTY}', confluent_kafka.ConsumerGroupState.EMPTY) 88 | 89 | except RobotNotRunningError as e: 90 | pass 91 | 92 | def list_topics(self, group_id, topic=None): 93 | """Request Metadata from cluster. Could be executed with consumer or producer group_id too. 94 | - ``topic`` (str): If specified, only request info about this topic, else return for all topics in cluster. 95 | Default: `None`. 
96 |         - ``group_id`` (str): *required* id of the created consumer, producer or admin client.
97 |         """
98 |         if group_id is None:
99 |             raise TypeError('group_id is required and can not be None!')
100 | 
101 |         if group_id in self.admin_clients:
102 |             return self.admin_clients[group_id].list_topics(topic).topics
103 |         if group_id in self.consumers:
104 |             return self.consumers[group_id].list_topics(topic).topics
105 |         if group_id in self.producers:
106 |             return self.producers[group_id].list_topics(topic).topics
107 | 
108 |         raise ValueError('Admin client, consumer or producer group_id is wrong or does not exist!')
109 | 
110 |     def new_topic(self, topic, **kwargs):
111 |         """Instantiate a NewTopic object. Specifies per-topic settings for passing to AdminClient.create_topics().
112 |         - ``topic`` (str): Topic name
113 |         Note: In a multi-cluster production scenario, it is more typical to use a
114 |         replication_factor of 3 for durability.
115 |         """
116 |         return NewTopic(topic=topic, **kwargs)
117 | 
118 |     def new_partitions(self, topic, **kwargs):
119 |         """Instantiate a NewPartitions object.
120 |         - ``topic`` (str): Topic name
121 |         """
122 |         return NewPartitions(topic=topic, **kwargs)
123 | 
124 |     def config_resource(self, restype, name, **kwargs):
125 |         """Represents a resource that has configuration, and (optionally) a collection of configuration properties
126 |         for that resource. Used by describe_configs() and alter_configs().
127 |         - ``restype`` (ConfigResource.Type): The resource type.
128 |         - ``name`` (str): The resource name, which depends on the resource type. For RESOURCE_BROKER,
129 |         the resource name is the broker id.
130 |         """
131 |         return ConfigResource(restype=restype, name=name, **kwargs)
132 | 
133 |     def get_schema_registry_client(self, conf):
134 |         return SchemaRegistryClient(conf)
135 | 
--------------------------------------------------------------------------------
/src/ConfluentKafkaLibrary/admin_client.py:
--------------------------------------------------------------------------------
1 | import uuid
2 | from confluent_kafka.admin import AdminClient
3 | from confluent_kafka import KafkaException, TopicCollection
4 | 
5 | 
6 | class KafkaAdminClient():
7 | 
8 |     def __init__(self):
9 |         self.admin_clients = {}
10 | 
11 |     def create_admin_client(
12 |         self,
13 |         group_id=None,
14 |         server="127.0.0.1",
15 |         port="9092",
16 |         **kwargs
17 |     ):
18 |         if group_id is None:
19 |             group_id = str(uuid.uuid4())
20 | 
21 |         admin_client = AdminClient({
22 |             'bootstrap.servers': '{}:{}'.format(server, port),
23 |             **kwargs})
24 | 
25 |         self.admin_clients[group_id] = admin_client
26 |         return group_id
27 | 
28 |     def list_groups(self, group_id, states=None, request_timeout=10):
29 |         """List consumer groups.
30 |         - ``states`` (list(ConsumerGroupState)): filter consumer groups which are currently in these states.
31 |         For example usage see 'AdminClient List Consumer Groups' at
32 |         examples/test_adminclient.robot
33 |         Default: `None`.
34 |         - ``request_timeout`` (int): Maximum response time before timing out.
35 |         Default: `10`.
36 |         """
37 |         if states is None:
38 |             states = []
39 |         future = self.admin_clients[group_id].list_consumer_groups(request_timeout=request_timeout, states=set(states))
40 |         return future.result()
41 | 
42 |     def describe_groups(self, group_id, group_ids, request_timeout=10):
43 |         """Describe consumer groups.
44 |         - ``group_ids`` (list(str)): List of group_ids which need to be described.
45 |         - ``request_timeout`` (int): Maximum response time before timing out.
46 |         Default: `10`.
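
        Example (a minimal sketch; ``mygroup`` is an illustrative group name and the admin
        client is assumed to point at a reachable cluster):

        | ${admin_id}= | `Create Admin Client` |
        | ${group_ids}= | Create List | mygroup |
        | ${result}= | `Describe Groups` | group_id=${admin_id} | group_ids=${group_ids} |
        | Log | ${result}[mygroup] |

        The keyword returns a dictionary keyed by the requested group ids, holding either the
        description result or the exception raised for that group.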
47 | """ 48 | response = self.admin_clients[group_id].describe_consumer_groups(group_ids, request_timeout=request_timeout) 49 | 50 | groups_results={} 51 | for con_id in group_ids: 52 | try: 53 | if response[con_id].exception() is None: 54 | groups_results[con_id] = response[con_id].result() 55 | else: 56 | groups_results[con_id] = response[con_id].exception() 57 | except KafkaException as e: 58 | return f"Failed to describe group {con_id}: {e}" 59 | except (TypeError, ValueError ) as e: 60 | return f"Invalid input: {e}" 61 | return groups_results 62 | 63 | def delete_groups(self, group_id, group_ids, request_timeout=10): 64 | """Delete the given consumer groups. 65 | - ``group_ids`` (list(str)): List of group_ids which need to be deleted. 66 | - ``request_timeout`` (int): Maximum response time before timing out. 67 | Default: `10`. 68 | """ 69 | response = self.admin_clients[group_id].delete_consumer_groups(group_ids, request_timeout=request_timeout) 70 | 71 | groups_results={} 72 | for con_id in group_ids: 73 | try: 74 | if response[con_id].exception() is None: 75 | groups_results[con_id] = response[con_id].result() 76 | else: 77 | groups_results[con_id] = response[con_id].exception() 78 | except KafkaException as e: 79 | return f"Failed to delete group {con_id}: {e}" 80 | except (TypeError, ValueError ) as e: 81 | return f"Invalid input: {e}" 82 | return groups_results 83 | 84 | def create_topics(self, group_id, new_topics, **kwargs): 85 | """Create one or more new topics and wait for each one to finish. 86 | - ``new_topics`` (list(NewTopic) or NewTopic): A list of specifications (NewTopic) 87 | or a single instance for the topics that should be created. 88 | """ 89 | fs = None 90 | if isinstance(new_topics, list): 91 | fs = self.admin_clients[group_id].create_topics(new_topics, **kwargs) 92 | else: 93 | fs = self.admin_clients[group_id].create_topics([new_topics], **kwargs) 94 | 95 | topics_results={} 96 | for topic, f in fs.items(): 97 | try: 98 | if f.exception() is None: 99 | topics_results[topic] = f.result() 100 | else: 101 | topics_results[topic] = f.exception() 102 | except KafkaException as e: 103 | return f"Failed to create topic {topic}: {e}" 104 | except (TypeError, ValueError ) as e: 105 | return f"Invalid input: {e}" 106 | return topics_results 107 | 108 | def delete_topics(self, group_id, topics, **kwargs): 109 | if isinstance(topics, str): 110 | topics = [topics] 111 | 112 | fs = self.admin_clients[group_id].delete_topics(topics, **kwargs) 113 | 114 | topics_results={} 115 | for topic, f in fs.items(): 116 | try: 117 | if f.exception() is None: 118 | topics_results[topic] = f.result() 119 | else: 120 | topics_results[topic] = f.exception() 121 | except KafkaException as e: 122 | return f"Failed to delete topic {topic}: {e}" 123 | except (TypeError, ValueError ) as e: 124 | return f"Invalid input: {e}" 125 | return topics_results 126 | 127 | def create_partitions(self, group_id, new_partitions, **kwargs): 128 | """Create additional partitions for the given topics. 129 | - ``new_partitions`` (list(NewPartitions) or NewPartitions): New partitions to be created. 
130 | """ 131 | fs = None 132 | if isinstance(new_partitions, list): 133 | fs = self.admin_clients[group_id].create_partitions(new_partitions, **kwargs) 134 | else: 135 | fs = self.admin_clients[group_id].create_partitions([new_partitions], **kwargs) 136 | 137 | partitions_results={} 138 | for partition, f in fs.items(): 139 | try: 140 | if f.exception() is None: 141 | partitions_results[partition] = f.result() 142 | else: 143 | partitions_results[partition] = f.exception() 144 | except KafkaException as e: 145 | return f"Failed to add partitions to topic {partition}: {e}" 146 | except (TypeError, ValueError ) as e: 147 | return f"Invalid input: {e}" 148 | return partitions_results 149 | 150 | def describe_configs(self, group_id, resources, **kwargs): 151 | """Get the configuration of the specified resources. 152 | - ``resources`` (list(ConfigResource) or ConfigResource): Resources to get the configuration for. 153 | """ 154 | fs = None 155 | if isinstance(resources, list): 156 | fs = self.admin_clients[group_id].describe_configs(resources, **kwargs) 157 | else: 158 | fs = self.admin_clients[group_id].describe_configs([resources], **kwargs) 159 | 160 | config_results={} 161 | for config, f in fs.items(): 162 | try: 163 | if f.exception() is None: 164 | config_results[config.name] = f.result() 165 | else: 166 | config_results[config.name] = f.exception() 167 | except KafkaException as e: 168 | return f"Failed to describe config {config.name}: {e}" 169 | except (TypeError, ValueError ) as e: 170 | return f"Invalid input: {e}" 171 | return config_results 172 | 173 | def describe_topics(self, group_id, topics, **kwargs): 174 | """Describe topics. 175 | - ``topics`` (list(str) or str): List of topic names or only topic name to describe. 176 | """ 177 | if isinstance(topics, list): 178 | topics = TopicCollection(topics) 179 | else: 180 | topics = TopicCollection([topics]) 181 | 182 | topics = self.admin_clients[group_id].describe_topics(topics, **kwargs) 183 | topics_results={} 184 | for topic, f in topics.items(): 185 | try: 186 | if f.exception() is None: 187 | topics_results[topic] = f.result() 188 | else: 189 | topics_results[topic] = f.exception() 190 | except KafkaException as e: 191 | return f"Failed to describe topic {topic.name}: {e}" 192 | except (TypeError, ValueError ) as e: 193 | return f"Invalid input: {e}" 194 | return topics_results 195 | 196 | def describe_cluster(self, group_id, **kwargs): 197 | """Describe cluster. 198 | """ 199 | cluster = self.admin_clients[group_id].describe_cluster(**kwargs) 200 | try: 201 | if cluster.exception() is None: 202 | cluster = cluster.result() 203 | else: 204 | cluster = cluster.exception() 205 | except KafkaException as e: 206 | return f"Failed to describe cluster: {e}" 207 | except (TypeError, ValueError ) as e: 208 | return f"Invalid input: {e}" 209 | return cluster 210 | 211 | def alter_configs(self, group_id, resources, **kwargs): 212 | """Update configuration properties for the specified resources. 213 | - ``resources`` (list(ConfigResource) or ConfigResource): Resources to update configuration of. 
214 | """ 215 | fs = None 216 | if isinstance(resources, list): 217 | fs = self.admin_clients[group_id].alter_configs(resources, **kwargs) 218 | else: 219 | fs = self.admin_clients[group_id].alter_configs([resources], **kwargs) 220 | 221 | config_results={} 222 | for config, f in fs.items(): 223 | try: 224 | if f.exception() is None: 225 | config_results[config.name] = f.result() 226 | else: 227 | config_results[config.name] = f.exception() 228 | except KafkaException as e: 229 | return f"Failed to alter config {config.name}: {e}" 230 | except (TypeError, ValueError ) as e: 231 | return f"Invalid input: {e}" 232 | return config_results -------------------------------------------------------------------------------- /src/ConfluentKafkaLibrary/consumer.py: -------------------------------------------------------------------------------- 1 | import uuid 2 | from threading import Thread 3 | from confluent_kafka import Consumer, KafkaException, KafkaError, TopicPartition 4 | from confluent_kafka import DeserializingConsumer 5 | from confluent_kafka.admin import AdminClient 6 | 7 | try: 8 | from confluent_kafka.avro.serializer import SerializerError 9 | except ImportError: 10 | pass 11 | 12 | 13 | class GetMessagesThread(Thread): 14 | 15 | def __init__( 16 | self, 17 | server='127.0.0.1', 18 | port='9092', 19 | topics='', 20 | group_id=None, 21 | only_value=True, 22 | **kwargs 23 | ): 24 | 25 | super().__init__() 26 | self.daemon = True 27 | self.server = server 28 | self.port = port 29 | self._is_running = True 30 | self.only_value = only_value 31 | self.consumer = KafkaConsumer() 32 | self.group_id = self.consumer.create_consumer(group_id=group_id, 33 | server=server, 34 | port=port, 35 | **kwargs) 36 | self.kwargs = kwargs 37 | if not isinstance(topics, list): 38 | topics = [topics] 39 | self.consumer.subscribe_topic(self.group_id, topics=topics) 40 | self.messages = [] 41 | self.messages += self.consumer.poll(group_id=self.group_id, only_value=self.only_value) 42 | self.start() 43 | 44 | def run(self): 45 | while self._is_running: 46 | try: 47 | self.messages += self.consumer.poll(group_id=self.group_id, only_value=self.only_value) 48 | except RuntimeError: 49 | self.consumer.unsubscribe(self.group_id) 50 | self.consumer.close_consumer(self.group_id) 51 | self._is_running = False 52 | 53 | def get_group_id(self): 54 | return self.group_id 55 | 56 | def get_messages(self): 57 | return self.messages[:] 58 | 59 | def clear_messages(self): 60 | self.messages.clear() 61 | 62 | def stop_consumer(self): 63 | self._is_running = False 64 | self.join() 65 | self.consumer.unsubscribe(self.group_id) 66 | self.consumer.close_consumer(self.group_id) 67 | admin_client = AdminClient({'bootstrap.servers': f'{self.server}:{self.port}', **self.kwargs}) 68 | response = admin_client.delete_consumer_groups([self.group_id], request_timeout=10) 69 | try: 70 | response[self.group_id].result() 71 | except Exception as e: 72 | return e 73 | return response[self.group_id].exception() 74 | 75 | 76 | class KafkaConsumer(): 77 | 78 | def __init__(self): 79 | self.consumers = {} 80 | 81 | def create_consumer( 82 | self, 83 | group_id=None, 84 | server="127.0.0.1", 85 | port="9092", 86 | enable_auto_commit=True, 87 | auto_offset_reset="latest", 88 | auto_create_topics=True, 89 | key_deserializer=None, 90 | value_deserializer=None, 91 | deserializing=False, 92 | **kwargs 93 | ): 94 | """Create Kafka Consumer and returns its `group_id` as string. 
95 | 
96 |         Keyword Arguments:
97 |         - ``server`` (str): IP address / domain that the consumer should
98 |         contact to bootstrap initial cluster metadata.
99 |         Default: `127.0.0.1`.
100 |         - ``port`` (int): Port number. Default: `9092`.
101 |         - ``group_id`` (str): name of the consumer group
102 |         to join for dynamic partition assignment (if enabled), and to use for fetching and
103 |         committing offsets. If None, unique string is generated (via uuid.uuid4())
104 |         and offset commits are disabled. Default: `None`.
105 |         - ``auto_offset_reset`` (str): A policy for resetting offsets on
106 |         OffsetOutOfRange errors: `earliest` will move to the oldest
107 |         available message, `latest` will move to the most recent. Any
108 |         other value will raise an exception. Default: `latest`.
109 |         - ``enable_auto_commit`` (bool): If true the consumer's offset will be
110 |         periodically committed in the background. Default: `True`.
111 |         - ``auto_create_topics`` (bool): Allow automatic topic creation by the broker. Consumers no longer
112 |         trigger auto creation of topics; this option will be removed in a future release. Default: `True`.
113 |         - ``deserializing`` (bool): Activate a DeserializingConsumer with deserialization capabilities.
114 |         Default: `False`.
115 | 
116 |         Note:
117 |         Configuration parameters are described in more detail at
118 |         https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md \n
119 |         """
120 |         if group_id is None:
121 |             group_id = str(uuid.uuid4())
122 | 
123 |         if deserializing:
124 |             consumer = DeserializingConsumer({
125 |                 'bootstrap.servers': '{}:{}'.format(server, port),
126 |                 'group.id': group_id,
127 |                 'enable.auto.commit': enable_auto_commit,
128 |                 'auto.offset.reset': auto_offset_reset,
129 |                 'key.deserializer': key_deserializer,
130 |                 'value.deserializer': value_deserializer,
131 |                 **kwargs})
132 |         else:
133 |             consumer = Consumer({
134 |                 'bootstrap.servers': '{}:{}'.format(server, port),
135 |                 'group.id': group_id,
136 |                 'enable.auto.commit': enable_auto_commit,
137 |                 'allow.auto.create.topics': auto_create_topics,
138 |                 'auto.offset.reset': auto_offset_reset,
139 |                 **kwargs})
140 | 
141 |         self.consumers[group_id] = consumer
142 |         return group_id
143 | 
144 |     def get_all_consumers(self):
145 |         """Returns all non-threaded consumers.
146 |         """
147 |         return self.consumers
148 | 
149 |     def create_topic_partition(self, topic_name, partition=None, offset=None):
150 |         """Returns TopicPartition object based on
151 |         https://docs.confluent.io/current/clients/confluent-kafka-python/#topicpartition
152 | 
153 |         - ``topic_name`` (str): Topic name.
154 |         - ``partition`` (int): Partition id.
155 |         - ``offset`` (int): Initial partition offset.
156 |         """
157 |         if partition is not None and offset is not None:
158 |             return TopicPartition(topic_name, partition, offset)
159 |         if partition is not None:
160 |             return TopicPartition(topic_name, partition)
161 |         if offset is not None:
162 |             return TopicPartition(topic_name, offset=offset)
163 |         return TopicPartition(topic_name)
164 | 
165 |     def get_topic_partitions(self, topic):
166 |         """Returns dictionary of all TopicPartitions in topic (topic.partitions).
167 |         """
168 |         return topic.partitions
169 | 
170 |     def subscribe_topic(self, group_id, topics, **kwargs):
171 |         """Subscribe to a list of topics, or a topic regex pattern.
172 |         https://docs.confluent.io/current/clients/confluent-kafka-python/#confluent_kafka.Consumer.subscribe
173 | 
174 |         - ``topics`` (list): List of topics for subscription.
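
        Example (a minimal sketch; ``mytopic`` and ``^test_.*`` are illustrative, topics prefixed
        with ``^`` are treated as regex patterns, and each call replaces the current subscription):

        | `Subscribe Topic` | group_id=${group_id} | topics=mytopic | # single topic |
        | `Subscribe Topic` | group_id=${group_id} | topics=^test_.* | # regex pattern |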
175 | """ 176 | if not isinstance(topics, list): 177 | topics = [topics] 178 | self.consumers[group_id].subscribe(topics, **kwargs) 179 | 180 | def get_watermark_offsets(self, group_id, topic_partition, **kwargs): 181 | """Retrieve low and high offsets for partition. 182 | """ 183 | if not isinstance(topic_partition, TopicPartition): 184 | raise TypeError('topic_partition needs to be TopicPartition() type!') 185 | return self.consumers[group_id].get_watermark_offsets(topic_partition, **kwargs) 186 | 187 | def get_assignment(self, group_id): 188 | return self.consumers[group_id].assignment() 189 | 190 | def assign_to_topic_partition(self, group_id, topic_partitions): 191 | """Assign a list of TopicPartitions. 192 | 193 | - ``topic_partitions`` (`TopicPartition` or list of `TopicPartition`): Assignment for this instance. 194 | """ 195 | if isinstance(topic_partitions, TopicPartition): 196 | topic_partitions = [topic_partitions] 197 | for topic_partition in topic_partitions: 198 | if topic_partition not in self.consumers[group_id].assignment(): 199 | self.consumers[group_id].assign(topic_partitions) 200 | 201 | def unassign(self, group_id): 202 | self.consumers[group_id].unassign() 203 | 204 | def unsubscribe(self, group_id): 205 | """Unsubscribe of topics. 206 | """ 207 | self.consumers[group_id].unsubscribe() 208 | 209 | def close_consumer(self, group_id): 210 | """Close down and terminate the Kafka Consumer. 211 | """ 212 | self.consumers[group_id].close() 213 | del self.consumers[group_id] 214 | 215 | def seek(self, group_id, topic_partition): 216 | """https://docs.confluent.io/current/clients/confluent-kafka-python/#confluent_kafka.Consumer.seek 217 | """ 218 | return self.consumers[group_id].seek(topic_partition) 219 | 220 | def get_position(self, group_id, topic_partitions): 221 | """Retrieve current positions (offsets) for the list of partitions. 222 | 223 | - ``topic_partitions`` (`TopicPartition` or list of `TopicPartition`): Assignment for this instance. 224 | """ 225 | if isinstance(topic_partitions, TopicPartition): 226 | topic_partitions = [topic_partitions] 227 | return self.consumers[group_id].position(topic_partitions) 228 | 229 | def pause(self, group_id, topic_partitions): 230 | """Pause consumption for the provided list of partitions. 231 | """ 232 | if isinstance(topic_partitions, TopicPartition): 233 | topic_partitions = [topic_partitions] 234 | self.consumers[group_id].pause(topic_partitions) 235 | 236 | def resume(self, group_id, topic_partitions): 237 | """Resume consumption for the provided list of partitions. 238 | """ 239 | if isinstance(topic_partitions, TopicPartition): 240 | topic_partitions = [topic_partitions] 241 | self.consumers[group_id].resume(topic_partitions) 242 | 243 | def store_offsets(self, group_id, **kwargs): 244 | """Store offsets for a message or a list of offsets. 245 | """ 246 | self.consumers[group_id].store_offsets(**kwargs) 247 | 248 | def poll( 249 | self, 250 | group_id, 251 | timeout=1, 252 | max_records=1, 253 | poll_attempts=10, 254 | only_value=True, 255 | auto_create_topics=True, 256 | decode_format=None, 257 | fail_on_deserialization=False 258 | ): 259 | """Fetch and return messages from assigned topics / partitions as list. 260 | - ``timeout`` (int): Seconds spent waiting in poll if data is not available in the buffer.\n 261 | - ``max_records`` (int): maximum number of messages to get from poll. Default: 1. 262 | If 0, returns immediately with any records that are available currently in the buffer, 263 | else returns empty. 
Must not be negative. Default: `1` 264 | - ``poll_attempts`` (int): Attempts to consume messages and endless looping prevention. 265 | Sometimes the first messages are None or the topic could be empty. Default: `10`. 266 | - ``only_value`` (bool): Return only message.value(). Default: `True`. 267 | - ``decode_format`` (str) - If you need to decode data to specific format 268 | (See https://docs.python.org/3/library/codecs.html#standard-encodings). Default: None. 269 | - ``auto_create_topics`` (bool): Consumers no longer trigger auto creation of topics, 270 | will be removed in future release. If True then the error message UNKNOWN_TOPIC_OR_PART is ignored. 271 | Default: `True`. 272 | - ``fail_on_deserialization`` (bool): If True and message deserialization fails, will raise a SerializerError 273 | exception; on False will just stop the current poll and return the message so far. Default: `False`. 274 | """ 275 | 276 | messages = [] 277 | while poll_attempts > 0: 278 | msg = None 279 | try: 280 | msg = self.consumers[group_id].poll(timeout=timeout) 281 | except SerializerError as err: 282 | error = 'Message deserialization failed for {}: {}'.format(msg, err) 283 | if fail_on_deserialization: 284 | raise SerializerError(error) 285 | 286 | print(error) 287 | break 288 | 289 | if msg is None: 290 | poll_attempts -= 1 291 | continue 292 | 293 | if msg.error(): 294 | # Workaround due to new message return + deprecation of the "Consumers no longer trigger auto creation of topics" 295 | if int(msg.error().code()) == KafkaError.UNKNOWN_TOPIC_OR_PART and auto_create_topics: 296 | continue 297 | raise KafkaException(msg.error()) 298 | 299 | if only_value: 300 | messages.append(msg.value()) 301 | else: 302 | messages.append(msg) 303 | 304 | if len(messages) == max_records: 305 | break 306 | 307 | if decode_format: 308 | messages = self._decode_data(data=messages, decode_format=decode_format) 309 | 310 | return messages 311 | 312 | def _decode_data(self, data, decode_format): 313 | if decode_format: 314 | return [record.decode(str(decode_format)) for record in data] 315 | return data 316 | 317 | # Experimental - getting messages from kafka topic every second 318 | def start_consumer_threaded( 319 | self, 320 | topics, 321 | group_id=None, 322 | server='127.0.0.1', 323 | port='9092', 324 | only_value=True, 325 | **kwargs 326 | ): 327 | """Run consumer in daemon thread and store data from topics. To read and work with this 328 | collected data use keyword `Get Messages From Thread`. 329 | Could be used at the Test setup or in each test. 330 | This is useful when you are reading always the same topics and you don't want to create 331 | consumer in each test to poll data. You can create as many consumers in the Test setup 332 | as you want and then in test just read data with `Get Messages From Thread` keyword. 333 | - ``topics`` (list): List of topics for subscription. 334 | - ``group_id`` (str or uuid.uuid4() if not set) : name of the consumer group to join for 335 | dynamic partition assignment (if enabled), and to use for fetching and 336 | committing offsets. If None, unique string is generated (via uuid.uuid4()) 337 | and offset commits are disabled. Default: `None`. 
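
        Example (a minimal sketch, assuming a broker on the default ``127.0.0.1:9092``; the topic
        name and the sleep are illustrative only):

        | ${thread}= | `Start Consumer Threaded` | topics=mytopic |
        | Sleep | 1s | # give the thread time to gather data |
        | ${messages}= | `Get Messages From Thread` | ${thread} | decode_format=utf_8 |
        | `Stop Consumer Threaded` | ${thread} |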
338 | """ 339 | if group_id is None: 340 | group_id = str(uuid.uuid4()) 341 | if topics is None: 342 | raise ValueError("Topics can not be empty!") 343 | 344 | consumer_thread = GetMessagesThread(server, port, topics, group_id=group_id, only_value=only_value, **kwargs) 345 | group_id = consumer_thread.group_id 346 | self.consumers[group_id] = consumer_thread.consumer.consumers[group_id] 347 | return consumer_thread 348 | 349 | def get_messages_from_thread(self, running_thread, decode_format=None): 350 | """Returns all records gathered from specific thread 351 | - ``running_thread`` (Thread object) - thread which was executed with 352 | `Start Consumer Threaded` keyword 353 | - ``decode_format`` (str) - If you need to decode data to specific format 354 | (See https://docs.python.org/3/library/codecs.html#standard-encodings). Default: None. 355 | """ 356 | records = running_thread.get_messages() 357 | if records: 358 | records = self._decode_data(records, decode_format) 359 | return records 360 | 361 | def get_thread_group_id(self, running_thread): 362 | return running_thread.get_group_id() 363 | 364 | def clear_messages_from_thread(self, running_thread): 365 | """Remove all records gathered from specific thread 366 | - ``running_thread`` (Thread object) - thread which was executed with 367 | `Start Consumer Threaded` keyword 368 | """ 369 | try: 370 | running_thread.clear_messages() 371 | except Exception as e: 372 | return f"Messages were not removed from thread {running_thread}!\n{e}" 373 | 374 | def stop_consumer_threaded(self, running_thread): 375 | resp = running_thread.stop_consumer() 376 | return resp -------------------------------------------------------------------------------- /src/ConfluentKafkaLibrary/producer.py: -------------------------------------------------------------------------------- 1 | import uuid 2 | from confluent_kafka import SerializingProducer 3 | from confluent_kafka import Producer 4 | 5 | class KafkaProducer(): 6 | 7 | def __init__(self): 8 | self.producers = {} 9 | 10 | def create_producer( 11 | self, 12 | server='127.0.0.1', 13 | port='9092', 14 | group_id=None, 15 | key_serializer=None, 16 | value_serializer=None, 17 | serializing=False, 18 | **kwargs 19 | ): 20 | """Create Kafka Producer and returns its `group_id` as string. 21 | 22 | Keyword Arguments: 23 | - ``server``: (str): IP address / domain, that the consumer should 24 | contact to bootstrap initial cluster metadata. 25 | Default: `127.0.0.1`. 26 | - ``port`` (int): Port number. Default: `9092`. 27 | - ``serializing`` (bool): Activate SerializingProducer with serialization capabilities. 28 | Default: `False` 29 | """ 30 | if group_id is None: 31 | group_id = str(uuid.uuid4()) 32 | 33 | if serializing: 34 | producer = SerializingProducer({ 35 | 'bootstrap.servers': '{}:{}'.format(server, port), 36 | 'key.serializer': key_serializer, 37 | 'value.serializer': value_serializer, 38 | **kwargs} 39 | ) 40 | else: 41 | producer = Producer({ 42 | 'bootstrap.servers': '{}:{}'.format(server, port), 43 | **kwargs}) 44 | 45 | self.producers[group_id] = producer 46 | return group_id 47 | 48 | def produce( 49 | self, 50 | group_id, 51 | topic, 52 | value=None, 53 | key=None, 54 | headers=None, 55 | **kwargs 56 | ): 57 | """Produce message to topic asynchronously to Kafka by encoding with specified or default avro schema.\n 58 | https://docs.confluent.io/current/clients/confluent-kafka-python/#confluent_kafka.Producer.produce 59 | 60 | - ``topic`` (str) : name of the topic where to produce message. 
61 | - ``value`` (str|bytes): Message payload. 62 | - ``key`` (str|bytes): Message key. Default: `None`. 63 | - ``headers`` (dict[str, bytes]): Message headers. Default: `None`. 64 | - ``partition`` (int): Partition to produce to, else uses the configured built-in partitioner. 65 | """ 66 | self.producers[group_id].produce( 67 | topic=topic, 68 | value=value, 69 | key=key, 70 | headers=headers, 71 | **kwargs 72 | ) 73 | 74 | def flush(self, group_id, timeout=0.1): 75 | """Wait for all messages in the Producer queue to be delivered. Returns the number of messages still in queue. 76 | This is a convenience method that calls poll() until len() is zero or the optional timeout elapses. 77 | - `timeout` (real) : Optional timeout. Default: `0.1`. 78 | """ 79 | messages_in_queue = self.producers[group_id].flush(timeout) 80 | return messages_in_queue 81 | 82 | def purge(self, group_id, **kwargs): 83 | """Purge messages currently handled by the producer instance. 84 | """ 85 | self.producers[group_id].purge(**kwargs) 86 | -------------------------------------------------------------------------------- /src/ConfluentKafkaLibrary/serialization.py: -------------------------------------------------------------------------------- 1 | from confluent_kafka.schema_registry.avro import AvroSerializer, AvroDeserializer 2 | from confluent_kafka.schema_registry.protobuf import ProtobufSerializer, ProtobufDeserializer 3 | from confluent_kafka.schema_registry.json_schema import JSONSerializer, JSONDeserializer 4 | from confluent_kafka.serialization import (DoubleSerializer, IntegerSerializer, StringSerializer, 5 | DoubleDeserializer, IntegerDeserializer, StringDeserializer) 6 | 7 | 8 | class Serializer(): 9 | 10 | def get_avro_serializer(self, schema_str, schema_registry_client, to_dict=None, conf=None): 11 | return AvroSerializer(schema_registry_client, schema_str, to_dict, conf) 12 | 13 | def get_double_serializer(self): 14 | return DoubleSerializer() 15 | 16 | def get_integer_serializer(self): 17 | return IntegerSerializer() 18 | 19 | def get_json_serializer(self, schema_str, schema_registry_client, to_dict=None, conf=None): 20 | return JSONSerializer(schema_str, schema_registry_client, to_dict, conf) 21 | 22 | def get_protobuf_serializer(self, msg_type, schema_registry_client, conf=None): 23 | base_conf = {'use.deprecated.format': False} 24 | if conf is None: 25 | conf = base_conf.copy() 26 | else: 27 | conf.update(base_conf) 28 | 29 | return ProtobufSerializer(msg_type, schema_registry_client, conf) 30 | 31 | def get_string_serializer(self, codec='utf_8'): 32 | return StringSerializer(codec) 33 | 34 | 35 | class Deserializer(): 36 | 37 | def get_avro_deserializer(self, schema_str, schema_registry_client, from_dict=None): 38 | return AvroDeserializer(schema_registry_client, schema_str, from_dict) 39 | 40 | def get_double_deserializer(self): 41 | return DoubleDeserializer() 42 | 43 | def get_integer_deserializer(self): 44 | return IntegerDeserializer() 45 | 46 | def get_json_deserializer(self, schema_str, from_dict=None): 47 | return JSONDeserializer(schema_str, from_dict) 48 | 49 | def get_protobuf_deserializer(self, message_type): 50 | return ProtobufDeserializer(message_type, {'use.deprecated.format': False}) 51 | 52 | def get_string_deserializer(self, codec='utf_8'): 53 | return StringDeserializer(codec) 54 | -------------------------------------------------------------------------------- /src/ConfluentKafkaLibrary/version.py: -------------------------------------------------------------------------------- 
1 | VERSION = '2.8.0-2' 2 | --------------------------------------------------------------------------------