├── src └── ConfluentKafkaLibrary │ ├── version.py │ ├── serialization.py │ ├── producer.py │ ├── __init__.py │ ├── admin_client.py │ └── consumer.py ├── setup.py ├── examples ├── schema │ └── protobuf │ │ ├── user.proto │ │ ├── user_helper.py │ │ └── user_pb2.py ├── test_oauth.robot ├── oauth2_test.py ├── docker-compose.yml ├── test_avro.robot ├── test_protobuf.robot ├── test_adminclient.robot └── test.robot ├── .github └── workflows │ ├── python-publish.yml │ ├── generate_docs.yml │ └── main.yml ├── pyproject.toml ├── .gitignore ├── README.md └── LICENSE /src/ConfluentKafkaLibrary/version.py: -------------------------------------------------------------------------------- 1 | VERSION = '2.12.2.post1' 2 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from setuptools import setup 4 | 5 | setup() 6 | -------------------------------------------------------------------------------- /examples/schema/protobuf/user.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package example.protobuf; 3 | 4 | message User { 5 | string name = 1; 6 | int32 number = 2; 7 | } 8 | -------------------------------------------------------------------------------- /examples/schema/protobuf/user_helper.py: -------------------------------------------------------------------------------- 1 | from user_pb2 import User 2 | from robot.api.deco import keyword 3 | 4 | @keyword("Get Type") 5 | def get_type(): 6 | return User 7 | 8 | @keyword("Create User") 9 | def create_user(name, number: int, serialize :bool=False): 10 | new_user = User(name = name, number = number) 11 | 12 | if serialize: 13 | return new_user.SerializeToString() 14 | 15 | return new_user 16 | -------------------------------------------------------------------------------- /examples/test_oauth.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Library ConfluentKafkaLibrary 3 | Library oauth2_test 4 | Library Collections 5 | Library String 6 | 7 | 8 | *** Variables *** 9 | ${SEEN_RF_OAUTH_CB_PRODUCER} ${False} 10 | ${SEEN_RF_OAUTH_CB_CONSUMER} ${False} 11 | ${KAFKA_BOOTSTRAP_SERVERS} localhost:9092 12 | ${TEST_TOPIC} oauth2-test-topic 13 | 14 | 15 | *** Test Cases *** 16 | Test OAuth2 Token Generation 17 | ${test_token}= oauth2_test.create_test_token 18 | Should Not Be Empty ${test_token} 19 | ${producer_token_func}= oauth2_test.get_test_producer_token 20 | ${consumer_token_func}= oauth2_test.get_test_consumer_token 21 | 22 | 23 | Test OAuth2 Library Integration 24 | ${string_serializer}= Get String Serializer 25 | ${oauth_func}= oauth2_test.get_test_producer_token 26 | 27 | ${status} ${error}= Run Keyword And Ignore Error 28 | ... Create Producer localhost:9092 29 | ... oauth_cb=${oauth_func} 30 | ... security.protocol=sasl_plaintext 31 | ... sasl.mechanisms=OAUTHBEARER 32 | Should Be Equal ${status} PASS -------------------------------------------------------------------------------- /examples/schema/protobuf/user_pb2.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Generated by the protocol buffer compiler. DO NOT EDIT! 
3 | # source: user.proto 4 | """Generated protocol buffer code.""" 5 | from google.protobuf.internal import builder as _builder 6 | from google.protobuf import descriptor as _descriptor 7 | from google.protobuf import descriptor_pool as _descriptor_pool 8 | from google.protobuf import symbol_database as _symbol_database 9 | # @@protoc_insertion_point(imports) 10 | 11 | _sym_db = _symbol_database.Default() 12 | 13 | 14 | 15 | 16 | DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\nuser.proto\x12\x10\x65xample.protobuf\"$\n\x04User\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x02 \x01(\x05\x62\x06proto3') 17 | 18 | _globals = globals() 19 | _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) 20 | _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'user_pb2', _globals) 21 | if _descriptor._USE_C_DESCRIPTORS == False: 22 | 23 | DESCRIPTOR._options = None 24 | _globals['_USER']._serialized_start=32 25 | _globals['_USER']._serialized_end=68 26 | # @@protoc_insertion_point(module_scope) 27 | -------------------------------------------------------------------------------- /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow will upload a Python Package using Twine when a release is created 2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries 3 | 4 | name: Upload Python Package 5 | 6 | on: 7 | release: 8 | types: [created] 9 | 10 | jobs: 11 | deploy: 12 | 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | - uses: actions/checkout@v5 17 | - name: Set up Python 18 | uses: actions/setup-python@v6 19 | with: 20 | python-version: '3.x' 21 | - name: Install pypa/build 22 | run: >- 23 | python -m 24 | pip install 25 | build 26 | --user 27 | - name: Build a binary wheel and a source tarball 28 | run: >- 29 | python -m 30 | build 31 | --sdist 32 | --wheel 33 | --outdir dist/ 34 | . 35 | - name: Publish distribution 📦 to PyPI 36 | if: startsWith(github.ref, 'refs/tags') 37 | uses: pypa/gh-action-pypi-publish@release/v1 38 | with: 39 | password: ${{ secrets.PYPI_API_TOKEN }} 40 | -------------------------------------------------------------------------------- /.github/workflows/generate_docs.yml: -------------------------------------------------------------------------------- 1 | name: Generate Docs 2 | 3 | on: 4 | workflow_run: 5 | workflows: [Upload Python Package] 6 | types: 7 | - completed 8 | # Allows you to run this workflow manually from the Actions tab 9 | workflow_dispatch: 10 | 11 | jobs: 12 | deploy: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v5 16 | # This latest python way can't find the librdkafka.h files 17 | # - name: Set up Python 18 | # uses: actions/setup-python@v4 19 | # with: 20 | # python-version: '3.x' 21 | - name: Install requirements 22 | run: pip install .[all] 23 | - name: Generate keyword documentation 24 | run: python3 -m robot.libdoc -f html src/ConfluentKafkaLibrary docs/index.html 25 | - uses: stefanzweifel/git-auto-commit-action@v6 26 | with: 27 | file_pattern: docs/index.html 28 | commit_message: Add keyword documentation 29 | push_options: '--force' 30 | - name: Deploy 🚀 31 | uses: JamesIves/github-pages-deploy-action@v4.5.0 32 | with: 33 | branch: gh-pages # The branch the action should deploy to. 34 | folder: docs # The folder the action should deploy. 
35 | -------------------------------------------------------------------------------- /examples/oauth2_test.py: -------------------------------------------------------------------------------- 1 | import time 2 | import functools 3 | import json 4 | from robot.libraries.BuiltIn import BuiltIn 5 | 6 | 7 | def create_test_token(): 8 | """Create a test OAuth2 token for unsecured testing""" 9 | token_payload = { 10 | "sub": "test-user", 11 | "iss": "test-issuer", 12 | "aud": "kafka", 13 | "exp": int(time.time()) + 3600, 14 | "iat": int(time.time()), 15 | "scope": "kafka-producer kafka-consumer" 16 | } 17 | 18 | import base64 19 | token_json = json.dumps(token_payload) 20 | test_token = base64.b64encode(token_json.encode()).decode() 21 | 22 | return test_token 23 | 24 | 25 | def oauth_cb_test_producer(oauth_config): 26 | BuiltIn().set_global_variable("${SEEN_RF_OAUTH_CB_PRODUCER}", True) 27 | test_token = create_test_token() 28 | expiry_time = time.time() + 3600 29 | BuiltIn().log(f"Generated test token: {test_token[:50]}...") 30 | return test_token, expiry_time 31 | 32 | 33 | def oauth_cb_test_consumer(oauth_config): 34 | BuiltIn().set_global_variable("${SEEN_RF_OAUTH_CB_CONSUMER}", True) 35 | test_token = create_test_token() 36 | expiry_time = time.time() + 3600 37 | 38 | return test_token, expiry_time 39 | 40 | 41 | def get_test_producer_token(oauth_config=None): 42 | return functools.partial(oauth_cb_test_producer, oauth_config or {}) 43 | 44 | 45 | def get_test_consumer_token(oauth_config=None): 46 | return functools.partial(oauth_cb_test_consumer, oauth_config or {}) -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=61.0"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "robotframework-confluentkafkalibrary" 7 | dynamic = ["version"] 8 | description = "Confluent Kafka library for Robot Framework" 9 | readme = "README.md" 10 | authors = [ 11 | {name = "Robert Karasek", email = "robo.karasek@gmail.com"}, 12 | ] 13 | license = "Apache-2.0" 14 | license-files = ["LICEN[CS]E*"] 15 | keywords = ["robotframework", "confluent", "kafka"] 16 | classifiers = [ 17 | "Operating System :: OS Independent", 18 | "Programming Language :: Python", 19 | "Topic :: Software Development :: Testing", 20 | ] 21 | requires-python = ">=3.8" 22 | dependencies = [ 23 | "robotframework >= 3.2.1", 24 | "confluent-kafka == 2.12.2", 25 | "requests >= 2.25.1", 26 | ] 27 | 28 | [project.urls] 29 | "Homepage" = "https://github.com/robooo/robotframework-ConfluentKafkaLibrary" 30 | 31 | [project.optional-dependencies] 32 | avro = [ 33 | "fastavro >= 1.3.2", 34 | "avro >= 1.11.1", 35 | ] 36 | json = [ 37 | "jsonschema >= 3.2.0", 38 | "pyrsistent >= 0.20.0", 39 | "orjson >= 3.10", 40 | ] 41 | protobuf = [ 42 | "protobuf >= 4.22.0", 43 | "googleapis-common-protos >= 1.66.0", 44 | ] 45 | schemaregistry = [ 46 | "httpx>=0.26", 47 | "cachetools >= 5.5.0", 48 | "attrs >= 24.3.0", 49 | "certifi", 50 | "authlib >= 1.0.0", 51 | ] 52 | all = [ 53 | "robotframework-confluentkafkalibrary[avro]", 54 | "robotframework-confluentkafkalibrary[json]", 55 | "robotframework-confluentkafkalibrary[protobuf]", 56 | "robotframework-confluentkafkalibrary[schemaregistry]", 57 | ] 58 | 59 | [tool.setuptools.packages.find] 60 | where = ["src"] 61 | 62 | [tool.setuptools.dynamic] 63 | version = {attr = "ConfluentKafkaLibrary.version.VERSION"} 
-------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | # This is a basic workflow to help you get started with Actions 2 | 3 | name: CI 4 | 5 | # Controls when the workflow will run 6 | on: 7 | # Triggers the workflow on push or pull request events but only for the master branch 8 | push: 9 | branches: [ master ] 10 | pull_request: 11 | branches: [ master ] 12 | 13 | # Allows you to run this workflow manually from the Actions tab 14 | workflow_dispatch: 15 | 16 | # A workflow run is made up of one or more jobs that can run sequentially or in parallel 17 | jobs: 18 | # This workflow contains a single job called "build" 19 | build: 20 | # The type of runner that the job will run on 21 | runs-on: ubuntu-latest 22 | 23 | # Steps represent a sequence of tasks that will be executed as part of the job 24 | steps: 25 | # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it 26 | - uses: actions/checkout@v4 27 | 28 | - name: Spin up kafka 29 | run: cd examples && docker compose up -d && cd .. 30 | - name: Upgrade pip, setuptools, and wheel 31 | run: python3 -m pip install --upgrade pip setuptools wheel 32 | - name: Install python requirements 33 | run: pip install --force-reinstall .[all] 34 | - name: Check for broken dependencies 35 | run: pip check 36 | - name: Wait for services 37 | run: while [ -n "$(docker container ls -a | grep starting)" ]; do sleep 2; done; 38 | - name: Docker inspect 39 | run: docker inspect --format "{{json .State.Health.Status }}" $(docker compose -f examples/docker-compose.yml ps -q) 40 | - name: Show python version 41 | run: python3 --version 42 | - name: Execute tests 43 | run: cd examples && python3 -m robot -d ../docs . 
44 | - name: Archive test log 45 | if: ${{ always() }} 46 | uses: actions/upload-artifact@v4 47 | with: 48 | name: log.html 49 | path: docs/log.html 50 | -------------------------------------------------------------------------------- /src/ConfluentKafkaLibrary/serialization.py: -------------------------------------------------------------------------------- 1 | from confluent_kafka.schema_registry.avro import AvroSerializer, AvroDeserializer 2 | from confluent_kafka.schema_registry.protobuf import ProtobufSerializer, ProtobufDeserializer 3 | from confluent_kafka.schema_registry.json_schema import JSONSerializer, JSONDeserializer 4 | from confluent_kafka.serialization import (DoubleSerializer, IntegerSerializer, StringSerializer, 5 | DoubleDeserializer, IntegerDeserializer, StringDeserializer) 6 | 7 | 8 | class Serializer(): 9 | 10 | def get_avro_serializer(self, schema_str, schema_registry_client, to_dict=None, conf=None): 11 | return AvroSerializer(schema_registry_client, schema_str, to_dict, conf) 12 | 13 | def get_double_serializer(self): 14 | return DoubleSerializer() 15 | 16 | def get_integer_serializer(self): 17 | return IntegerSerializer() 18 | 19 | def get_json_serializer(self, schema_str, schema_registry_client, to_dict=None, conf=None): 20 | return JSONSerializer(schema_str, schema_registry_client, to_dict, conf) 21 | 22 | def get_protobuf_serializer(self, msg_type, schema_registry_client, conf=None): 23 | base_conf = {'use.deprecated.format': False} 24 | if conf is None: 25 | conf = base_conf.copy() 26 | else: 27 | conf.update(base_conf) 28 | 29 | return ProtobufSerializer(msg_type, schema_registry_client, conf) 30 | 31 | def get_string_serializer(self, codec='utf_8'): 32 | return StringSerializer(codec) 33 | 34 | 35 | class Deserializer(): 36 | 37 | def get_avro_deserializer(self, schema_str, schema_registry_client, from_dict=None): 38 | return AvroDeserializer(schema_registry_client, schema_str, from_dict) 39 | 40 | def get_double_deserializer(self): 41 | return DoubleDeserializer() 42 | 43 | def get_integer_deserializer(self): 44 | return IntegerDeserializer() 45 | 46 | def get_json_deserializer(self, schema_str, from_dict=None): 47 | return JSONDeserializer(schema_str, from_dict) 48 | 49 | def get_protobuf_deserializer(self, message_type): 50 | return ProtobufDeserializer(message_type, {'use.deprecated.format': False}) 51 | 52 | def get_string_deserializer(self, codec='utf_8'): 53 | return StringDeserializer(codec) 54 | -------------------------------------------------------------------------------- /examples/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | broker: 3 | image: confluentinc/cp-server:7.8.0 4 | hostname: broker 5 | container_name: broker 6 | ports: 7 | - '9092:9092' 8 | - '29092:29092' 9 | healthcheck: 10 | test: ["CMD-SHELL", "nc -z localhost 9092"] 11 | interval: 10s 12 | timeout: 5s 13 | retries: 5 14 | environment: 15 | KAFKA_NODE_ID: 1 16 | CLUSTER_ID: 'MkU3OEVBNTcwNTJENDM2Qk' 17 | KAFKA_PROCESS_ROLES: 'broker,controller' 18 | KAFKA_CONTROLLER_QUORUM_VOTERS: '1@broker:9093' 19 | KAFKA_LISTENERS: 'PLAINTEXT://broker:29092,CONTROLLER://broker:9093,PLAINTEXT_HOST://0.0.0.0:9092' 20 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker:29092,PLAINTEXT_HOST://localhost:9092 21 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT' 22 | KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER' 23 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 24 | 
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 25 | KAFKA_CONFLUENT_COMMAND_TOPIC_REPLICATION_FACTOR: 1 26 | KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1 27 | 28 | schema-registry: 29 | image: confluentinc/cp-schema-registry:7.8.0 30 | hostname: schema-registry 31 | container_name: schema-registry 32 | depends_on: 33 | broker: 34 | condition: service_healthy 35 | ports: 36 | - '8081:8081' 37 | healthcheck: 38 | test: ["CMD-SHELL", "nc -z localhost 8081"] 39 | interval: 10s 40 | timeout: 5s 41 | retries: 5 42 | environment: 43 | SCHEMA_REGISTRY_HOST_NAME: schema-registry 44 | SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: 'broker:29092' 45 | SCHEMA_REGISTRY_LISTENERS: 'http://0.0.0.0:8081' 46 | 47 | rest-proxy: 48 | image: confluentinc/cp-kafka-rest:7.8.0 49 | depends_on: 50 | broker: 51 | condition: service_healthy 52 | schema-registry: 53 | condition: service_healthy 54 | ports: 55 | - 8082:8082 56 | healthcheck: 57 | test: ["CMD-SHELL", "nc -z localhost 8082"] 58 | interval: 10s 59 | timeout: 5s 60 | retries: 5 61 | hostname: rest-proxy 62 | container_name: rest-proxy 63 | environment: 64 | KAFKA_REST_HOST_NAME: rest-proxy 65 | KAFKA_REST_BOOTSTRAP_SERVERS: 'broker:29092' 66 | KAFKA_REST_LISTENERS: 'http://0.0.0.0:8082' 67 | KAFKA_REST_SCHEMA_REGISTRY_URL: 'http://schema-registry:8081' 68 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode/ 2 | 3 | # Robot generated files 4 | log.html 5 | report.html 6 | output.xml 7 | 8 | # Byte-compiled / optimized / DLL files 9 | __pycache__/ 10 | *.py[cod] 11 | *$py.class 12 | 13 | # C extensions 14 | *.so 15 | 16 | # Distribution / packaging 17 | .Python 18 | build/ 19 | develop-eggs/ 20 | dist/ 21 | downloads/ 22 | eggs/ 23 | .eggs/ 24 | lib/ 25 | lib64/ 26 | parts/ 27 | sdist/ 28 | var/ 29 | wheels/ 30 | pip-wheel-metadata/ 31 | share/python-wheels/ 32 | *.egg-info/ 33 | .installed.cfg 34 | *.egg 35 | MANIFEST 36 | 37 | # PyInstaller 38 | # Usually these files are written by a python script from a template 39 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 40 | *.manifest 41 | *.spec 42 | 43 | # Installer logs 44 | pip-log.txt 45 | pip-delete-this-directory.txt 46 | 47 | # Unit test / coverage reports 48 | htmlcov/ 49 | .tox/ 50 | .nox/ 51 | .coverage 52 | .coverage.* 53 | .cache 54 | nosetests.xml 55 | coverage.xml 56 | *.cover 57 | .hypothesis/ 58 | .pytest_cache/ 59 | 60 | # Translations 61 | *.mo 62 | *.pot 63 | 64 | # Django stuff: 65 | *.log 66 | local_settings.py 67 | db.sqlite3 68 | db.sqlite3-journal 69 | 70 | # Flask stuff: 71 | instance/ 72 | .webassets-cache 73 | 74 | # Scrapy stuff: 75 | .scrapy 76 | 77 | # Sphinx documentation 78 | docs/_build/ 79 | 80 | # PyBuilder 81 | target/ 82 | 83 | # Jupyter Notebook 84 | .ipynb_checkpoints 85 | 86 | # IPython 87 | profile_default/ 88 | ipython_config.py 89 | 90 | # pyenv 91 | .python-version 92 | 93 | # pipenv 94 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 95 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 96 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 97 | # install all needed dependencies. 
98 | #Pipfile.lock 99 | 100 | # celery beat schedule file 101 | celerybeat-schedule 102 | 103 | # SageMath parsed files 104 | *.sage.py 105 | 106 | # Environments 107 | .env 108 | .venv 109 | env/ 110 | venv/ 111 | ENV/ 112 | env.bak/ 113 | venv.bak/ 114 | 115 | # Spyder project settings 116 | .spyderproject 117 | .spyproject 118 | 119 | # Rope project settings 120 | .ropeproject 121 | 122 | # mkdocs documentation 123 | /site 124 | 125 | # mypy 126 | .mypy_cache/ 127 | .dmypy.json 128 | dmypy.json 129 | 130 | # Pyre type checker 131 | .pyre/ -------------------------------------------------------------------------------- /examples/test_avro.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Library ConfluentKafkaLibrary 3 | Library Collections 4 | Library String 5 | 6 | Suite Setup Starting Test 7 | 8 | 9 | *** Test Cases *** 10 | Avro Producer Consumer With Serializers 11 | ${schema_registry_conf}= Create Dictionary url=http://127.0.0.1:8081 12 | ${schema_registry_client}= Get Schema Registry Client ${schema_registry_conf} 13 | ${schema_str}= Set Variable {"namespace": "example.avro","type": "record","name": "User","fields": [{"name": "name","type": "string"},{"name": "number","type": ["int","null"]}]} 14 | ${avro_serializer}= Get Avro Serializer ${schema_str} ${schema_registry_client} 15 | ${avro_deserializer}= Get Avro Deserializer ${schema_str} ${schema_registry_client} 16 | ${string_serializer}= Get String Serializer 17 | ${string_deserializer}= Get String Deserializer 18 | 19 | ${producer_id}= Create Producer key_serializer=${string_serializer} value_serializer=${avro_serializer} serializing=${True} 20 | ${value}= Create Dictionary name=Robot number=${10} 21 | Produce group_id=${producer_id} topic=avro_testing1 partition=${0} value=${value} key=${KEY} 22 | Wait Until Keyword Succeeds 10x 0.5s All Messages Are Delivered ${producer_id} 23 | 24 | ${consumer_group_id}= Create Consumer auto_offset_reset=latest key_deserializer=${string_deserializer} value_deserializer=${avro_deserializer} deserializing=${True} 25 | Subscribe Topic group_id=${consumer_group_id} topics=avro_testing1 26 | Poll group_id=${consumer_group_id} # Dummy poll when using offset reset 'latest' 27 | 28 | Produce group_id=${producer_id} topic=avro_testing1 value=${value} partition=${0} key=${KEY} 29 | Wait Until Keyword Succeeds 10x 0.5s All Messages Are Delivered ${producer_id} 30 | ${messages}= Poll group_id=${consumer_group_id} 31 | Should Be Equal ${messages} ${TEST_DATA} 32 | [Teardown] Basic Teardown ${consumer_group_id} 33 | 34 | 35 | *** Keywords *** 36 | Starting Test 37 | Set Suite Variable @{TEST_TOPIC} avro_testing1 38 | Set Suite Variable ${KEY} 568a68fd-2785-44cc-8997-1295c3755d28 39 | 40 | ${value}= Create Dictionary name=Robot number=${10} 41 | ${data}= Create List ${value} 42 | Set Suite Variable ${TEST_DATA} ${data} 43 | 44 | All Messages Are Delivered 45 | [Arguments] ${producer_id} 46 | ${count}= Flush ${producer_id} 47 | Log Reaming messages to be delivered: ${count} 48 | Should Be Equal As Integers ${count} 0 49 | 50 | Basic Teardown 51 | [Arguments] ${group_id} 52 | Unsubscribe ${group_id} 53 | Close Consumer ${group_id} 54 | ${groups}= Create List ${group_id} 55 | ${admin_client_id}= Create Admin Client 56 | ${resp}= Delete Groups ${admin_client_id} group_ids=${groups} 57 | Log ${resp} -------------------------------------------------------------------------------- /examples/test_protobuf.robot: 
-------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Library ConfluentKafkaLibrary 3 | Library schema/protobuf/user_helper.py 4 | 5 | *** Test Cases *** 6 | Protobuf Producer With Serializer 7 | ${schema_registry_conf}= Create Dictionary url=http://127.0.0.1:8081 8 | ${schema_registry_client}= Get Schema Registry Client ${schema_registry_conf} 9 | ${msg_type}= Get Type 10 | ${protobuf_serializer}= Get Protobuf Serializer ${msg_type} ${schema_registry_client} 11 | ${protobuf_deserializer}= Get Protobuf Deserializer ${msg_type} 12 | ${string_serializer}= Get String Serializer 13 | 14 | ${producer_id}= Create Producer key_serializer=${string_serializer} value_serializer=${protobuf_serializer} serializing=${True} 15 | ${value}= Create User Robot 10 16 | Produce group_id=${producer_id} topic=protobuf_testing1 key=bd232464-e3d3-425d-93b7-5789dc7273c1 value=${value} 17 | Wait Until Keyword Succeeds 10x 0.5s All Messages Are Delivered ${producer_id} 18 | 19 | Protobuf Producer Consumer With Serializer 20 | ${schema_registry_conf}= Create Dictionary url=http://127.0.0.1:8081 21 | ${schema_registry_client}= Get Schema Registry Client ${schema_registry_conf} 22 | ${msg_type}= Get Type 23 | ${protobuf_serializer}= Get Protobuf Serializer ${msg_type} ${schema_registry_client} 24 | ${protobuf_deserializer}= Get Protobuf Deserializer ${msg_type} 25 | ${string_serializer}= Get String Serializer 26 | ${string_deserializer}= Get String Deserializer 27 | 28 | ${producer_id}= Create Producer key_serializer=${string_serializer} value_serializer=${protobuf_serializer} serializing=${True} 29 | ${value}= Create User Robot 10 30 | Produce group_id=${producer_id} topic=protobuf_testing2 key=f01df0c6-ec0b-49e9-835f-d766a9e8036f value=${value} 31 | Wait Until Keyword Succeeds 10x 0.5s All Messages Are Delivered ${producer_id} 32 | 33 | ${consumer_group_id}= Create Consumer auto_offset_reset=earliest key_deserializer=${string_deserializer} value_deserializer=${protobuf_deserializer} deserializing=${True} 34 | Subscribe Topic group_id=${consumer_group_id} topics=protobuf_testing2 35 | ${messages}= Poll group_id=${consumer_group_id} 36 | Length Should Be ${messages} 1 37 | Should Be Equal ${messages[0]} ${value} 38 | [Teardown] Basic Teardown ${consumer_group_id} 39 | 40 | *** Keywords *** 41 | All Messages Are Delivered 42 | [Arguments] ${producer_id} 43 | ${count}= Flush ${producer_id} 44 | Log Reaming messages to be delivered: ${count} 45 | Should Be Equal As Integers ${count} 0 46 | 47 | Basic Teardown 48 | [Arguments] ${group_id} 49 | Unsubscribe ${group_id} 50 | Close Consumer ${group_id} 51 | ${groups}= Create List ${group_id} 52 | ${admin_client_id}= Create Admin Client 53 | ${resp}= Delete Groups ${admin_client_id} group_ids=${groups} 54 | Log ${resp} 55 | -------------------------------------------------------------------------------- /src/ConfluentKafkaLibrary/producer.py: -------------------------------------------------------------------------------- 1 | import uuid 2 | from confluent_kafka import SerializingProducer 3 | from confluent_kafka import Producer 4 | 5 | class KafkaProducer(): 6 | 7 | def __init__(self): 8 | self.producers = {} 9 | 10 | def create_producer( 11 | self, 12 | server='127.0.0.1', 13 | port='9092', 14 | group_id=None, 15 | key_serializer=None, 16 | value_serializer=None, 17 | serializing=False, 18 | **kwargs 19 | ): 20 | """Create Kafka Producer and returns its `group_id` as string. 
21 | 22 |         Keyword Arguments: 23 |         - ``server`` (str): IP address / domain that the producer should 24 |         contact to bootstrap initial cluster metadata. 25 |         Default: `127.0.0.1`. 26 |         - ``port`` (int): Port number. Default: `9092`. 27 |         - ``serializing`` (bool): Activate SerializingProducer with serialization capabilities. 28 |         Default: `False` 29 |         """ 30 |         if group_id is None: 31 |             group_id = str(uuid.uuid4()) 32 | 33 |         if serializing: 34 |             producer = SerializingProducer({ 35 |                 'bootstrap.servers': '{}:{}'.format(server, port), 36 |                 'key.serializer': key_serializer, 37 |                 'value.serializer': value_serializer, 38 |                 **kwargs} 39 |             ) 40 |         else: 41 |             producer = Producer({ 42 |                 'bootstrap.servers': '{}:{}'.format(server, port), 43 |                 **kwargs}) 44 | 45 |         self.producers[group_id] = producer 46 |         return group_id 47 | 48 |     def produce( 49 |         self, 50 |         group_id, 51 |         topic, 52 |         value=None, 53 |         key=None, 54 |         headers=None, 55 |         **kwargs 56 |     ): 57 |         """Produce a message to a Kafka topic asynchronously. When the producer was created with `serializing=True`, the configured key and value serializers are applied.\n 58 |         https://docs.confluent.io/current/clients/confluent-kafka-python/#confluent_kafka.Producer.produce 59 | 60 |         - ``topic`` (str): Name of the topic to produce the message to. 61 |         - ``value`` (str|bytes): Message payload. 62 |         - ``key`` (str|bytes): Message key. Default: `None`. 63 |         - ``headers`` (dict[str, bytes]): Message headers. Default: `None`. 64 |         - ``partition`` (int): Partition to produce to, else uses the configured built-in partitioner. 65 |         """ 66 |         self.producers[group_id].produce( 67 |             topic=topic, 68 |             value=value, 69 |             key=key, 70 |             headers=headers, 71 |             **kwargs 72 |         ) 73 | 74 |     def flush(self, group_id, timeout=0.1): 75 |         """Wait for all messages in the Producer queue to be delivered. Returns the number of messages still in the queue. 76 |         This is a convenience method that calls poll() until len() is zero or the optional timeout elapses. 77 |         - ``timeout`` (float): Optional timeout in seconds. Default: `0.1`. 78 |         """ 79 |         messages_in_queue = self.producers[group_id].flush(timeout) 80 |         return messages_in_queue 81 | 82 |     def purge(self, group_id, **kwargs): 83 |         """Purge messages currently handled by the producer instance. 84 |         """ 85 |         self.producers[group_id].purge(**kwargs) 86 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Robot Framework - ConfluentKafkaLibrary 2 | 3 | ConfluentKafkaLibrary is a Robot Framework library that wraps [confluent-kafka-python](https://github.com/confluentinc/confluent-kafka-python). 4 | 5 | ConfluentKafkaLibrary is compatible with the latest version of confluent-kafka-python, where the library versions have a 1:1 correspondence (e.g., ConfluentKafkaLibrary 2.9.0 corresponds to confluent-kafka-python 2.9.0). Bug fixes and updates are denoted by a post-release identifier, such as `2.9.0.post1`.
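For example, installing a post-release of the library pulls in the matching client release; the versions below are only an illustration and reflect the current `version.py` and `pyproject.toml` pins in this repository:

```
pip install "robotframework-confluentkafkalibrary==2.12.2.post1"
pip show confluent-kafka    # resolves to confluent-kafka 2.12.2
```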
6 | 7 | ## Documentation 8 | 9 | The keyword documentation for ConfluentKafkaLibrary can be found [here](https://robooo.github.io/robotframework-ConfluentKafkaLibrary/). 10 | 11 | To generate the documentation, use the following command: 12 | 13 | ``` 14 | python -m robot.libdoc -f html src/ConfluentKafkaLibrary docs/index.html 15 | ``` 16 | 17 | ## Installation 18 | 19 | To install the library, run the following command: 20 | 21 | ``` 22 | pip install robotframework-confluentkafkalibrary 23 | ``` 24 | 25 | Extra packages: 26 | * `[avro]` = `['fastavro >= 1.3.2', 'avro >= 1.11.1']` 27 | * `[json]` = `['jsonschema >= 3.2.0', 'pyrsistent >= 0.20.0', 'orjson >= 3.10']` 28 | * `[protobuf]` = `['protobuf >= 4.22.0', 'googleapis-common-protos >= 1.66.0']` 29 | * `[schemaregistry]` = `['httpx>=0.26', 'cachetools >= 5.5.0', 'attrs >= 24.3.0', 'certifi', 'authlib >= 1.0.0']` 30 | 31 | * To install all optional dependencies, use the `[all]` extra: 32 | 33 | ``` 34 | pip install robotframework-confluentkafkalibrary[all] 35 | ``` 36 | 37 | ## Usage 38 | 39 | In most cases, you can refer to the [confluent-kafka-python documentation](https://docs.confluent.io/platform/current/clients/confluent-kafka-python/html/index.html) for guidance. Every keyword in ConfluentKafkaLibrary is designed to match the corresponding Python function. If you are unsure about the pre-configured keywords, please visit the [robotframework-ConfluentKafkaLibrary documentation](https://robooo.github.io/robotframework-ConfluentKafkaLibrary/). The librdkafka project maintains up-to-date documentation of the configuration properties and their values [here](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md). 40 | 41 | * You can find basic usage examples in [./examples/test.robot](./examples/test.robot). 42 | * For more complex examples, such as handling byte data from a topic, using multiple consumers, or running threaded Avro consumers, please refer to the [documentation](https://robooo.github.io/robotframework-ConfluentKafkaLibrary/#Examples). 43 | 44 | ## Testing 45 | 46 | * The library is tested using black-box tests written in Robot Framework. 47 | * You can find the test files in the [examples/](./examples) directory. 48 | * For testing, a dockerized enterprise Kafka platform with Schema Registry and REST Proxy is used. The platform is deployed and tested for each pull request and merge to the master branch. 49 | * See the [docker-compose.yml](./examples/docker-compose.yml) file for the necessary configuration. 50 | * Tests are divided into the following files: 51 |     * test.robot - Basic tests to verify functionality of the Consumer and Producer. 52 |     * test_adminclient.robot - Verification of admin client functionality. 53 |     * test_avro.robot / test_protobuf.robot - Verification of Avro, Protobuf, and serializer functionality. 54 |     * A non-executable example of OAuth usage can be found [here](https://github.com/robooo/robotframework-ConfluentKafkaLibrary/blob/master/examples/test_oauth.robot#L14) 55 |         * An update of the deployment (https://github.com/robooo/robotframework-ConfluentKafkaLibrary/issues/21) is required. 56 | * The core testing logic involves producing data to Kafka, connecting one consumer in a thread, and working with the results in specific test cases. A minimal local run that mirrors the CI workflow is sketched below.
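The commands are taken from the CI workflow in [main.yml](./.github/workflows/main.yml) and assume Docker (with the compose plugin) and Python 3 are available locally:

```
# start the dockerized Kafka platform (broker, schema registry, REST proxy)
cd examples && docker compose up -d && cd ..

# install the library together with all optional dependencies
pip install .[all]

# execute the Robot Framework suites, writing logs and reports to docs/
cd examples && python3 -m robot -d ../docs .
```

As in the CI workflow, waiting until all containers report a healthy state before executing the tests avoids failures caused by the broker or schema registry not being ready yet.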
57 | 58 | ## Known Limitations: 59 | * Unable to install robotframework-confluentkafkalibrary on Amazon EC2 graviton instance type 60 | * see the [steps to resolve](https://github.com/robooo/robotframework-ConfluentKafkaLibrary/issues/33#issuecomment-1464644752) 61 | -------------------------------------------------------------------------------- /examples/test_adminclient.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Library ConfluentKafkaLibrary 3 | Library Collections 4 | 5 | 6 | *** Test Cases *** 7 | AdminClient Topic Creation 8 | ${topic_names}= Create List admintesting1 admintesting2 admintesting3 9 | ${topics}= Create List 10 | FOR ${topic} IN @{topic_names} 11 | ${topic}= New Topic ${topic} num_partitions=${1} replication_factor=${1} 12 | Append To List ${topics} ${topic} 13 | END 14 | 15 | ${admin_client_id}= Create Admin Client 16 | ${results}= Create Topics group_id=${admin_client_id} new_topics=${topics} 17 | Log ${results} 18 | ${topics}= List Topics ${admin_client_id} 19 | FOR ${topic} IN @{topic_names} 20 | List Should Contain Value ${topics} ${topic} 21 | END 22 | [Teardown] Delete Topics ${admin_client_id} ${topic_names} 23 | 24 | AdminClient List Consumer Groups 25 | ${producer_group_id}= Create Producer 26 | Produce ${producer_group_id} topic=adminlisttest value=Hello partition=${0} 27 | Wait Until Keyword Succeeds 10x 0.5s All Messages Are Delivered ${producer_group_id} 28 | 29 | ${group_id}= Create Consumer auto_offset_reset=earliest 30 | Subscribe Topic ${group_id} topics=adminlisttest 31 | Sleep 2s # Wait for subscription 32 | 33 | ${admin_client_id}= Create Admin Client 34 | ${states}= Create List ${CONSUMER_GROUP_STATE_STABLE} 35 | ${types}= Create List ${CONSUMER_GROUP_TYPE_CLASSIC} 36 | ${groups}= List Groups ${admin_client_id} states=${states} types=${types} 37 | Log ${groups} 38 | Log ${groups.valid} 39 | FOR ${group} IN @{groups.valid} 40 | Log ${group.group_id} 41 | IF "${group_id}" == "${group.group_id}" 42 | Log ${group.group_id} 43 | Log ${group.state} 44 | Log ${group.type} 45 | Pass Execution "Consumer found in list" 46 | END 47 | END 48 | Fail 49 | [Teardown] Basic Teardown ${group_id} 50 | 51 | AdminClient Describe Consumer Groups 52 | [Documentation] Finish the test with memebers + verification 53 | ${producer_group_id}= Create Producer 54 | Produce ${producer_group_id} topic=admindescribetest value=Hello partition=${0} 55 | Wait Until Keyword Succeeds 10x 0.5s All Messages Are Delivered ${producer_group_id} 56 | 57 | ${group_id}= Create Consumer auto_offset_reset=earliest 58 | Subscribe Topic ${group_id} topics=admindescribetest 59 | Sleep 2s # Wait for subscription 60 | ${group2_id}= Create Consumer auto_offset_reset=earliest 61 | Subscribe Topic ${group2_id} topics=admindescribetest 62 | Sleep 2s # Wait for subscription 63 | ${groups}= Create List ${group_id} ${group2_id} 64 | 65 | ${admin_client_id}= Create Admin Client 66 | ${described_groups}= Describe Groups ${admin_client_id} group_ids=${groups} 67 | Log ${described_groups} 68 | 69 | FOR ${member} IN @{described_groups["${group_id}"].members} 70 | Log ${member} 71 | END 72 | Log ${described_groups["${group_id}"].state} 73 | Log ${described_groups["${group2_id}"].state} 74 | 75 | [Teardown] Run Keywords Basic Teardown ${group_id} AND 76 | ... 
Basic Teardown ${group2_id} 77 | 78 | AdminClient Delete Consumer Groups 79 | ${producer_group_id}= Create Producer 80 | Produce ${producer_group_id} topic=admindeltest value=Hello partition=${0} 81 | Wait Until Keyword Succeeds 10x 0.5s All Messages Are Delivered ${producer_group_id} 82 | 83 | ${group_id}= Create Consumer auto_offset_reset=earliest 84 | Subscribe Topic ${group_id} topics=admindeltest 85 | Sleep 2s # Wait for subscription 86 | ${group2_id}= Create Consumer auto_offset_reset=earliest 87 | Subscribe Topic ${group2_id} topics=admindeltest 88 | Sleep 2s # Wait for subscription 89 | ${groups}= Create List ${group2_id} 90 | ${messages}= Poll group_id=${group2_id} max_records=5 91 | Sleep 1s 92 | Unsubscribe ${group2_id} 93 | Close Consumer ${group2_id} 94 | 95 | ${admin_client_id}= Create Admin Client 96 | ${deletion}= Delete Groups ${admin_client_id} group_ids=${groups} 97 | Should Be Equal ${deletion["${group2_id}"]} ${None} 98 | 99 | ${current_groups}= List Groups ${admin_client_id} 100 | Log ${current_groups.valid} 101 | FOR ${group} IN @{current_groups.valid} 102 | Log ${group.group_id} 103 | IF "${group_id}" == "${group.group_id}" 104 | Log ${group.group_id} 105 | Log ${group.state} 106 | Log "Consumer found in list" 107 | END 108 | IF "${group2_id}" == "${group.group_id}" 109 | Log ${group.group_id} 110 | Log ${group.state} 111 | Fail "Group 1 consumer was not removed!" 112 | END 113 | END 114 | [Teardown] Basic Teardown ${group_id} 115 | 116 | AdminClient New Partitions 117 | ${topic_name}= Set Variable admin_testing_partition 118 | ${topic}= New Topic ${topic_name} num_partitions=${1} replication_factor=${1} 119 | ${admin_client_id}= Create Admin Client 120 | Create Topics group_id=${admin_client_id} new_topics=${topic} 121 | 122 | ${new_parts}= New Partitions ${topic_name} new_total_count=${2} 123 | ${resp}= Create Partitions group_id=${admin_client_id} new_partitions=${new_parts} 124 | Log ${resp} 125 | [Teardown] Delete Topics ${admin_client_id} ${topic_name} 126 | 127 | AdminClient Describe Configs 128 | ${resource}= Config Resource ${ADMIN_RESOURCE_BROKER} 1 129 | Log ${resource.name} 130 | ${admin_client_id}= Create Admin Client 131 | ${config}= Describe Configs ${admin_client_id} ${resource} 132 | Log ${config} 133 | 134 | Should Not Be Empty ${config} 135 | ${name}= Set Variable ${config["${resource.name}"]['offsets.commit.timeout.ms'].name} 136 | ${value}= Set Variable ${config["${resource.name}"]['offsets.commit.timeout.ms'].value} 137 | Should Be Equal As Strings ${name} offsets.commit.timeout.ms 138 | Should Be Equal As Integers ${value} ${5000} 139 | 140 | AdminClient Alter Configs 141 | ${data}= Create Dictionary log.retention.ms=${54321} # DotDict 142 | ${data}= Convert To Dictionary ${data} # dict 143 | ${resource}= Config Resource ${ADMIN_RESOURCE_BROKER} 1 set_config=${data} 144 | ${admin_client_id}= Create Admin Client 145 | 146 | ${resp}= Alter Configs ${admin_client_id} ${resource} 147 | Log ${resp} 148 | Sleep 1s 149 | ${config}= Describe Configs ${admin_client_id} ${resource} 150 | Should Be Equal As Integers ${54321} ${config["${resource.name}"]['log.retention.ms'].value} 151 | 152 | AdminClient Describe Topics 153 | ${topic_names}= Create List admintesting1 admintesting2 admintesting3 154 | ${topics}= Create List 155 | FOR ${topic} IN @{topic_names} 156 | ${topic}= New Topic ${topic} num_partitions=${1} replication_factor=${1} 157 | Append To List ${topics} ${topic} 158 | END 159 | 160 | ${admin_client_id}= Create Admin Client 161 | ${results}= 
Create Topics    group_id=${admin_client_id}    new_topics=${topics} 162 |     Log    ${results} 163 | 164 |     ${results}=    Describe Topics    ${admin_client_id}    ${topic_names} 165 |     Log    ${results} 166 |     FOR    ${topic}    IN    @{topic_names} 167 |         ${status}=    Evaluate    len("${results["${topic}"].topic_id}") > 0 168 |         Should Be True    ${status} 169 |     END 170 |     [Teardown]    Delete Topics    ${admin_client_id}    ${topic_names} 171 | 172 | AdminClient Describe Cluster 173 |     ${admin_client_id}=    Create Admin Client 174 |     ${cluster}=    Describe Cluster    ${admin_client_id} 175 |     Should Not Be Empty    ${cluster.cluster_id} 176 | 177 | *** Keywords *** 178 | All Messages Are Delivered 179 |     [Arguments]    ${producer_id} 180 |     ${count}=    Flush    ${producer_id} 181 |     Log    Remaining messages to be delivered: ${count} 182 |     Should Be Equal As Integers    ${count}    0 183 | 184 | Basic Teardown 185 |     [Arguments]    ${group_id} 186 |     Unsubscribe    ${group_id} 187 |     Close Consumer    ${group_id} 188 |     ${groups}=    Create List    ${group_id} 189 |     ${admin_client_id}=    Create Admin Client 190 |     Delete Groups    ${admin_client_id}    group_ids=${groups} -------------------------------------------------------------------------------- /src/ConfluentKafkaLibrary/__init__.py: -------------------------------------------------------------------------------- 1 | import confluent_kafka 2 | from confluent_kafka import ConsumerGroupState 3 | 4 | try: 5 |     from confluent_kafka.schema_registry import SchemaRegistryClient 6 |     _SCHEMA_REGISTRY_CLIENT_AVAILABLE = True 7 |     _SCHEMA_REGISTRY_IMPORT_ERROR = None 8 | except ImportError as e: 9 |     _SCHEMA_REGISTRY_IMPORT_ERROR = e 10 |     _SCHEMA_REGISTRY_CLIENT_AVAILABLE = False 11 | 12 | from confluent_kafka.admin import AdminClient, NewTopic, NewPartitions, ConfigResource 13 | from robot.libraries.BuiltIn import BuiltIn, RobotNotRunningError 14 | from .consumer import KafkaConsumer 15 | from .producer import KafkaProducer 16 | from .admin_client import KafkaAdminClient 17 | from .version import VERSION 18 | 19 | IMPORTS = KafkaConsumer, KafkaProducer, KafkaAdminClient 20 | if _SCHEMA_REGISTRY_CLIENT_AVAILABLE: 21 |     try: 22 |         from .serialization import Serializer, Deserializer 23 |         IMPORTS += Serializer, Deserializer 24 |     except ImportError as e: 25 |         print(e) 26 |         pass 27 | 28 | #class ConfluentKafkaLibrary(KafkaConsumer, KafkaProducer, Serializer, Deserializer): 29 | class ConfluentKafkaLibrary(*IMPORTS): 30 |     """ConfluentKafkaLibrary is a Robot Framework library which wraps up 31 |     [https://github.com/confluentinc/confluent-kafka-python | confluent-kafka-python]. 32 |     The library supports additional functionality such as running multiple clients identified by `group_id`, 33 |     running them in threaded mode during the tests, decoding of gathered data, etc. (`See` `Examples`). 34 | 35 |     This document explains how to use keywords provided by ConfluentKafkaLibrary. 36 |     For information about installation, support, and more, please visit the 37 |     [https://github.com/robooo/robotframework-ConfluentKafkaLibrary | project github page]. 38 |     For more information about Robot Framework, see http://robotframework.org. 39 | 40 |     == Examples == 41 |     See [https://github.com/robooo/robotframework-ConfluentKafkaLibrary/tree/master/examples | repo examples].
42 | 43 | *Basic Consumer with predefined group_id* 44 | 45 | | ${group_id}= | `Create Consumer` | group_id=mygroup | # if group_id is not defined uuid4() is generated | 46 | | `Subscribe Topic` | group_id=${group_id} | topics=test_topic | 47 | | ${result}= | `Poll` | group_id=${group_id} | max_records=5 | 48 | | `Log` | ${result} | 49 | | `Unsubscribe` | ${group_id} | 50 | | `Close Consumer` | ${group_id} | 51 | 52 | *More Consumers* 53 | 54 | | ${group_id_1}= | `Create Consumer` | 55 | | `Subscribe Topic` | group_id=${group_id_1} | topics=topic1 | 56 | | ${group_id_2}= | `Create Consumer` | 57 | | `Subscribe Topic` | group_id=${group_id_2} | topics=topic2 | 58 | | ${result_1}= | `Poll` | group_id=${group_id_1} | max_records=5 | 59 | | ${result_2}= | `Poll` | group_id=${group_id_2} | max_records=2 | 60 | | `Unsubscribe` | ${group_id_1} | 61 | | `Unsubscribe` | ${group_id_2} | 62 | | `Close Consumer` | ${group_id_1} | 63 | | `Close Consumer` | ${group_id_2} | 64 | 65 | *Handle Byte Data From Topic* 66 | 67 | | ${messages}= | Poll | group_id=${group_id} | max_records=3 | decode_format=utf_8 | 68 | | ${json} | Convert String to JSON | ${messages}[0] | 69 | | ${jsonValue} | Get value from JSON | ${json} | $.key | 70 | 71 | """ 72 | 73 | ROBOT_LIBRARY_VERSION = VERSION 74 | ROBOT_LIBRARY_SCOPE = 'GLOBAL' 75 | 76 | def __init__(self): 77 | KafkaConsumer.__init__(self) 78 | KafkaProducer.__init__(self) 79 | KafkaAdminClient.__init__(self) 80 | self._set_globals_variables_if_robot_running() 81 | 82 | def _set_globals_variables_if_robot_running(self): 83 | try: 84 | BuiltIn().set_global_variable('${OFFSET_BEGINNING}', confluent_kafka.OFFSET_BEGINNING) 85 | BuiltIn().set_global_variable('${OFFSET_END}', confluent_kafka.OFFSET_END) 86 | BuiltIn().set_global_variable('${OFFSET_STORED}', confluent_kafka.OFFSET_STORED) 87 | BuiltIn().set_global_variable('${OFFSET_INVALID}', confluent_kafka.OFFSET_INVALID) 88 | BuiltIn().set_global_variable('${ADMIN_RESOURCE_BROKER}', confluent_kafka.admin.RESOURCE_BROKER) 89 | BuiltIn().set_global_variable('${ADMIN_RESOURCE_GROUP}', confluent_kafka.admin.RESOURCE_GROUP) 90 | BuiltIn().set_global_variable('${ADMIN_RESOURCE_TOPIC}', confluent_kafka.admin.RESOURCE_TOPIC) 91 | 92 | BuiltIn().set_global_variable('${CONSUMER_GROUP_STATE_UNKNOWN}', confluent_kafka.ConsumerGroupState.UNKNOWN) 93 | BuiltIn().set_global_variable('${CONSUMER_GROUP_STATE_PREPARING_REBALANCING}', confluent_kafka.ConsumerGroupState.PREPARING_REBALANCING) 94 | BuiltIn().set_global_variable('${CONSUMER_GROUP_STATE_COMPLETING_REBALANCING}', confluent_kafka.ConsumerGroupState.COMPLETING_REBALANCING) 95 | BuiltIn().set_global_variable('${CONSUMER_GROUP_STATE_STABLE}', confluent_kafka.ConsumerGroupState.STABLE) 96 | BuiltIn().set_global_variable('${CONSUMER_GROUP_STATE_DEAD}', confluent_kafka.ConsumerGroupState.DEAD) 97 | BuiltIn().set_global_variable('${CONSUMER_GROUP_STATE_EMPTY}', confluent_kafka.ConsumerGroupState.EMPTY) 98 | 99 | BuiltIn().set_global_variable('${CONSUMER_GROUP_TYPE_UNKNOWN}', confluent_kafka.ConsumerGroupType.UNKNOWN) 100 | BuiltIn().set_global_variable('${CONSUMER_GROUP_TYPE_CONSUMER}', confluent_kafka.ConsumerGroupType.CONSUMER) 101 | BuiltIn().set_global_variable('${CONSUMER_GROUP_TYPE_CLASSIC}', confluent_kafka.ConsumerGroupType.CLASSIC) 102 | except RobotNotRunningError as e: 103 | pass 104 | 105 | def list_topics(self, group_id, topic=None): 106 | """Request Metadata from cluster. Could be executed with consumer or producer group_id too. 
107 | - ``topic`` (str): If specified, only request info about this topic, else return for all topics in cluster. 108 | Default: `None`. 109 | - ``group_id`` (str): *required* id of the created consumer or producer. 110 | """ 111 | if group_id is None: 112 | raise TypeError 113 | 114 | if group_id in self.admin_clients: 115 | return self.admin_clients[group_id].list_topics().topics 116 | if group_id in self.consumers: 117 | return self.consumers[group_id].list_topics(topic).topics 118 | if group_id in self.producers: 119 | return self.producers[group_id].list_topics(topic).topics 120 | 121 | raise ValueError('Consumer or producer group_id is wrong or does not exists!') 122 | 123 | def new_topic(self, topic, **kwargs): 124 | """Instantiate a NewTopic object. Specifies per-topic settings for passing to AdminClient.create_topics(). 125 | - ``topic`` (str): Topic name 126 | Note: In a multi-cluster production scenario, it is more typical to use a 127 | replication_factor of 3 for durability. 128 | """ 129 | return NewTopic(topic=topic, **kwargs) 130 | 131 | def new_partitions(self, topic, **kwargs): 132 | """Instantiate a NewPartitions object. 133 | - ``topic`` (str): Topic name 134 | """ 135 | return NewPartitions(topic=topic, **kwargs) 136 | 137 | def config_resource(self, restype, name, **kwargs): 138 | """Represents a resource that has configuration, and (optionally) a collection of configuration properties 139 | for that resource. Used by describe_configs() and alter_configs(). 140 | - ``restype`` (ConfigResource.Type): The resource type. 141 | - ``name`` (str): The resource name, which depends on the resource type. For RESOURCE_BROKER, 142 | the resource name is the broker id. 143 | """ 144 | return ConfigResource(restype=restype, name=name, **kwargs) 145 | 146 | def get_schema_registry_client(self, conf): 147 | if not _SCHEMA_REGISTRY_CLIENT_AVAILABLE: 148 | raise ImportError( 149 | "SchemaRegistry requires additional dependencies to be installed or one of its transitive dependencies is missing. " 150 | "Please install with 'pip install robotframework-confluentkafkalibrary[schemaregistry]'.\n" 151 | "If the error persists, check for missing dependencies in your environment.\n" 152 | f"ImportError: {_SCHEMA_REGISTRY_IMPORT_ERROR}" 153 | ) 154 | return SchemaRegistryClient(conf) 155 | -------------------------------------------------------------------------------- /src/ConfluentKafkaLibrary/admin_client.py: -------------------------------------------------------------------------------- 1 | import uuid 2 | from confluent_kafka.admin import AdminClient 3 | from confluent_kafka import KafkaException, TopicCollection 4 | 5 | 6 | class KafkaAdminClient(): 7 | 8 | def __init__(self): 9 | self.admin_clients = {} 10 | 11 | def create_admin_client( 12 | self, 13 | group_id=None, 14 | server="127.0.0.1", 15 | port="9092", 16 | **kwargs 17 | ): 18 | if group_id is None: 19 | group_id = str(uuid.uuid4()) 20 | 21 | admin_client = AdminClient({ 22 | 'bootstrap.servers': '{}:{}'.format(server, port), 23 | **kwargs}) 24 | 25 | self.admin_clients[group_id] = admin_client 26 | return group_id 27 | 28 | def list_groups(self, group_id, states=None, types=None, request_timeout=10): 29 | """List consumer groups. 30 | - ``states`` (list(ConsumerGroupState)): filter consumer groups which are currently in these states. 31 | For example usage see 'AdminClient List Consumer Groups' at 32 | examples/test_adminclient.py 33 | Default: `None`. 34 | - ``request_timeout`` (int): Maximum response time before timing out. 
35 | Default: `10`. 36 | """ 37 | states = states or [] 38 | types = types or [] 39 | 40 | future = self.admin_clients[group_id].list_consumer_groups(request_timeout=request_timeout, states=set(states), types=set(types)) 41 | return future.result() 42 | 43 | def describe_groups(self, group_id, group_ids, request_timeout=10, **kwargs): 44 | """Describe consumer groups. 45 | - ``group_ids`` (list(str)): List of group_ids which need to be described. 46 | - ``request_timeout`` (int): Maximum response time before timing out. 47 | Default: `10`. 48 | """ 49 | response = self.admin_clients[group_id].describe_consumer_groups(group_ids, request_timeout=request_timeout, **kwargs) 50 | 51 | groups_results={} 52 | for con_id in group_ids: 53 | try: 54 | if response[con_id].exception() is None: 55 | groups_results[con_id] = response[con_id].result() 56 | else: 57 | groups_results[con_id] = response[con_id].exception() 58 | except KafkaException as e: 59 | return f"Failed to describe group {con_id}: {e}" 60 | except (TypeError, ValueError ) as e: 61 | return f"Invalid input: {e}" 62 | return groups_results 63 | 64 | def delete_groups(self, group_id, group_ids, request_timeout=10): 65 | """Delete the given consumer groups. 66 | - ``group_ids`` (list(str)): List of group_ids which need to be deleted. 67 | - ``request_timeout`` (int): Maximum response time before timing out. 68 | Default: `10`. 69 | """ 70 | response = self.admin_clients[group_id].delete_consumer_groups(group_ids, request_timeout=request_timeout) 71 | 72 | groups_results={} 73 | for con_id in group_ids: 74 | try: 75 | if response[con_id].exception() is None: 76 | groups_results[con_id] = response[con_id].result() 77 | else: 78 | groups_results[con_id] = response[con_id].exception() 79 | except KafkaException as e: 80 | return f"Failed to delete group {con_id}: {e}" 81 | except (TypeError, ValueError ) as e: 82 | return f"Invalid input: {e}" 83 | return groups_results 84 | 85 | def create_topics(self, group_id, new_topics, **kwargs): 86 | """Create one or more new topics and wait for each one to finish. 87 | - ``new_topics`` (list(NewTopic) or NewTopic): A list of specifications (NewTopic) 88 | or a single instance for the topics that should be created. 
89 | """ 90 | fs = None 91 | if isinstance(new_topics, list): 92 | fs = self.admin_clients[group_id].create_topics(new_topics, **kwargs) 93 | else: 94 | fs = self.admin_clients[group_id].create_topics([new_topics], **kwargs) 95 | 96 | topics_results={} 97 | for topic, f in fs.items(): 98 | try: 99 | if f.exception() is None: 100 | topics_results[topic] = f.result() 101 | else: 102 | topics_results[topic] = f.exception() 103 | except KafkaException as e: 104 | return f"Failed to create topic {topic}: {e}" 105 | except (TypeError, ValueError ) as e: 106 | return f"Invalid input: {e}" 107 | return topics_results 108 | 109 | def delete_topics(self, group_id, topics, **kwargs): 110 | if isinstance(topics, str): 111 | topics = [topics] 112 | 113 | fs = self.admin_clients[group_id].delete_topics(topics, **kwargs) 114 | 115 | topics_results={} 116 | for topic, f in fs.items(): 117 | try: 118 | if f.exception() is None: 119 | topics_results[topic] = f.result() 120 | else: 121 | topics_results[topic] = f.exception() 122 | except KafkaException as e: 123 | return f"Failed to delete topic {topic}: {e}" 124 | except (TypeError, ValueError ) as e: 125 | return f"Invalid input: {e}" 126 | return topics_results 127 | 128 | def create_partitions(self, group_id, new_partitions, **kwargs): 129 | """Create additional partitions for the given topics. 130 | - ``new_partitions`` (list(NewPartitions) or NewPartitions): New partitions to be created. 131 | """ 132 | fs = None 133 | if isinstance(new_partitions, list): 134 | fs = self.admin_clients[group_id].create_partitions(new_partitions, **kwargs) 135 | else: 136 | fs = self.admin_clients[group_id].create_partitions([new_partitions], **kwargs) 137 | 138 | partitions_results={} 139 | for partition, f in fs.items(): 140 | try: 141 | if f.exception() is None: 142 | partitions_results[partition] = f.result() 143 | else: 144 | partitions_results[partition] = f.exception() 145 | except KafkaException as e: 146 | return f"Failed to add partitions to topic {partition}: {e}" 147 | except (TypeError, ValueError ) as e: 148 | return f"Invalid input: {e}" 149 | return partitions_results 150 | 151 | def describe_configs(self, group_id, resources, **kwargs): 152 | """Get the configuration of the specified resources. 153 | - ``resources`` (list(ConfigResource) or ConfigResource): Resources to get the configuration for. 154 | """ 155 | fs = None 156 | if isinstance(resources, list): 157 | fs = self.admin_clients[group_id].describe_configs(resources, **kwargs) 158 | else: 159 | fs = self.admin_clients[group_id].describe_configs([resources], **kwargs) 160 | 161 | config_results={} 162 | for config, f in fs.items(): 163 | try: 164 | if f.exception() is None: 165 | config_results[config.name] = f.result() 166 | else: 167 | config_results[config.name] = f.exception() 168 | except KafkaException as e: 169 | return f"Failed to describe config {config.name}: {e}" 170 | except (TypeError, ValueError ) as e: 171 | return f"Invalid input: {e}" 172 | return config_results 173 | 174 | def describe_topics(self, group_id, topics, **kwargs): 175 | """Describe topics. 176 | - ``topics`` (list(str) or str): List of topic names or only topic name to describe. 
177 | """ 178 | if isinstance(topics, list): 179 | topics = TopicCollection(topics) 180 | else: 181 | topics = TopicCollection([topics]) 182 | 183 | topics = self.admin_clients[group_id].describe_topics(topics, **kwargs) 184 | topics_results={} 185 | for topic, f in topics.items(): 186 | try: 187 | if f.exception() is None: 188 | topics_results[topic] = f.result() 189 | else: 190 | topics_results[topic] = f.exception() 191 | except KafkaException as e: 192 | return f"Failed to describe topic {topic.name}: {e}" 193 | except (TypeError, ValueError ) as e: 194 | return f"Invalid input: {e}" 195 | return topics_results 196 | 197 | def describe_cluster(self, group_id, **kwargs): 198 | """Describe cluster. 199 | """ 200 | cluster = self.admin_clients[group_id].describe_cluster(**kwargs) 201 | try: 202 | if cluster.exception() is None: 203 | cluster = cluster.result() 204 | else: 205 | cluster = cluster.exception() 206 | except KafkaException as e: 207 | return f"Failed to describe cluster: {e}" 208 | except (TypeError, ValueError ) as e: 209 | return f"Invalid input: {e}" 210 | return cluster 211 | 212 | def alter_configs(self, group_id, resources, **kwargs): 213 | """Update configuration properties for the specified resources. 214 | - ``resources`` (list(ConfigResource) or ConfigResource): Resources to update configuration of. 215 | """ 216 | fs = None 217 | if isinstance(resources, list): 218 | fs = self.admin_clients[group_id].alter_configs(resources, **kwargs) 219 | else: 220 | fs = self.admin_clients[group_id].alter_configs([resources], **kwargs) 221 | 222 | config_results={} 223 | for config, f in fs.items(): 224 | try: 225 | if f.exception() is None: 226 | config_results[config.name] = f.result() 227 | else: 228 | config_results[config.name] = f.exception() 229 | except KafkaException as e: 230 | return f"Failed to alter config {config.name}: {e}" 231 | except (TypeError, ValueError ) as e: 232 | return f"Invalid input: {e}" 233 | return config_results -------------------------------------------------------------------------------- /examples/test.robot: -------------------------------------------------------------------------------- 1 | *** Settings *** 2 | Library ConfluentKafkaLibrary 3 | Library Collections 4 | 5 | Suite Setup Starting Test 6 | Suite Teardown Stop Thread 7 | 8 | *** Test Cases *** 9 | Verify Topics 10 | ${group_id}= Create Consumer auto_offset_reset=earliest 11 | ${topics}= List Topics ${group_id} 12 | Dictionary Should Contain Key ${topics} ${TEST_TOPIC} 13 | 14 | ${topics_thread}= List Topics ${THREADED_GROUPID} 15 | Dictionary Should Contain Key ${topics} ${TEST_TOPIC} 16 | [Teardown] Close Consumer ${group_id} 17 | 18 | Basic Consumer 19 | ${group_id}= Create Consumer auto_offset_reset=earliest 20 | Subscribe Topic group_id=${group_id} topics=${TEST_TOPIC} 21 | ${messages}= Poll group_id=${group_id} max_records=3 decode_format=utf8 22 | ${data}= Create List Hello World {'test': 1} 23 | Lists Should Be Equal ${messages} ${data} 24 | [Teardown] Basic Teardown ${group_id} 25 | 26 | Produce Without Value 27 | ${topic_name}= Set Variable topicwithoutvaluee 28 | Produce group_id=${PRODUCER_ID} topic=${topic_name} 29 | Wait Until Keyword Succeeds 10x 0.5s All Messages Are Delivered ${PRODUCER_ID} 30 | ${group_id}= Create Consumer auto_offset_reset=earliest 31 | Subscribe Topic group_id=${group_id} topics=${topic_name} 32 | ${messages}= Poll group_id=${group_id} max_records=1 33 | Should Be Equal As Strings ${messages} [None] 34 | [Teardown] Basic Teardown ${group_id} 35 | 
36 | Verify Position 37 | ${group_id}= Create Consumer 38 | ${tp}= Create Topic Partition ${TEST_TOPIC} ${P_ID} ${OFFSET_END} 39 | Assign To Topic Partition ${group_id} ${tp} 40 | Sleep 5sec # Need to wait for an assignment 41 | ${position}= Get Position group_id=${group_id} topic_partitions=${tp} 42 | ${position_before}= Set Variable ${position[0].offset} 43 | 44 | Produce group_id=${PRODUCER_ID} topic=${TEST_TOPIC} value=Dummy partition=${P_ID} 45 | Wait Until Keyword Succeeds 10x 0.5s All Messages Are Delivered ${PRODUCER_ID} 46 | ${position}= Get Position group_id=${group_id} topic_partitions=${tp} 47 | ${position_after_produce}= Set Variable ${position[0].offset} 48 | Should Be Equal As Integers ${position_before} ${position_after_produce} 49 | 50 | ${messages}= Poll group_id=${group_id} max_records=1 decode_format=utf8 51 | ${position}= Get Position group_id=${group_id} topic_partitions=${tp} 52 | ${position_after_poll_1}= Set Variable ${position[0].offset} 53 | Should Not Be Equal As Integers ${position_after_poll_1} ${position_after_produce} 54 | 55 | Produce group_id=${PRODUCER_ID} topic=${TEST_TOPIC} value=Dummy partition=${P_ID} 56 | Wait Until Keyword Succeeds 10x 0.5s All Messages Are Delivered ${PRODUCER_ID} 57 | ${messages}= Poll group_id=${group_id} max_records=1 decode_format=utf8 58 | ${position}= Get Position group_id=${group_id} topic_partitions=${tp} 59 | ${position_after_poll_2}= Set Variable ${position[0].offset} 60 | Should Be Equal As Integers ${position_after_poll_1 + 1} ${position_after_poll_2} 61 | [Teardown] Basic Teardown ${group_id} 62 | 63 | Consumer With Assignment To Last Message After Get Of Watermark Offsets 64 | ${group_id}= Create Consumer 65 | ${tp}= Create Topic Partition ${TEST_TOPIC} ${P_ID} 66 | ${offset}= Get Watermark Offsets ${group_id} ${tp} 67 | ${tp}= Create Topic Partition ${TEST_TOPIC} ${P_ID} ${offset[1]} 68 | Assign To Topic Partition ${group_id} ${tp} 69 | Prepare Data 70 | ${messages}= Poll group_id=${group_id} max_records=6 decode_format=utf8 71 | Lists Should Be Equal ${TEST_DATA} ${messages} 72 | [Teardown] Basic Teardown ${group_id} 73 | 74 | Consumer With Assignment To OFFSET_END 75 | ${group_id}= Create Consumer 76 | ${tp}= Create Topic Partition ${TEST_TOPIC} ${P_ID} ${OFFSET_END} 77 | Assign To Topic Partition ${group_id} ${tp} 78 | # Need to wait for an async assignment, be aware the Is Assigned could return True but 79 | # that doesn't mean assignment is completed 80 | Sleep 5sec 81 | Prepare Data 82 | ${messages}= Poll group_id=${group_id} poll_attempts=30 max_records=6 timeout=5 decode_format=utf8 83 | Lists Should Be Equal ${TEST_DATA} ${messages} 84 | [Teardown] Unassign Teardown ${group_id} 85 | 86 | Verify Test And Threaded Consumer 87 | [Setup] Clear Messages From Thread ${MAIN_THREAD} 88 | ${group_id}= Create Consumer 89 | Subscribe Topic group_id=${group_id} topics=${TEST_TOPIC} 90 | ${messages}= Poll group_id=${group_id} 91 | Prepare Data 92 | ${thread_messages}= Get Messages From Thread ${MAIN_THREAD} decode_format=utf-8 93 | ${messages}= Poll group_id=${group_id} max_records=6 decode_format=utf8 94 | Lists Should Be Equal ${thread_messages} ${messages} 95 | [Teardown] Run Keywords Basic Teardown ${group_id} AND 96 | ... 
Clear Messages From Thread ${MAIN_THREAD} 97 | 98 | Verify Clean Of Threaded Consumer Messages 99 | [Setup] Prepare Data 100 | ${thread_messages1}= Get Messages From Thread ${MAIN_THREAD} decode_format=utf-8 101 | Clear Messages From Thread ${MAIN_THREAD} 102 | ${thread_messages2}= Get Messages From Thread ${MAIN_THREAD} 103 | Lists Should Be Equal ${TEST_DATA} ${thread_messages1} 104 | Should Be Empty ${thread_messages2} 105 | [Teardown] Clear Messages From Thread ${MAIN_THREAD} 106 | 107 | Remove And Publish New Messages From Threaded Consumer 108 | [Setup] Prepare Data 109 | ${thread_messages1}= Get Messages From Thread ${MAIN_THREAD} decode_format=utf-8 110 | Clear Messages From Thread ${MAIN_THREAD} 111 | Produce group_id=${PRODUCER_ID} topic=${TEST_TOPIC} value=After partition=${P_ID} 112 | Produce group_id=${PRODUCER_ID} topic=${TEST_TOPIC} value=Clear partition=${P_ID} 113 | Wait Until Keyword Succeeds 10x 0.5s All Messages Are Delivered ${PRODUCER_ID} 114 | Sleep 1sec # if next command is polling messages in thread we need to wait a second 115 | 116 | ${thread_messages2}= Get Messages From Thread ${MAIN_THREAD} decode_format=utf-8 117 | ${data}= Create List After Clear 118 | Should Be Equal ${data} ${thread_messages2} 119 | 120 | Produce group_id=${PRODUCER_ID} topic=${TEST_TOPIC} value=LAST partition=${P_ID} 121 | Wait Until Keyword Succeeds 10x 0.5s All Messages Are Delivered ${PRODUCER_ID} 122 | Sleep 1sec 123 | Append To List ${data} LAST 124 | ${thread_messages2}= Get Messages From Thread ${MAIN_THREAD} decode_format=utf-8 125 | Should Be Equal ${TEST_DATA} ${thread_messages1} 126 | Should Be Equal ${data} ${thread_messages2} 127 | [Teardown] Clear Messages From Thread ${MAIN_THREAD} 128 | 129 | Purge Test 130 | ${producer_id}= Create Producer message.timeout.ms=${30000} 131 | Produce group_id=${producer_id} topic=${TEST_TOPIC} value=After partition=${P_ID} 132 | Produce group_id=${producer_id} topic=${TEST_TOPIC} value=Clear partition=${P_ID} 133 | 134 | Purge group_id=${producer_id} in_queue=${False} 135 | ${count}= Flush ${producer_id} timeout=${0} 136 | Should Be Equal As Integers 2 ${count} 137 | Purge group_id=${producer_id} 138 | ${count}= Flush ${producer_id} timeout=${0} 139 | Should Be Equal As Integers 0 ${count} 140 | 141 | Offsets Test 142 | ${group_id}= Create Consumer enable.auto.offset.store=${False} 143 | Subscribe Topic group_id=${group_id} topics=${TEST_TOPIC} 144 | ${tp}= Create Topic Partition ${TEST_TOPIC} ${P_ID} ${OFFSET_BEGINNING} 145 | ${offsets}= Create List ${tp} 146 | Run Keyword And Expect Error KafkaException: * Store Offsets group_id=${group_id} offsets=${offsets} 147 | Assign To Topic Partition ${group_id} ${tp} 148 | Sleep 5sec 149 | Store Offsets group_id=${group_id} offsets=${offsets} 150 | [Teardown] Unassign Teardown ${group_id} 151 | 152 | *** Keywords *** 153 | Starting Test 154 | Set Suite Variable ${TEST_TOPIC} test 155 | ${thread}= Start Consumer Threaded topics=${TEST_TOPIC} 156 | ${gid}= Get Thread Group Id ${thread} 157 | Log ${gid} 158 | Set Suite Variable ${THREADED_GROUPID} ${gid} 159 | Set Suite Variable ${MAIN_THREAD} ${thread} 160 | ${producer_group_id}= Create Producer 161 | Set Suite Variable ${PRODUCER_ID} ${producer_group_id} 162 | 163 | Set Suite Variable ${P_ID} ${0} 164 | Prepare Data 165 | 166 | ${topics}= List Topics ${producer_group_id} 167 | ${partitions}= Get Topic Partitions ${topics['${TEST_TOPIC}']} 168 | ${partition_id}= Set Variable ${partitions[0].id} 169 | Set Suite Variable ${P_ID} ${partition_id} 170 | 
    ${tp}=    Create Topic Partition    ${TEST_TOPIC}    ${partition_id}    ${OFFSET_BEGINNING}
171 | 
172 |     ${data}=    Create List    Hello    World    {'test': 1}    {'test': 2}    {'test': 3}    {'test': 4}
173 |     Set Suite Variable    ${TEST_DATA}    ${data}
174 |     Prepare Data
175 | 
176 | Prepare Data
177 |     Produce    group_id=${PRODUCER_ID}    topic=${TEST_TOPIC}    value=Hello    partition=${P_ID}
178 |     Produce    group_id=${PRODUCER_ID}    topic=${TEST_TOPIC}    value=World    partition=${P_ID}
179 |     Produce    group_id=${PRODUCER_ID}    topic=${TEST_TOPIC}    value={'test': 1}    partition=${P_ID}
180 |     Produce    group_id=${PRODUCER_ID}    topic=${TEST_TOPIC}    value={'test': 2}    partition=${P_ID}
181 |     Produce    group_id=${PRODUCER_ID}    topic=${TEST_TOPIC}    value={'test': 3}    partition=${P_ID}
182 |     Produce    group_id=${PRODUCER_ID}    topic=${TEST_TOPIC}    value={'test': 4}    partition=${P_ID}
183 |     Wait Until Keyword Succeeds    10x    0.5s    All Messages Are Delivered    ${PRODUCER_ID}
184 |     Sleep    1sec    # if next command is polling messages in thread we need to wait a second
185 | 
186 | All Messages Are Delivered
187 |     [Arguments]    ${producer_id}
188 |     ${count}=    Flush    ${producer_id}
189 |     Log    Remaining messages to be delivered: ${count}
190 |     Should Be Equal As Integers    ${count}    0
191 | 
192 | Basic Teardown
193 |     [Arguments]    ${group_id}
194 |     Unsubscribe    ${group_id}
195 |     Close Consumer    ${group_id}
196 |     ${groups}=    Create List    ${group_id}
197 |     ${admin_client_id}=    Create Admin Client
198 |     ${resp}=    Delete Groups    ${admin_client_id}    group_ids=${groups}
199 |     Log    ${resp}
200 | 
201 | Unassign Teardown
202 |     [Arguments]    ${group_id}
203 |     Unassign    ${group_id}
204 |     Close Consumer    ${group_id}
205 |     ${groups}=    Create List    ${group_id}
206 |     ${admin_client_id}=    Create Admin Client
207 |     ${resp}=    Delete Groups    ${admin_client_id}    group_ids=${groups}
208 |     Log    ${resp}
209 | 
210 | Stop Thread
211 |     ${resp}=    Stop Consumer Threaded    ${MAIN_THREAD}
212 |     Log    ${resp}
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |                                  Apache License
2 |                            Version 2.0, January 2004
3 |                         http://www.apache.org/licenses/
4 | 
5 |    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 | 
7 |    1. Definitions.
8 | 
9 |       "License" shall mean the terms and conditions for use, reproduction,
10 |       and distribution as defined by Sections 1 through 9 of this document.
11 | 
12 |       "Licensor" shall mean the copyright owner or entity authorized by
13 |       the copyright owner that is granting the License.
14 | 
15 |       "Legal Entity" shall mean the union of the acting entity and all
16 |       other entities that control, are controlled by, or are under common
17 |       control with that entity. For the purposes of this definition,
18 |       "control" means (i) the power, direct or indirect, to cause the
19 |       direction or management of such entity, whether by contract or
20 |       otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 |       outstanding shares, or (iii) beneficial ownership of such entity.
22 | 
23 |       "You" (or "Your") shall mean an individual or Legal Entity
24 |       exercising permissions granted by this License.
25 | 
26 |       "Source" form shall mean the preferred form for making modifications,
27 |       including but not limited to software source code, documentation
28 |       source, and configuration files.
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2022 Robert Karasek 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /src/ConfluentKafkaLibrary/consumer.py: -------------------------------------------------------------------------------- 1 | import uuid 2 | from threading import Thread 3 | from confluent_kafka import Consumer, KafkaException, KafkaError, TopicPartition 4 | from confluent_kafka import DeserializingConsumer 5 | from confluent_kafka.admin import AdminClient 6 | 7 | try: 8 | from confluent_kafka.avro.serializer import SerializerError 9 | except ImportError: 10 | pass 11 | 12 | 13 | class GetMessagesThread(Thread): 14 | 15 | def __init__( 16 | self, 17 | server='127.0.0.1', 18 | port='9092', 19 | topics='', 20 | group_id=None, 21 | only_value=True, 22 | **kwargs 23 | ): 24 | 25 | super().__init__() 26 | self.daemon = True 27 | self.server = server 28 | self.port = port 29 | self._is_running = True 30 | self.only_value = only_value 31 | self.consumer = KafkaConsumer() 32 | self.group_id = self.consumer.create_consumer(group_id=group_id, 33 | server=server, 34 | port=port, 35 | **kwargs) 36 | self.kwargs = kwargs 37 | if not isinstance(topics, list): 38 | topics = [topics] 39 | self.consumer.subscribe_topic(self.group_id, topics=topics) 40 | self.messages = [] 41 | self.messages += self.consumer.poll(group_id=self.group_id, only_value=self.only_value) 42 | self.start() 43 | 44 | def run(self): 45 | while self._is_running: 46 | try: 47 | self.messages += self.consumer.poll(group_id=self.group_id, only_value=self.only_value) 48 | except RuntimeError: 49 | self.consumer.unsubscribe(self.group_id) 50 | self.consumer.close_consumer(self.group_id) 51 | self._is_running = False 52 | 53 | def get_group_id(self): 54 | return self.group_id 55 | 56 | def get_messages(self): 57 | return self.messages[:] 58 | 59 | def clear_messages(self): 60 | self.messages.clear() 61 | 62 | def stop_consumer(self): 63 | self._is_running = False 64 | self.join() 65 | self.consumer.unsubscribe(self.group_id) 66 | self.consumer.close_consumer(self.group_id) 67 | admin_client = AdminClient({'bootstrap.servers': f'{self.server}:{self.port}', **self.kwargs}) 68 | response = admin_client.delete_consumer_groups([self.group_id], request_timeout=10) 69 | try: 70 | response[self.group_id].result() 71 | except Exception as e: 72 | return e 73 | return response[self.group_id].exception() 74 | 75 | 76 | class KafkaConsumer(): 77 | 78 | def __init__(self): 79 | self.consumers = {} 80 | 81 | def create_consumer( 82 | self, 83 | group_id=None, 84 | server="127.0.0.1", 85 | port="9092", 86 | enable_auto_commit=True, 87 | auto_offset_reset="latest", 88 | auto_create_topics=True, 89 | key_deserializer=None, 90 | value_deserializer=None, 91 | deserializing=False, 92 | **kwargs 93 | ): 94 | """Create Kafka Consumer and returns its `group_id` as string. 95 | 96 | Keyword Arguments: 97 | - ``server``: (str): IP address / domain, that the consumer should 98 | contact to bootstrap initial cluster metadata. 99 | Default: `127.0.0.1`. 100 | - ``port`` (int): Port number. Default: `9092`. 101 | - ``group_id`` (str or uuid.uuid4() if not set) : name of the consumer group 102 | to join for dynamic partition assignment (if enabled), and to use for fetching and 103 | committing offsets. If None, unique string is generated (via uuid.uuid4()) 104 | and offset commits are disabled. Default: `None`. 
105 |         - ``auto_offset_reset`` (str): A policy for resetting offsets on
106 |         OffsetOutOfRange errors: `earliest` will move to the oldest
107 |         available message, `latest` will move to the most recent. Any
108 |         other value will raise an exception. Default: `latest`.
109 |         - ``enable_auto_commit`` (bool): If true, the consumer's offset will be
110 |         periodically committed in the background. Default: `True`.
111 |         - ``auto_create_topics`` (bool): Consumers no longer trigger auto creation of topics;
112 |         this option will be removed in a future release. Default: `True`.
113 |         - ``deserializing`` (bool): Activates DeserializingConsumer with deserialization capabilities.
114 |         Default: `False`.
115 | 
116 |         Note:
117 |         Configuration parameters are described in more detail at
118 |         https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md \n
119 |         """
120 |         if group_id is None:
121 |             group_id = str(uuid.uuid4())
122 | 
123 |         if deserializing:
124 |             consumer = DeserializingConsumer({
125 |                 'bootstrap.servers': '{}:{}'.format(server, port),
126 |                 'group.id': group_id,
127 |                 'enable.auto.commit': enable_auto_commit,
128 |                 'auto.offset.reset': auto_offset_reset,
129 |                 'key.deserializer': key_deserializer,
130 |                 'value.deserializer': value_deserializer,
131 |                 **kwargs})
132 |         else:
133 |             consumer = Consumer({
134 |                 'bootstrap.servers': '{}:{}'.format(server, port),
135 |                 'group.id': group_id,
136 |                 'enable.auto.commit': enable_auto_commit,
137 |                 'allow.auto.create.topics': auto_create_topics,
138 |                 'auto.offset.reset': auto_offset_reset,
139 |                 **kwargs})
140 | 
141 |         self.consumers[group_id] = consumer
142 |         return group_id
143 | 
144 |     def get_all_consumers(self):
145 |         """Returns all non-threaded consumers.
146 |         """
147 |         return self.consumers
148 | 
149 |     def create_topic_partition(self, topic_name, partition=None, offset=None):
150 |         """Returns TopicPartition object based on
151 |         https://docs.confluent.io/current/clients/confluent-kafka-python/#topicpartition
152 | 
153 |         - ``topic_name`` (str): Topic name.
154 |         - ``partition`` (int): Partition id.
155 |         - ``offset`` (int): Initial partition offset.
156 |         """
157 |         if partition is not None and offset is not None:
158 |             return TopicPartition(topic_name, partition, offset)
159 |         if partition is not None:
160 |             return TopicPartition(topic_name, partition)
161 |         if offset is not None:
162 |             return TopicPartition(topic_name, offset=offset)
163 |         return TopicPartition(topic_name)
164 | 
165 |     def get_topic_partitions(self, topic):
166 |         """Returns dictionary of all TopicPartitions in topic (topic.partitions).
167 |         """
168 |         return topic.partitions
169 | 
170 |     def subscribe_topic(self, group_id, topics, **kwargs):
171 |         """Subscribe to a list of topics, or a topic regex pattern.
172 |         https://docs.confluent.io/current/clients/confluent-kafka-python/#confluent_kafka.Consumer.subscribe
173 | 
174 |         - ``topics`` (list): List of topics for subscription.
175 |         """
176 |         if not isinstance(topics, list):
177 |             topics = [topics]
178 |         self.consumers[group_id].subscribe(topics, **kwargs)
179 | 
180 |     def get_watermark_offsets(self, group_id, topic_partition, **kwargs):
181 |         """Retrieve low and high offsets for partition.
182 |         """
183 |         if not isinstance(topic_partition, TopicPartition):
184 |             raise TypeError('topic_partition needs to be TopicPartition() type!')
185 |         return self.consumers[group_id].get_watermark_offsets(topic_partition, **kwargs)
186 | 
187 |     def get_assignment(self, group_id):
188 |         return self.consumers[group_id].assignment()
189 | 
190 |     def assign_to_topic_partition(self, group_id, topic_partitions):
191 |         """Assign a list of TopicPartitions.
192 | 
193 |         - ``topic_partitions`` (`TopicPartition` or list of `TopicPartition`): Assignment for this instance.
194 |         """
195 |         if isinstance(topic_partitions, TopicPartition):
196 |             topic_partitions = [topic_partitions]
197 |         current_assignment = self.consumers[group_id].assignment()
198 |         if any(tp not in current_assignment for tp in topic_partitions):
199 |             self.consumers[group_id].assign(topic_partitions)
200 | 
201 |     def unassign(self, group_id):
202 |         self.consumers[group_id].unassign()
203 | 
204 |     def unsubscribe(self, group_id):
205 |         """Unsubscribe from topics.
206 |         """
207 |         self.consumers[group_id].unsubscribe()
208 | 
209 |     def close_consumer(self, group_id):
210 |         """Close down and terminate the Kafka Consumer.
211 |         """
212 |         self.consumers[group_id].close()
213 |         del self.consumers[group_id]
214 | 
215 |     def seek(self, group_id, topic_partition):
216 |         """https://docs.confluent.io/current/clients/confluent-kafka-python/#confluent_kafka.Consumer.seek
217 |         """
218 |         return self.consumers[group_id].seek(topic_partition)
219 | 
220 |     def get_position(self, group_id, topic_partitions):
221 |         """Retrieve current positions (offsets) for the list of partitions.
222 | 
223 |         - ``topic_partitions`` (`TopicPartition` or list of `TopicPartition`): Partitions to return the current position for.
224 |         """
225 |         if isinstance(topic_partitions, TopicPartition):
226 |             topic_partitions = [topic_partitions]
227 |         return self.consumers[group_id].position(topic_partitions)
228 | 
229 |     def pause(self, group_id, topic_partitions):
230 |         """Pause consumption for the provided list of partitions.
231 |         """
232 |         if isinstance(topic_partitions, TopicPartition):
233 |             topic_partitions = [topic_partitions]
234 |         self.consumers[group_id].pause(topic_partitions)
235 | 
236 |     def resume(self, group_id, topic_partitions):
237 |         """Resume consumption for the provided list of partitions.
238 |         """
239 |         if isinstance(topic_partitions, TopicPartition):
240 |             topic_partitions = [topic_partitions]
241 |         self.consumers[group_id].resume(topic_partitions)
242 | 
243 |     def store_offsets(self, group_id, **kwargs):
244 |         """Store offsets for a message or a list of offsets.
245 |         """
246 |         self.consumers[group_id].store_offsets(**kwargs)
247 | 
248 |     def poll(
249 |         self,
250 |         group_id,
251 |         timeout=1,
252 |         max_records=1,
253 |         poll_attempts=10,
254 |         only_value=True,
255 |         auto_create_topics=True,
256 |         decode_format=None,
257 |         fail_on_deserialization=False
258 |     ):
259 |         """Fetch and return messages from assigned topics / partitions as a list.
260 |         - ``timeout`` (int): Seconds spent waiting in poll if data is not available in the buffer.\n
261 |         - ``max_records`` (int): Maximum number of messages to return from poll.
262 |         Polling stops once this many messages have been collected or the poll
263 |         attempts are exhausted. Must not be negative. Default: `1`.
264 |         - ``poll_attempts`` (int): Number of poll attempts, used to prevent endless looping
265 |         when the first messages are None or the topic is empty. Default: `10`.
266 |         - ``only_value`` (bool): Return only message.value().
Default: `True`. 267 | - ``decode_format`` (str) - If you need to decode data to specific format 268 | (See https://docs.python.org/3/library/codecs.html#standard-encodings). Default: None. 269 | - ``auto_create_topics`` (bool): Consumers no longer trigger auto creation of topics, 270 | will be removed in future release. If True then the error message UNKNOWN_TOPIC_OR_PART is ignored. 271 | Default: `True`. 272 | - ``fail_on_deserialization`` (bool): If True and message deserialization fails, will raise a SerializerError 273 | exception; on False will just stop the current poll and return the message so far. Default: `False`. 274 | """ 275 | 276 | messages = [] 277 | while poll_attempts > 0: 278 | msg = None 279 | try: 280 | msg = self.consumers[group_id].poll(timeout=timeout) 281 | except SerializerError as err: 282 | error = 'Message deserialization failed for {}: {}'.format(msg, err) 283 | if fail_on_deserialization: 284 | raise SerializerError(error) 285 | 286 | print(error) 287 | break 288 | 289 | if msg is None: 290 | poll_attempts -= 1 291 | continue 292 | 293 | if msg.error(): 294 | # Workaround due to new message return + deprecation of the "Consumers no longer trigger auto creation of topics" 295 | if int(msg.error().code()) == KafkaError.UNKNOWN_TOPIC_OR_PART and auto_create_topics: 296 | continue 297 | raise KafkaException(msg.error()) 298 | 299 | if only_value: 300 | messages.append(msg.value()) 301 | else: 302 | messages.append(msg) 303 | 304 | if len(messages) == max_records: 305 | break 306 | 307 | if decode_format: 308 | messages = self._decode_data(data=messages, decode_format=decode_format) 309 | 310 | return messages 311 | 312 | def _decode_data(self, data, decode_format): 313 | if decode_format: 314 | return [record.decode(str(decode_format)) for record in data] 315 | return data 316 | 317 | # Experimental - getting messages from kafka topic every second 318 | def start_consumer_threaded( 319 | self, 320 | topics, 321 | group_id=None, 322 | server='127.0.0.1', 323 | port='9092', 324 | only_value=True, 325 | **kwargs 326 | ): 327 | """Run consumer in daemon thread and store data from topics. To read and work with this 328 | collected data use keyword `Get Messages From Thread`. 329 | Could be used at the Test setup or in each test. 330 | This is useful when you are reading always the same topics and you don't want to create 331 | consumer in each test to poll data. You can create as many consumers in the Test setup 332 | as you want and then in test just read data with `Get Messages From Thread` keyword. 333 | - ``topics`` (list): List of topics for subscription. 334 | - ``group_id`` (str or uuid.uuid4() if not set) : name of the consumer group to join for 335 | dynamic partition assignment (if enabled), and to use for fetching and 336 | committing offsets. If None, unique string is generated (via uuid.uuid4()) 337 | and offset commits are disabled. Default: `None`. 
338 | """ 339 | if group_id is None: 340 | group_id = str(uuid.uuid4()) 341 | if topics is None: 342 | raise ValueError("Topics can not be empty!") 343 | 344 | consumer_thread = GetMessagesThread(server, port, topics, group_id=group_id, only_value=only_value, **kwargs) 345 | group_id = consumer_thread.group_id 346 | self.consumers[group_id] = consumer_thread.consumer.consumers[group_id] 347 | return consumer_thread 348 | 349 | def get_messages_from_thread(self, running_thread, decode_format=None): 350 | """Returns all records gathered from specific thread 351 | - ``running_thread`` (Thread object) - thread which was executed with 352 | `Start Consumer Threaded` keyword 353 | - ``decode_format`` (str) - If you need to decode data to specific format 354 | (See https://docs.python.org/3/library/codecs.html#standard-encodings). Default: None. 355 | """ 356 | records = running_thread.get_messages() 357 | if records: 358 | records = self._decode_data(records, decode_format) 359 | return records 360 | 361 | def get_thread_group_id(self, running_thread): 362 | return running_thread.get_group_id() 363 | 364 | def clear_messages_from_thread(self, running_thread): 365 | """Remove all records gathered from specific thread 366 | - ``running_thread`` (Thread object) - thread which was executed with 367 | `Start Consumer Threaded` keyword 368 | """ 369 | try: 370 | running_thread.clear_messages() 371 | except Exception as e: 372 | return f"Messages were not removed from thread {running_thread}!\n{e}" 373 | 374 | def stop_consumer_threaded(self, running_thread): 375 | resp = running_thread.stop_consumer() 376 | return resp --------------------------------------------------------------------------------
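# --- Editor's note: a minimal, hypothetical Robot Framework sketch (not a file from the repository)
# --- showing how the threaded-consumer keywords defined in consumer.py above (Start Consumer
# --- Threaded, Get Messages From Thread, Clear Messages From Thread, Stop Consumer Threaded)
# --- fit together. It condenses the pattern already used in examples/test.robot and assumes a
# --- reachable broker on localhost:9092 and an existing topic named test.
# *** Test Cases ***
# Threaded Consumer Sketch
#     ${thread}=    Start Consumer Threaded    topics=test
#     ${producer_id}=    Create Producer
#     Produce    group_id=${producer_id}    topic=test    value=Hello
#     ${count}=    Flush    ${producer_id}
#     Sleep    1sec    # give the background thread time to poll the new message
#     ${messages}=    Get Messages From Thread    ${thread}    decode_format=utf-8
#     Log    ${messages}
#     Clear Messages From Thread    ${thread}
#     [Teardown]    Stop Consumer Threaded    ${thread}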