├── kafka ├── cli │ ├── __init__.py │ ├── admin │ │ ├── topics │ │ │ ├── list.py │ │ │ ├── delete.py │ │ │ ├── describe.py │ │ │ ├── __init__.py │ │ │ └── create.py │ │ ├── cluster │ │ │ ├── describe.py │ │ │ └── __init__.py │ │ ├── consumer_groups │ │ │ ├── list.py │ │ │ ├── delete.py │ │ │ ├── list_offsets.py │ │ │ ├── describe.py │ │ │ └── __init__.py │ │ ├── log_dirs │ │ │ ├── describe.py │ │ │ └── __init__.py │ │ └── configs │ │ │ ├── __init__.py │ │ │ └── describe.py │ └── producer │ │ └── __init__.py ├── benchmarks │ ├── __init__.py │ ├── README.md │ ├── record_batch_compose.py │ └── record_batch_read.py ├── vendor │ └── __init__.py ├── coordinator │ ├── __init__.py │ ├── assignors │ │ ├── __init__.py │ │ ├── sticky │ │ │ ├── __init__.py │ │ │ └── sorted_set.py │ │ └── abstract.py │ ├── subscription.py │ └── protocol.py ├── version.py ├── serializer │ ├── __init__.py │ └── abstract.py ├── admin │ ├── __main__.py │ ├── __init__.py │ ├── new_partitions.py │ ├── new_topic.py │ └── config_resource.py ├── consumer │ ├── __main__.py │ └── __init__.py ├── producer │ ├── __main__.py │ └── __init__.py ├── __main__.py ├── record │ ├── __init__.py │ └── README ├── partitioner │ ├── __init__.py │ └── default.py ├── metrics │ ├── stats │ │ ├── percentile.py │ │ ├── total.py │ │ ├── count.py │ │ ├── max_stat.py │ │ ├── __init__.py │ │ ├── min_stat.py │ │ └── avg.py │ ├── measurable_stat.py │ ├── __init__.py │ ├── stat.py │ ├── measurable.py │ ├── compound_stat.py │ ├── kafka_metric.py │ ├── quota.py │ ├── metric_config.py │ ├── metrics_reporter.py │ └── dict_reporter.py ├── protocol │ ├── abstract.py │ ├── frame.py │ ├── sasl_handshake.py │ ├── init_producer_id.py │ ├── sasl_authenticate.py │ ├── __init__.py │ ├── end_txn.py │ ├── add_offsets_to_txn.py │ ├── find_coordinator.py │ ├── add_partitions_to_txn.py │ ├── struct.py │ └── txn_offset_commit.py ├── sasl │ ├── abc.py │ ├── __init__.py │ └── plain.py └── __init__.py ├── test ├── integration │ └── __init__.py ├── __init__.py ├── protocol │ ├── test_bit_field.py │ ├── test_api.py │ └── test_compact.py ├── test_api_object_implementation.py ├── test_util.py ├── test_partition_movements.py ├── test_package.py ├── test_producer.py ├── test_partitioner.py ├── sasl │ └── test_gssapi.py ├── conftest.py ├── test_subscription_state.py ├── test_consumer.py └── testutil.py ├── .covrc ├── docs ├── apidoc │ ├── KafkaClient.rst │ ├── KafkaConsumer.rst │ ├── KafkaProducer.rst │ ├── TopicPartition.rst │ ├── BrokerConnection.rst │ ├── KafkaAdminClient.rst │ ├── ClusterMetadata.rst │ ├── OffsetAndMetadata.rst │ └── modules.rst ├── requirements.txt ├── license.rst ├── support.rst ├── compatibility.rst ├── tests.rst └── install.rst ├── servers ├── 1.0.0 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 1.0.1 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 1.0.2 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 1.1.0 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 1.1.1 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 2.0.0 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 2.0.1 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 2.1.0 │ └── resources │ │ ├── kafka_server_jaas.conf │ 
│ ├── zookeeper.properties │ │ └── log4j.properties ├── 2.1.1 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 2.2.1 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 2.3.0 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 2.4.0 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 2.5.0 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── trunk │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 0.10.0.0 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 0.10.0.1 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 0.10.1.1 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 0.10.2.1 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 0.10.2.2 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 0.11.0.0 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 0.11.0.1 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 0.11.0.2 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 0.11.0.3 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 2.6.0 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── resources │ └── default │ │ ├── kafka_server_jaas.conf │ │ ├── sasl_command.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 0.8.0 │ └── resources │ │ ├── zookeeper.properties │ │ ├── log4j.properties │ │ └── kafka.properties ├── 0.8.1 │ └── resources │ │ ├── zookeeper.properties │ │ ├── log4j.properties │ │ └── kafka.properties ├── 0.8.1.1 │ └── resources │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 0.8.2.0 │ └── resources │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 0.8.2.1 │ └── resources │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 0.8.2.2 │ └── resources │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 0.9.0.0 │ └── resources │ │ ├── zookeeper.properties │ │ └── log4j.properties └── 0.9.0.1 │ └── resources │ ├── zookeeper.properties │ └── log4j.properties ├── setup.py ├── MANIFEST.in ├── pylint.rc ├── pytest.ini ├── .github ├── dependabot.yml └── workflows │ ├── codeql-analysis.yml │ └── python-package.yml ├── requirements-dev.txt ├── .gitignore ├── .readthedocs.yaml ├── pyproject.toml ├── example.py └── AUTHORS.md /kafka/cli/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /kafka/benchmarks/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /kafka/vendor/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- 
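The tree above shows how the admin CLI is organized: each leaf module under kafka/cli/admin registers itself on a shared argparse tree through an add_subparser classmethod, and the package __init__ files fan the subcommands out (the actual modules appear later in this dump). A minimal, self-contained sketch of that registration pattern; the build_parser wiring here is a hypothetical stand-in for the library's run_cli, not its actual implementation:

import argparse


class ListTopics:
    # Mirrors the shape of kafka/cli/admin/topics/list.py shown later in this dump.

    @classmethod
    def add_subparser(cls, subparsers):
        parser = subparsers.add_parser('list', help='List Kafka Topics')
        # 'cli' is expected to expose KafkaAdminClient methods at dispatch time.
        parser.set_defaults(command=lambda cli, _args: cli.list_topics())


def build_parser():
    # Hypothetical top-level wiring: one subparser tree per resource type.
    parser = argparse.ArgumentParser(prog='kafka-admin')
    topics = parser.add_subparsers().add_parser('topics', help='Manage Kafka Topics')
    ListTopics.add_subparser(topics.add_subparsers())
    return parser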
/test/integration/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /kafka/coordinator/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /kafka/coordinator/assignors/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /kafka/version.py: -------------------------------------------------------------------------------- 1 | __version__ = '2.3.0' 2 | -------------------------------------------------------------------------------- /kafka/coordinator/assignors/sticky/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.covrc: -------------------------------------------------------------------------------- 1 | [run] 2 | omit = 3 | kafka/vendor/* 4 | -------------------------------------------------------------------------------- /docs/apidoc/KafkaClient.rst: -------------------------------------------------------------------------------- 1 | .. autoclass:: kafka.KafkaClient 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/apidoc/KafkaConsumer.rst: -------------------------------------------------------------------------------- 1 | .. autoclass:: kafka.KafkaConsumer 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/apidoc/KafkaProducer.rst: -------------------------------------------------------------------------------- 1 | .. autoclass:: kafka.KafkaProducer 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/apidoc/TopicPartition.rst: -------------------------------------------------------------------------------- 1 | .. autoclass:: kafka.TopicPartition 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/apidoc/BrokerConnection.rst: -------------------------------------------------------------------------------- 1 | .. autoclass:: kafka.BrokerConnection 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/apidoc/KafkaAdminClient.rst: -------------------------------------------------------------------------------- 1 | .. autoclass:: kafka.KafkaAdminClient 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/apidoc/ClusterMetadata.rst: -------------------------------------------------------------------------------- 1 | .. autoclass:: kafka.cluster.ClusterMetadata 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/apidoc/OffsetAndMetadata.rst: -------------------------------------------------------------------------------- 1 | .. 
autoclass:: kafka.OffsetAndMetadata 2 | :members: 3 | -------------------------------------------------------------------------------- /kafka/serializer/__init__.py: -------------------------------------------------------------------------------- 1 | from kafka.serializer.abstract import Serializer, Deserializer 2 | -------------------------------------------------------------------------------- /servers/1.0.0/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/1.0.1/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/1.0.2/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/1.1.0/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/1.1.1/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/2.0.0/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/2.0.1/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/2.1.0/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/2.1.1/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/2.2.1/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/2.3.0/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/2.4.0/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | 
{jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/2.5.0/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/trunk/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /kafka/admin/__main__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from kafka.cli.admin import run_cli 4 | 5 | sys.exit(run_cli()) 6 | -------------------------------------------------------------------------------- /servers/0.10.0.0/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/0.10.0.1/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/0.10.1.1/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/0.10.2.1/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/0.10.2.2/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/0.11.0.0/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/0.11.0.1/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/0.11.0.2/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/0.11.0.3/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/2.6.0/resources/kafka_server_jaas.conf: 
-------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; 5 | -------------------------------------------------------------------------------- /servers/resources/default/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /kafka/consumer/__main__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from kafka.cli.consumer import run_cli 4 | 5 | sys.exit(run_cli()) 6 | -------------------------------------------------------------------------------- /kafka/producer/__main__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from kafka.cli.producer import run_cli 4 | 5 | sys.exit(run_cli()) 6 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # See pyproject.toml for project / build configuration 2 | from setuptools import setup 3 | 4 | setup() 5 | -------------------------------------------------------------------------------- /kafka/consumer/__init__.py: -------------------------------------------------------------------------------- 1 | from kafka.consumer.group import KafkaConsumer 2 | 3 | __all__ = [ 4 | 'KafkaConsumer' 5 | ] 6 | -------------------------------------------------------------------------------- /kafka/producer/__init__.py: -------------------------------------------------------------------------------- 1 | from kafka.producer.kafka import KafkaProducer 2 | 3 | __all__ = [ 4 | 'KafkaProducer' 5 | ] 6 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include kafka *.py 2 | include README.rst 3 | include LICENSE 4 | include AUTHORS.md 5 | include CHANGES.md 6 | -------------------------------------------------------------------------------- /kafka/__main__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | print("Available module interfaces: kafka.consumer, kafka.producer, kafka.admin") 4 | 5 | sys.exit(1) 6 | -------------------------------------------------------------------------------- /servers/resources/default/sasl_command.conf: -------------------------------------------------------------------------------- 1 | security.protocol={transport} 2 | sasl.mechanism={sasl_mechanism} 3 | sasl.jaas.config={jaas_config} 4 | -------------------------------------------------------------------------------- /kafka/record/__init__.py: -------------------------------------------------------------------------------- 1 | from kafka.record.memory_records import MemoryRecords, MemoryRecordsBuilder 2 | 3 | __all__ = ["MemoryRecords", "MemoryRecordsBuilder"] 4 | -------------------------------------------------------------------------------- /pylint.rc: -------------------------------------------------------------------------------- 1 | [TYPECHECK] 2 | ignored-classes=SyncManager,_socketobject 3 | ignored-modules= 4 | generated-members=py.* 5 | 6 | [MESSAGES CONTROL] 7 | disable=E1129 8 | -------------------------------------------------------------------------------- 
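The __main__ modules above make each interface runnable with python -m (for example, python -m kafka.producer), while the package __init__ files re-export the two primary client classes. A minimal produce-and-consume round trip using those exports; the broker address, topic name, and payload are placeholders:

from kafka import KafkaConsumer, KafkaProducer

# Send a single message (assumes a broker reachable at localhost:9092).
producer = KafkaProducer(bootstrap_servers='localhost:9092')
producer.send('example-topic', b'hello, kafka')
producer.flush()
producer.close()

# Read it back; consumer_timeout_ms stops iteration once the topic goes idle.
consumer = KafkaConsumer(
    'example-topic',
    bootstrap_servers='localhost:9092',
    auto_offset_reset='earliest',
    consumer_timeout_ms=5000,
)
for message in consumer:
    print(message.topic, message.partition, message.offset, message.value)
consumer.close()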
/kafka/partitioner/__init__.py: -------------------------------------------------------------------------------- 1 | from kafka.partitioner.default import DefaultPartitioner, murmur2 2 | 3 | 4 | __all__ = [ 5 | 'DefaultPartitioner', 'murmur2' 6 | ] 7 | --------------------------------------------------------------------------------
/pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | log_format = %(asctime)s.%(msecs)03d %(levelname)-8s %(thread)d:%(threadName)s %(name)-23s %(message)s 3 | log_level = DEBUG 4 | addopts = --durations=10 --timeout=300 5 | --------------------------------------------------------------------------------
/.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | # Maintain dependencies for GitHub Actions 4 | - package-ecosystem: "github-actions" 5 | directory: "/" 6 | schedule: 7 | interval: "daily" 8 | --------------------------------------------------------------------------------
/docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx==8.1.3 2 | sphinx_rtd_theme==3.0.2 3 | 4 | # Install kafka-python in editable mode 5 | # This allows the sphinx autodoc module 6 | # to load the Python modules and extract docstrings. 7 | # -e .. 8 | --------------------------------------------------------------------------------
/test/__init__.py: -------------------------------------------------------------------------------- 1 | # Set default logging handler to avoid "No handler found" warnings. 2 | import logging 3 | logging.basicConfig(level=logging.INFO) 4 | 5 | from kafka.future import Future 6 | Future.error_on_callbacks = True # always fail during testing 7 | --------------------------------------------------------------------------------
/kafka/cli/admin/topics/list.py: -------------------------------------------------------------------------------- 1 | class ListTopics: 2 | 3 | @classmethod 4 | def add_subparser(cls, subparsers): 5 | parser = subparsers.add_parser('list', help='List Kafka Topics') 6 | parser.set_defaults(command=lambda cli, _args: cli.list_topics()) 7 | --------------------------------------------------------------------------------
/requirements-dev.txt: -------------------------------------------------------------------------------- 1 | coveralls 2 | crc32c 3 | docker-py 4 | flake8 5 | lz4 6 | py 7 | pylint 8 | pyperf 9 | pytest 10 | pytest-cov 11 | pytest-mock 12 | pytest-pylint 13 | pytest-timeout 14 | python-snappy 15 | Sphinx 16 | sphinx-rtd-theme 17 | xxhash 18 | zstandard 19 | --------------------------------------------------------------------------------
/kafka/benchmarks/README.md: -------------------------------------------------------------------------------- 1 | The `record_batch_*` benchmarks in this section are written using 2 | the ``pyperf`` library, created by Victor Stinner. For more information on 3 | how to get reliable results of test runs, please consult 4 | https://pyperf.readthedocs.io/en/latest/run_benchmark.html.
5 | -------------------------------------------------------------------------------- /kafka/cli/admin/cluster/describe.py: -------------------------------------------------------------------------------- 1 | class DescribeCluster: 2 | 3 | @classmethod 4 | def add_subparser(cls, subparsers): 5 | parser = subparsers.add_parser('describe', help='Describe Kafka Cluster') 6 | parser.set_defaults(command=lambda cli, _args: cli.describe_cluster()) 7 | -------------------------------------------------------------------------------- /docs/apidoc/modules.rst: -------------------------------------------------------------------------------- 1 | kafka-python API 2 | **************** 3 | 4 | .. toctree:: 5 | :maxdepth: 1 6 | 7 | KafkaConsumer 8 | KafkaProducer 9 | KafkaAdminClient 10 | KafkaClient 11 | BrokerConnection 12 | ClusterMetadata 13 | OffsetAndMetadata 14 | TopicPartition 15 | -------------------------------------------------------------------------------- /kafka/cli/admin/consumer_groups/list.py: -------------------------------------------------------------------------------- 1 | class ListConsumerGroups: 2 | 3 | @classmethod 4 | def add_subparser(cls, subparsers): 5 | parser = subparsers.add_parser('list', help='List Consumer Groups') 6 | parser.set_defaults(command=lambda cli, _args: cli.list_consumer_groups()) 7 | -------------------------------------------------------------------------------- /kafka/cli/admin/log_dirs/describe.py: -------------------------------------------------------------------------------- 1 | class DescribeLogDirs: 2 | 3 | @classmethod 4 | def add_subparser(cls, subparsers): 5 | parser = subparsers.add_parser('describe', help='Get topic log directories for brokers') 6 | parser.set_defaults(command=lambda cli, _args: cli.describe_log_dirs()) 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.egg-info 2 | *.pyc 3 | .tox 4 | build 5 | dist 6 | MANIFEST 7 | env 8 | servers/*/kafka-bin* 9 | servers/*/resources/ssl* 10 | .coverage* 11 | .noseids 12 | docs/_build 13 | .cache* 14 | .idea/ 15 | integration-test/ 16 | tests-env/ 17 | .pytest_cache/ 18 | .envrc 19 | shell.nix 20 | .venv*/ 21 | -------------------------------------------------------------------------------- /kafka/cli/admin/topics/delete.py: -------------------------------------------------------------------------------- 1 | class DeleteTopic: 2 | 3 | @classmethod 4 | def add_subparser(cls, subparsers): 5 | parser = subparsers.add_parser('delete', help='Delete Kafka Topic') 6 | parser.add_argument('-t', '--topic', type=str, required=True) 7 | parser.set_defaults(command=lambda cli, args: cli.delete_topics([args.topic])) 8 | -------------------------------------------------------------------------------- /test/protocol/test_bit_field.py: -------------------------------------------------------------------------------- 1 | import io 2 | 3 | import pytest 4 | 5 | from kafka.protocol.types import BitField 6 | 7 | 8 | @pytest.mark.parametrize(('test_set',), [ 9 | (set([0, 1, 5, 10, 31]),), 10 | (set(range(32)),), 11 | ]) 12 | def test_bit_field(test_set): 13 | assert BitField.decode(io.BytesIO(BitField.encode(test_set))) == test_set 14 | -------------------------------------------------------------------------------- /kafka/cli/admin/topics/describe.py: -------------------------------------------------------------------------------- 1 | class DescribeTopics: 2 | 3 | @classmethod 4 | def 
add_subparser(cls, subparsers): 5 | parser = subparsers.add_parser('describe', help='Describe Kafka Topics') 6 | parser.add_argument('-t', '--topic', type=str, action='append', dest='topics') 7 | parser.set_defaults(command=lambda cli, args: cli.describe_topics(args.topics or None)) 8 | --------------------------------------------------------------------------------
/kafka/record/README: -------------------------------------------------------------------------------- 1 | This module is structured largely after the 2 | kafka/clients/src/main/java/org/apache/kafka/common/record/ module of the 3 | Java client. 4 | 5 | See abc.py for abstract declarations. `ABCRecords` is used as a facade to hide 6 | version differences. `ABCRecordBatch` subclasses will implement actual parsers 7 | for different versions (v0/v1 as LegacyBatch and v2 as DefaultBatch; names 8 | taken from Java). 9 | --------------------------------------------------------------------------------
/kafka/cli/admin/consumer_groups/delete.py: -------------------------------------------------------------------------------- 1 | class DeleteConsumerGroups: 2 | 3 | @classmethod 4 | def add_subparser(cls, subparsers): 5 | parser = subparsers.add_parser('delete', help='Delete Consumer Groups') 6 | parser.add_argument('-g', '--group-id', type=str, action='append', dest='groups', required=True) 7 | parser.set_defaults(command=lambda cli, args: cli.delete_consumer_groups(args.groups)) 8 | --------------------------------------------------------------------------------
/kafka/cli/admin/consumer_groups/list_offsets.py: -------------------------------------------------------------------------------- 1 | class ListConsumerGroupOffsets: 2 | 3 | @classmethod 4 | def add_subparser(cls, subparsers): 5 | parser = subparsers.add_parser('list-offsets', help='List Offsets for Consumer Group') 6 | parser.add_argument('-g', '--group-id', type=str, required=True) 7 | parser.set_defaults(command=lambda cli, args: cli.list_consumer_group_offsets(args.group_id)) 8 | --------------------------------------------------------------------------------
/kafka/cli/admin/consumer_groups/describe.py: -------------------------------------------------------------------------------- 1 | class DescribeConsumerGroups: 2 | 3 | @classmethod 4 | def add_subparser(cls, subparsers): 5 | parser = subparsers.add_parser('describe', help='Describe Consumer Groups') 6 | parser.add_argument('-g', '--group-id', type=str, action='append', dest='groups', required=True) 7 | parser.set_defaults(command=lambda cli, args: cli.describe_consumer_groups(args.groups)) 8 | --------------------------------------------------------------------------------
/kafka/metrics/stats/percentile.py: -------------------------------------------------------------------------------- 1 | class Percentile(object): 2 | __slots__ = ('_metric_name', '_percentile') 3 | 4 | def __init__(self, metric_name, percentile): 5 | self._metric_name = metric_name 6 | self._percentile = float(percentile) 7 | 8 | @property 9 | def name(self): 10 | return self._metric_name 11 | 12 | @property 13 | def percentile(self): 14 | return self._percentile 15 | --------------------------------------------------------------------------------
/docs/license.rst: -------------------------------------------------------------------------------- 1 | License 2 | ------- 3 | 4 | .. image:: https://img.shields.io/badge/license-Apache%202-blue.svg 5 | :target: https://github.com/dpkp/kafka-python/blob/master/LICENSE 6 | 7 | Apache License, v2.0. See `LICENSE <https://github.com/dpkp/kafka-python/blob/master/LICENSE>`_. 
8 | 9 | Copyright 2025, Dana Powers, David Arthur, and Contributors 10 | (See `AUTHORS <https://github.com/dpkp/kafka-python/blob/master/AUTHORS.md>`_). 11 | --------------------------------------------------------------------------------
/docs/support.rst: -------------------------------------------------------------------------------- 1 | Support 2 | ------- 3 | 4 | For support, see GitHub issues at https://github.com/dpkp/kafka-python 5 | 6 | Limited IRC chat at #kafka-python on freenode (general chat is #apache-kafka). 7 | 8 | For information about Apache Kafka generally, see https://kafka.apache.org/ 9 | 10 | For general discussion of kafka-client design and implementation (not python 11 | specific), see https://groups.google.com/forum/m/#!forum/kafka-clients 12 | --------------------------------------------------------------------------------
/kafka/protocol/abstract.py: -------------------------------------------------------------------------------- 1 | import abc 2 | 3 | 4 | class AbstractType(object, metaclass=abc.ABCMeta): 5 | @classmethod 6 | @abc.abstractmethod 7 | def encode(cls, value): # pylint: disable=no-self-argument 8 | pass 9 | 10 | @classmethod 11 | @abc.abstractmethod 12 | def decode(cls, data): # pylint: disable=no-self-argument 13 | pass 14 | 15 | @classmethod 16 | def repr(cls, value): 17 | return repr(value) 18 | --------------------------------------------------------------------------------
/kafka/metrics/stats/total.py: -------------------------------------------------------------------------------- 1 | from kafka.metrics.measurable_stat import AbstractMeasurableStat 2 | 3 | 4 | class Total(AbstractMeasurableStat): 5 | """An un-windowed cumulative total maintained over all time.""" 6 | __slots__ = ('_total',) 7 | 8 | def __init__(self, value=0.0): 9 | self._total = value 10 | 11 | def record(self, config, value, now): 12 | self._total += value 13 | 14 | def measure(self, config, now): 15 | return float(self._total) 16 | --------------------------------------------------------------------------------
/kafka/cli/admin/cluster/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from .describe import DescribeCluster 4 | 5 | 6 | class ClusterSubCommand: 7 | 8 | @classmethod 9 | def add_subparser(cls, subparsers): 10 | parser = subparsers.add_parser('cluster', help='Manage Kafka Cluster') 11 | commands = parser.add_subparsers() 12 | for cmd in [DescribeCluster]: 13 | cmd.add_subparser(commands) 14 | parser.set_defaults(command=lambda *_args: parser.print_help() or sys.exit(2)) 15 | --------------------------------------------------------------------------------
/kafka/cli/admin/configs/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from .describe import DescribeConfigs 4 | 5 | 6 | class ConfigsSubCommand: 7 | 8 | @classmethod 9 | def add_subparser(cls, subparsers): 10 | parser = subparsers.add_parser('configs', help='Manage Kafka Configuration') 11 | commands = parser.add_subparsers() 12 | for cmd in [DescribeConfigs]: 13 | cmd.add_subparser(commands) 14 | parser.set_defaults(command=lambda *_args: parser.print_help() or sys.exit(2)) 15 | --------------------------------------------------------------------------------
/kafka/metrics/measurable_stat.py: -------------------------------------------------------------------------------- 1 | import abc 2 | 3 | from kafka.metrics.measurable import AbstractMeasurable 4 | from kafka.metrics.stat import AbstractStat 5 | 6 | 7 | class 
AbstractMeasurableStat(AbstractStat, AbstractMeasurable, metaclass=abc.ABCMeta): 8 | """ 9 | An AbstractMeasurableStat is an AbstractStat that is also 10 | an AbstractMeasurable (i.e. can produce a single floating point value). 11 | This is the interface used for most of the simple statistics such 12 | as Avg, Max, Count, etc. 13 | """ 14 | --------------------------------------------------------------------------------
/kafka/cli/admin/log_dirs/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from .describe import DescribeLogDirs 4 | 5 | 6 | class LogDirsSubCommand: 7 | 8 | @classmethod 9 | def add_subparser(cls, subparsers): 10 | parser = subparsers.add_parser('log-dirs', help='Manage Kafka Topic/Partition Log Directories') 11 | commands = parser.add_subparsers() 12 | for cmd in [DescribeLogDirs]: 13 | cmd.add_subparser(commands) 14 | parser.set_defaults(command=lambda *_args: parser.print_help() or sys.exit(2)) 15 | --------------------------------------------------------------------------------
/kafka/metrics/stats/count.py: -------------------------------------------------------------------------------- 1 | from kafka.metrics.stats.sampled_stat import AbstractSampledStat 2 | 3 | 4 | class Count(AbstractSampledStat): 5 | """ 6 | An AbstractSampledStat that maintains a simple count of what it has seen. 7 | """ 8 | __slots__ = ('_initial_value', '_samples', '_current') 9 | 10 | def __init__(self): 11 | super(Count, self).__init__(0.0) 12 | 13 | def update(self, sample, config, value, now): 14 | sample.value += 1.0 15 | 16 | def combine(self, samples, config, now): 17 | return float(sum(sample.value for sample in samples)) 18 | --------------------------------------------------------------------------------
/kafka/serializer/abstract.py: -------------------------------------------------------------------------------- 1 | import abc 2 | 3 | 4 | class Serializer(object, metaclass=abc.ABCMeta): 5 | 6 | def __init__(self, **config): 7 | pass 8 | 9 | @abc.abstractmethod 10 | def serialize(self, topic, value): 11 | pass 12 | 13 | def close(self): 14 | pass 15 | 16 | 17 | class Deserializer(object, metaclass=abc.ABCMeta): 18 | 19 | def __init__(self, **config): 20 | pass 21 | 22 | @abc.abstractmethod 23 | def deserialize(self, topic, bytes_): 24 | pass 25 | 26 | def close(self): 27 | pass 28 | --------------------------------------------------------------------------------
/kafka/metrics/__init__.py: -------------------------------------------------------------------------------- 1 | from kafka.metrics.compound_stat import NamedMeasurable 2 | from kafka.metrics.dict_reporter import DictReporter 3 | from kafka.metrics.kafka_metric import KafkaMetric 4 | from kafka.metrics.measurable import AnonMeasurable 5 | from kafka.metrics.metric_config import MetricConfig 6 | from kafka.metrics.metric_name import MetricName 7 | from kafka.metrics.metrics import Metrics 8 | from kafka.metrics.quota import Quota 9 | 10 | __all__ = [ 11 | 'AnonMeasurable', 'DictReporter', 'KafkaMetric', 'MetricConfig', 12 | 'MetricName', 'Metrics', 'NamedMeasurable', 'Quota' 13 | ] 14 | --------------------------------------------------------------------------------
/kafka/metrics/stats/max_stat.py: -------------------------------------------------------------------------------- 1 | from kafka.metrics.stats.sampled_stat import AbstractSampledStat 2 | 3 | 4 | class Max(AbstractSampledStat): 5 | """An AbstractSampledStat that gives the max over its 
samples.""" 6 | __slots__ = ('_initial_value', '_samples', '_current') 7 | 8 | def __init__(self): 9 | super(Max, self).__init__(float('-inf')) 10 | 11 | def update(self, sample, config, value, now): 12 | sample.value = max(sample.value, value) 13 | 14 | def combine(self, samples, config, now): 15 | if not samples: 16 | return float('-inf') 17 | return float(max(sample.value for sample in samples)) 18 | -------------------------------------------------------------------------------- /kafka/cli/admin/topics/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from .create import CreateTopic 4 | from .delete import DeleteTopic 5 | from .describe import DescribeTopics 6 | from .list import ListTopics 7 | 8 | 9 | class TopicsSubCommand: 10 | 11 | @classmethod 12 | def add_subparser(cls, subparsers): 13 | parser = subparsers.add_parser('topics', help='List/Describe/Create/Delete Kafka Topics') 14 | commands = parser.add_subparsers() 15 | for cmd in [ListTopics, DescribeTopics, CreateTopic, DeleteTopic]: 16 | cmd.add_subparser(commands) 17 | parser.set_defaults(command=lambda *_args: parser.print_help() or sys.exit(2)) 18 | -------------------------------------------------------------------------------- /kafka/metrics/stat.py: -------------------------------------------------------------------------------- 1 | import abc 2 | 3 | 4 | class AbstractStat(object, metaclass=abc.ABCMeta): 5 | """ 6 | An AbstractStat is a quantity such as average, max, etc that is computed 7 | off the stream of updates to a sensor 8 | """ 9 | @abc.abstractmethod 10 | def record(self, config, value, time_ms): 11 | """ 12 | Record the given value 13 | 14 | Arguments: 15 | config (MetricConfig): The configuration to use for this metric 16 | value (float): The value to record 17 | timeMs (int): The POSIX time in milliseconds this value occurred 18 | """ 19 | raise NotImplementedError 20 | -------------------------------------------------------------------------------- /kafka/metrics/stats/__init__.py: -------------------------------------------------------------------------------- 1 | from kafka.metrics.stats.avg import Avg 2 | from kafka.metrics.stats.count import Count 3 | from kafka.metrics.stats.histogram import Histogram 4 | from kafka.metrics.stats.max_stat import Max 5 | from kafka.metrics.stats.min_stat import Min 6 | from kafka.metrics.stats.percentile import Percentile 7 | from kafka.metrics.stats.percentiles import Percentiles 8 | from kafka.metrics.stats.rate import Rate 9 | from kafka.metrics.stats.sensor import Sensor 10 | from kafka.metrics.stats.total import Total 11 | 12 | __all__ = [ 13 | 'Avg', 'Count', 'Histogram', 'Max', 'Min', 'Percentile', 'Percentiles', 14 | 'Rate', 'Sensor', 'Total' 15 | ] 16 | -------------------------------------------------------------------------------- /kafka/metrics/stats/min_stat.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from kafka.metrics.stats.sampled_stat import AbstractSampledStat 4 | 5 | 6 | class Min(AbstractSampledStat): 7 | """An AbstractSampledStat that gives the min over its samples.""" 8 | __slots__ = ('_initial_value', '_samples', '_current') 9 | 10 | def __init__(self): 11 | super(Min, self).__init__(float(sys.maxsize)) 12 | 13 | def update(self, sample, config, value, now): 14 | sample.value = min(sample.value, value) 15 | 16 | def combine(self, samples, config, now): 17 | if not samples: 18 | return float(sys.maxsize) 19 | return 
float(min(sample.value for sample in samples)) 20 | --------------------------------------------------------------------------------
/kafka/sasl/abc.py: -------------------------------------------------------------------------------- 1 | import abc 2 | 3 | 4 | class SaslMechanism(object, metaclass=abc.ABCMeta): 5 | @abc.abstractmethod 6 | def __init__(self, **config): 7 | pass 8 | 9 | @abc.abstractmethod 10 | def auth_bytes(self): 11 | pass 12 | 13 | @abc.abstractmethod 14 | def receive(self, auth_bytes): 15 | pass 16 | 17 | @abc.abstractmethod 18 | def is_done(self): 19 | pass 20 | 21 | @abc.abstractmethod 22 | def is_authenticated(self): 23 | pass 24 | 25 | def auth_details(self): 26 | if not self.is_authenticated(): 27 | raise RuntimeError('Not authenticated yet!') 28 | return 'Authenticated via SASL' 29 | --------------------------------------------------------------------------------
/kafka/cli/admin/topics/create.py: -------------------------------------------------------------------------------- 1 | from kafka.admin.new_topic import NewTopic 2 | 3 | 4 | class CreateTopic: 5 | 6 | @classmethod 7 | def add_subparser(cls, subparsers): 8 | parser = subparsers.add_parser('create', help='Create a Kafka Topic') 9 | parser.add_argument('-t', '--topic', type=str, required=True) 10 | parser.add_argument('--num-partitions', type=int, default=-1) 11 | parser.add_argument('--replication-factor', type=int, default=-1) 12 | parser.set_defaults(command=cls.command) 13 | 14 | @classmethod 15 | def command(cls, client, args): 16 | return client.create_topics([NewTopic(args.topic, args.num_partitions, args.replication_factor)]) 17 | --------------------------------------------------------------------------------
/kafka/admin/__init__.py: -------------------------------------------------------------------------------- 1 | from kafka.admin.config_resource import ConfigResource, ConfigResourceType 2 | from kafka.admin.client import KafkaAdminClient 3 | from kafka.admin.acl_resource import (ACL, ACLFilter, ResourcePattern, ResourcePatternFilter, ACLOperation, 4 | ResourceType, ACLPermissionType, ACLResourcePatternType) 5 | from kafka.admin.new_topic import NewTopic 6 | from kafka.admin.new_partitions import NewPartitions 7 | 8 | __all__ = [ 9 | 'ConfigResource', 'ConfigResourceType', 'KafkaAdminClient', 'NewTopic', 'NewPartitions', 'ACL', 'ACLFilter', 10 | 'ResourcePattern', 'ResourcePatternFilter', 'ACLOperation', 'ResourceType', 'ACLPermissionType', 11 | 'ACLResourcePatternType' 12 | ] 13 | --------------------------------------------------------------------------------
/kafka/cli/admin/consumer_groups/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from .delete import DeleteConsumerGroups 4 | from .describe import DescribeConsumerGroups 5 | from .list import ListConsumerGroups 6 | from .list_offsets import ListConsumerGroupOffsets 7 | 8 | 9 | class ConsumerGroupsSubCommand: 10 | 11 | @classmethod 12 | def add_subparser(cls, subparsers): 13 | parser = subparsers.add_parser('consumer-groups', help='Manage Kafka Consumer Groups') 14 | commands = parser.add_subparsers() 15 | for cmd in [ListConsumerGroups, DescribeConsumerGroups, ListConsumerGroupOffsets, DeleteConsumerGroups]: 16 | cmd.add_subparser(commands) 17 | parser.set_defaults(command=lambda *_args: parser.print_help() or sys.exit(2)) 18 | --------------------------------------------------------------------------------
/kafka/admin/new_partitions.py: 
-------------------------------------------------------------------------------- 1 | class NewPartitions(object): 2 | """A class for new partition creation on existing topics. Note that the length of new_assignments, if specified, 3 | must be the difference between the new total number of partitions and the existing number of partitions. 4 | Arguments: 5 | total_count (int): the total number of partitions that should exist on the topic 6 | new_assignments ([[int]]): an array of arrays of replica assignments for new partitions. 7 | If not set, broker assigns replicas per an internal algorithm. 8 | """ 9 | 10 | def __init__( 11 | self, 12 | total_count, 13 | new_assignments=None 14 | ): 15 | self.total_count = total_count 16 | self.new_assignments = new_assignments 17 | -------------------------------------------------------------------------------- /test/test_api_object_implementation.py: -------------------------------------------------------------------------------- 1 | import abc 2 | import pytest 3 | 4 | from kafka.protocol.api import Request 5 | from kafka.protocol.api import Response 6 | 7 | 8 | attr_names = [n for n in dir(Request) if isinstance(getattr(Request, n), abc.abstractproperty)] 9 | @pytest.mark.parametrize('klass', Request.__subclasses__()) 10 | @pytest.mark.parametrize('attr_name', attr_names) 11 | def test_request_type_conformance(klass, attr_name): 12 | assert hasattr(klass, attr_name) 13 | 14 | attr_names = [n for n in dir(Response) if isinstance(getattr(Response, n), abc.abstractproperty)] 15 | @pytest.mark.parametrize('klass', Response.__subclasses__()) 16 | @pytest.mark.parametrize('attr_name', attr_names) 17 | def test_response_type_conformance(klass, attr_name): 18 | assert hasattr(klass, attr_name) 19 | -------------------------------------------------------------------------------- /kafka/metrics/stats/avg.py: -------------------------------------------------------------------------------- 1 | from kafka.metrics.stats.sampled_stat import AbstractSampledStat 2 | 3 | 4 | class Avg(AbstractSampledStat): 5 | """ 6 | An AbstractSampledStat that maintains a simple average over its samples. 
7 | """ 8 | __slots__ = ('_initial_value', '_samples', '_current') 9 | 10 | def __init__(self): 11 | super(Avg, self).__init__(0.0) 12 | 13 | def update(self, sample, config, value, now): 14 | sample.value += value 15 | 16 | def combine(self, samples, config, now): 17 | total_sum = 0 18 | total_count = 0 19 | for sample in samples: 20 | total_sum += sample.value 21 | total_count += sample.event_count 22 | if not total_count: 23 | return 0 24 | return float(total_sum) / total_count 25 | -------------------------------------------------------------------------------- /kafka/metrics/measurable.py: -------------------------------------------------------------------------------- 1 | import abc 2 | 3 | 4 | class AbstractMeasurable(object): 5 | """A measurable quantity that can be registered as a metric""" 6 | @abc.abstractmethod 7 | def measure(self, config, now): 8 | """ 9 | Measure this quantity and return the result 10 | 11 | Arguments: 12 | config (MetricConfig): The configuration for this metric 13 | now (int): The POSIX time in milliseconds the measurement 14 | is being taken 15 | 16 | Returns: 17 | The measured value 18 | """ 19 | raise NotImplementedError 20 | 21 | 22 | class AnonMeasurable(AbstractMeasurable): 23 | def __init__(self, measure_fn): 24 | self._measure_fn = measure_fn 25 | 26 | def measure(self, config, now): 27 | return float(self._measure_fn(config, now)) 28 | -------------------------------------------------------------------------------- /kafka/protocol/frame.py: -------------------------------------------------------------------------------- 1 | class KafkaBytes(bytearray): 2 | def __init__(self, size): 3 | super(KafkaBytes, self).__init__(size) 4 | self._idx = 0 5 | 6 | def read(self, nbytes=None): 7 | if nbytes is None: 8 | nbytes = len(self) - self._idx 9 | start = self._idx 10 | self._idx += nbytes 11 | if self._idx > len(self): 12 | self._idx = len(self) 13 | return bytes(self[start:self._idx]) 14 | 15 | def write(self, data): 16 | start = self._idx 17 | self._idx += len(data) 18 | self[start:self._idx] = data 19 | 20 | def seek(self, idx): 21 | self._idx = idx 22 | 23 | def tell(self): 24 | return self._idx 25 | 26 | def __str__(self): 27 | return 'KafkaBytes(%d)' % len(self) 28 | 29 | def __repr__(self): 30 | return str(self) 31 | -------------------------------------------------------------------------------- /test/test_util.py: -------------------------------------------------------------------------------- 1 | # pylint: skip-file 2 | 3 | import pytest 4 | 5 | from kafka.util import ensure_valid_topic_name 6 | 7 | @pytest.mark.parametrize(('topic_name', 'expectation'), [ 8 | (0, pytest.raises(TypeError)), 9 | (None, pytest.raises(TypeError)), 10 | ('', pytest.raises(ValueError)), 11 | ('.', pytest.raises(ValueError)), 12 | ('..', pytest.raises(ValueError)), 13 | ('a' * 250, pytest.raises(ValueError)), 14 | ('abc/123', pytest.raises(ValueError)), 15 | ('/abc/123', pytest.raises(ValueError)), 16 | ('/abc123', pytest.raises(ValueError)), 17 | ('name with space', pytest.raises(ValueError)), 18 | ('name*with*stars', pytest.raises(ValueError)), 19 | ('name+with+plus', pytest.raises(ValueError)), 20 | ]) 21 | def test_topic_name_validation(topic_name, expectation): 22 | with expectation: 23 | ensure_valid_topic_name(topic_name) 24 | 25 | -------------------------------------------------------------------------------- /kafka/metrics/compound_stat.py: -------------------------------------------------------------------------------- 1 | import abc 2 | 3 | from 
kafka.metrics.stat import AbstractStat 4 | 5 | 6 | class AbstractCompoundStat(AbstractStat, metaclass=abc.ABCMeta): 7 | """ 8 | A compound stat is a stat where a single measurement and associated 9 | data structure feeds many metrics. This is the example for a 10 | histogram which has many associated percentiles. 11 | """ 12 | def stats(self): 13 | """ 14 | Return list of NamedMeasurable 15 | """ 16 | raise NotImplementedError 17 | 18 | 19 | class NamedMeasurable(object): 20 | __slots__ = ('_name', '_stat') 21 | 22 | def __init__(self, metric_name, measurable_stat): 23 | self._name = metric_name 24 | self._stat = measurable_stat 25 | 26 | @property 27 | def name(self): 28 | return self._name 29 | 30 | @property 31 | def stat(self): 32 | return self._stat 33 | -------------------------------------------------------------------------------- /servers/0.8.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | dataDir={tmp_dir} 17 | clientPortAddress={host} 18 | clientPort={port} 19 | maxClientCnxns=0 20 | -------------------------------------------------------------------------------- /servers/0.8.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | dataDir={tmp_dir} 17 | clientPortAddress={host} 18 | clientPort={port} 19 | maxClientCnxns=0 20 | -------------------------------------------------------------------------------- /kafka/__init__.py: -------------------------------------------------------------------------------- 1 | __title__ = 'kafka' 2 | from kafka.version import __version__ 3 | __author__ = 'Dana Powers' 4 | __license__ = 'Apache License 2.0' 5 | __copyright__ = 'Copyright 2025 Dana Powers, David Arthur, and Contributors' 6 | 7 | # Set default logging handler to avoid "No handler found" warnings. 
8 | import logging 9 | 10 | logging.getLogger(__name__).addHandler(logging.NullHandler()) 11 | 12 | 13 | from kafka.admin import KafkaAdminClient 14 | from kafka.client_async import KafkaClient 15 | from kafka.consumer import KafkaConsumer 16 | from kafka.consumer.subscription_state import ConsumerRebalanceListener 17 | from kafka.producer import KafkaProducer 18 | from kafka.conn import BrokerConnection 19 | from kafka.serializer import Serializer, Deserializer 20 | from kafka.structs import TopicPartition, OffsetAndMetadata 21 | 22 | 23 | __all__ = [ 24 | 'BrokerConnection', 'ConsumerRebalanceListener', 'KafkaAdminClient', 25 | 'KafkaClient', 'KafkaConsumer', 'KafkaProducer', 26 | ] 27 | --------------------------------------------------------------------------------
/test/test_partition_movements.py: -------------------------------------------------------------------------------- 1 | from kafka.structs import TopicPartition 2 | 3 | from kafka.coordinator.assignors.sticky.partition_movements import PartitionMovements 4 | 5 | 6 | def test_empty_movements_are_sticky(): 7 | partition_movements = PartitionMovements() 8 | assert partition_movements.are_sticky() 9 | 10 | 11 | def test_sticky_movements(): 12 | partition_movements = PartitionMovements() 13 | partition_movements.move_partition(TopicPartition('t', 1), 'C1', 'C2') 14 | partition_movements.move_partition(TopicPartition('t', 1), 'C2', 'C3') 15 | partition_movements.move_partition(TopicPartition('t', 1), 'C3', 'C1') 16 | assert partition_movements.are_sticky() 17 | 18 | 19 | def test_should_detect_non_sticky_assignment(): 20 | partition_movements = PartitionMovements() 21 | partition_movements.move_partition(TopicPartition('t', 1), 'C1', 'C2') 22 | partition_movements.move_partition(TopicPartition('t', 2), 'C2', 'C1') 23 | assert not partition_movements.are_sticky() 24 | --------------------------------------------------------------------------------
/docs/compatibility.rst: -------------------------------------------------------------------------------- 1 | Compatibility 2 | ------------- 3 | 4 | .. image:: https://img.shields.io/badge/kafka-4.0--0.8-brightgreen.svg 5 | :target: https://kafka-python.readthedocs.io/compatibility.html 6 | .. image:: https://img.shields.io/pypi/pyversions/kafka-python.svg 7 | :target: https://pypi.python.org/pypi/kafka-python 8 | 9 | kafka-python is compatible with (and tested against) broker versions 4.0 10 | through 0.8.0. kafka-python is not compatible with the 0.8.2-beta release. 11 | 12 | Because the kafka server protocol is backwards compatible, kafka-python is 13 | expected to work with newer broker releases as well. 14 | 15 | Although kafka-python is tested and expected to work on recent broker versions, 16 | not all features are supported. Please see open GitHub issues for feature tracking. 17 | PRs welcome! 18 | 19 | kafka-python is tested on python 3.8-3.14. 20 | python 2.7 was supported through kafka-python release 2.3. 21 | 22 | Builds and tests run via GitHub Actions workflows. 
See https://github.com/dpkp/kafka-python/actions 23 | -------------------------------------------------------------------------------- /kafka/coordinator/subscription.py: -------------------------------------------------------------------------------- 1 | class Subscription(object): 2 | __slots__ = ('_metadata', '_group_instance_id') 3 | def __init__(self, metadata, group_instance_id): 4 | self._metadata = metadata 5 | self._group_instance_id = group_instance_id 6 | 7 | @property 8 | def version(self): 9 | return self._metadata.version 10 | 11 | @property 12 | def user_data(self): 13 | return self._metadata.user_data 14 | 15 | @property 16 | def topics(self): 17 | return self._metadata.topics 18 | 19 | # Alias for old interface / name 20 | subscription = topics 21 | 22 | @property 23 | def group_instance_id(self): 24 | return self._group_instance_id 25 | 26 | def encode(self): 27 | return self._metadata.encode() 28 | 29 | def __eq__(self, other): 30 | return ( 31 | isinstance(other, Subscription) and 32 | self._metadata == other._metadata and 33 | self._group_instance_id == other._group_instance_id 34 | ) 35 | -------------------------------------------------------------------------------- /kafka/cli/admin/configs/describe.py: -------------------------------------------------------------------------------- 1 | from kafka.admin.config_resource import ConfigResource 2 | 3 | 4 | class DescribeConfigs: 5 | 6 | @classmethod 7 | def add_subparser(cls, subparsers): 8 | parser = subparsers.add_parser('describe', help='Describe Kafka Configs') 9 | parser.add_argument('-t', '--topic', type=str, action='append', dest='topics', default=[]) 10 | parser.add_argument('-b', '--broker', type=str, action='append', dest='brokers', default=[]) 11 | parser.set_defaults(command=cls.command) 12 | 13 | @classmethod 14 | def command(cls, client, args): 15 | resources = [] 16 | for topic in args.topics: 17 | resources.append(ConfigResource('TOPIC', topic)) 18 | for broker in args.brokers: 19 | resources.append(ConfigResource('BROKER', broker)) 20 | 21 | response = client.describe_configs(resources) 22 | return list(zip([(r.resource_type.name, r.name) for r in resources], [{str(vals[0]): vals[1] for vals in r.resources[0][4]} for r in response])) 23 | -------------------------------------------------------------------------------- /test/test_package.py: -------------------------------------------------------------------------------- 1 | class TestPackage: 2 | def test_top_level_namespace(self): 3 | import kafka as kafka1 4 | assert kafka1.KafkaConsumer.__name__ == "KafkaConsumer" 5 | assert kafka1.consumer.__name__ == "kafka.consumer" 6 | assert kafka1.codec.__name__ == "kafka.codec" 7 | 8 | def test_submodule_namespace(self): 9 | import kafka.client_async as client1 10 | assert client1.__name__ == "kafka.client_async" 11 | 12 | from kafka import client_async as client2 13 | assert client2.__name__ == "kafka.client_async" 14 | 15 | from kafka.client_async import KafkaClient as KafkaClient1 16 | assert KafkaClient1.__name__ == "KafkaClient" 17 | 18 | from kafka import KafkaClient as KafkaClient2 19 | assert KafkaClient2.__name__ == "KafkaClient" 20 | 21 | from kafka.codec import gzip_encode as gzip_encode1 22 | assert gzip_encode1.__name__ == "gzip_encode" 23 | 24 | from kafka.codec import snappy_encode 25 | assert snappy_encode.__name__ == "snappy_encode" 26 | -------------------------------------------------------------------------------- /kafka/admin/new_topic.py: 
-------------------------------------------------------------------------------- 1 | class NewTopic(object): 2 |     """ A class for new topic creation 3 |     Arguments: 4 |         name (string): name of the topic 5 |         num_partitions (int): number of partitions 6 |             or -1 if replica_assignments is specified 7 |         replication_factor (int): replication factor or -1 if 8 |             replica_assignments is specified 9 |         replica_assignments (dict of int: [int]): A mapping containing 10 |             partition ids and the replicas to assign to them. 11 |         topic_configs (dict of str: str): A mapping of config key 12 |             and value for the topic. 13 |     """ 14 |     def __init__( 15 |         self, 16 |         name, 17 |         num_partitions=-1, 18 |         replication_factor=-1, 19 |         replica_assignments=None, 20 |         topic_configs=None, 21 |     ): 22 |         self.name = name 23 |         self.num_partitions = num_partitions 24 |         self.replication_factor = replication_factor 25 |         self.replica_assignments = replica_assignments or {} 26 |         self.topic_configs = topic_configs or {} 27 | -------------------------------------------------------------------------------- /kafka/protocol/sasl_handshake.py: -------------------------------------------------------------------------------- 1 | from kafka.protocol.api import Request, Response 2 | from kafka.protocol.types import Array, Int16, Schema, String 3 | 4 | 5 | class SaslHandshakeResponse_v0(Response): 6 |     API_KEY = 17 7 |     API_VERSION = 0 8 |     SCHEMA = Schema( 9 |         ('error_code', Int16), 10 |         ('enabled_mechanisms', Array(String('utf-8'))) 11 |     ) 12 | 13 | 14 | class SaslHandshakeResponse_v1(Response): 15 |     API_KEY = 17 16 |     API_VERSION = 1 17 |     SCHEMA = SaslHandshakeResponse_v0.SCHEMA 18 | 19 | 20 | class SaslHandshakeRequest_v0(Request): 21 |     API_KEY = 17 22 |     API_VERSION = 0 23 |     RESPONSE_TYPE = SaslHandshakeResponse_v0 24 |     SCHEMA = Schema( 25 |         ('mechanism', String('utf-8')) 26 |     ) 27 | 28 | 29 | class SaslHandshakeRequest_v1(Request): 30 |     API_KEY = 17 31 |     API_VERSION = 1 32 |     RESPONSE_TYPE = SaslHandshakeResponse_v1 33 |     SCHEMA = SaslHandshakeRequest_v0.SCHEMA 34 | 35 | 36 | SaslHandshakeRequest = [SaslHandshakeRequest_v0, SaslHandshakeRequest_v1] 37 | SaslHandshakeResponse = [SaslHandshakeResponse_v0, SaslHandshakeResponse_v1] 38 | -------------------------------------------------------------------------------- /kafka/metrics/kafka_metric.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | 4 | class KafkaMetric(object): 5 |     __slots__ = ('_metric_name', '_measurable', '_config') 6 | 7 |     # NOTE java constructor takes a lock instance 8 |     def __init__(self, metric_name, measurable, config): 9 |         if not metric_name: 10 |             raise ValueError('metric_name must be non-empty') 11 |         if not measurable: 12 |             raise ValueError('measurable must be provided') 13 |         self._metric_name = metric_name 14 |         self._measurable = measurable 15 |         self._config = config 16 | 17 |     @property 18 |     def metric_name(self): 19 |         return self._metric_name 20 | 21 |     @property 22 |     def measurable(self): 23 |         return self._measurable 24 | 25 |     @property 26 |     def config(self): 27 |         return self._config 28 | 29 |     @config.setter 30 |     def config(self, config): 31 |         self._config = config 32 | 33 |     def value(self, time_ms=None): 34 |         if time_ms is None: 35 |             time_ms = time.time() * 1000 36 |         return self._measurable.measure(self._config, time_ms) 37 | -------------------------------------------------------------------------------- /kafka/coordinator/protocol.py: -------------------------------------------------------------------------------- 1 |
from kafka.protocol.struct import Struct 2 | from kafka.protocol.types import Array, Bytes, Int16, Int32, Schema, String 3 | from kafka.structs import TopicPartition 4 | 5 | 6 | class ConsumerProtocolMemberMetadata_v0(Struct): 7 | SCHEMA = Schema( 8 | ('version', Int16), 9 | ('topics', Array(String('utf-8'))), 10 | ('user_data', Bytes)) 11 | 12 | 13 | class ConsumerProtocolMemberAssignment_v0(Struct): 14 | SCHEMA = Schema( 15 | ('version', Int16), 16 | ('assignment', Array( 17 | ('topic', String('utf-8')), 18 | ('partitions', Array(Int32)))), 19 | ('user_data', Bytes)) 20 | 21 | def partitions(self): 22 | return [TopicPartition(topic, partition) 23 | for topic, partitions in self.assignment # pylint: disable-msg=no-member 24 | for partition in partitions] 25 | 26 | 27 | class ConsumerProtocol_v0(object): 28 | PROTOCOL_TYPE = 'consumer' 29 | METADATA = ConsumerProtocolMemberMetadata_v0 30 | ASSIGNMENT = ConsumerProtocolMemberAssignment_v0 31 | 32 | 33 | ConsumerProtocol = [ConsumerProtocol_v0] 34 | -------------------------------------------------------------------------------- /servers/0.10.0.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.10.0.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 
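# The {tmp_dir}, {host}, and {port} tokens below are format-string
# placeholders that the test harness fills in at startup; rendered, the
# values might look like this (illustrative assumption only):
#   dataDir=/tmp/zookeeper-fixture
#   clientPort=2181
#   clientPortAddress=127.0.0.1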
16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.10.1.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.10.2.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.10.2.2/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 
4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.11.0.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.11.0.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 
16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.11.0.2/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.11.0.3/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.8.1.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 
4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.8.2.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.8.2.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 
16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.8.2.2/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.9.0.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.9.0.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 
4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/1.0.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/1.0.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 
16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/1.0.2/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/1.1.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/1.1.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. 
You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/2.0.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/2.0.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 
16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/2.1.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/2.1.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/2.2.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. 
You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/2.3.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/trunk/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 
16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # Read the Docs configuration file for Sphinx projects 2 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 3 | 4 | # Required 5 | version: 2 6 | 7 | # Set the OS, Python version and other tools you might need 8 | build: 9 | os: ubuntu-22.04 10 | tools: 11 | python: "3.12" 12 | # You can also specify other tool versions: 13 | # nodejs: "20" 14 | # rust: "1.70" 15 | # golang: "1.20" 16 | 17 | # Build documentation in the "docs/" directory with Sphinx 18 | sphinx: 19 | configuration: docs/conf.py 20 | # You can configure Sphinx to use a different builder, for instance use the dirhtml builder for simpler URLs 21 | # builder: "dirhtml" 22 | # Fail on all warnings to avoid broken references 23 | # fail_on_warning: true 24 | 25 | # Optionally build your docs in additional formats such as PDF and ePub 26 | # formats: 27 | # - pdf 28 | # - epub 29 | 30 | # Optional but recommended, declare the Python requirements required 31 | # to build your documentation 32 | # See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html 33 | python: 34 | install: 35 | - requirements: docs/requirements.txt 36 | -------------------------------------------------------------------------------- /servers/2.4.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | admin.enableServer=false 23 | -------------------------------------------------------------------------------- /servers/2.5.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 
4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | admin.enableServer=false 23 | -------------------------------------------------------------------------------- /servers/2.6.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | admin.enableServer=false 23 | -------------------------------------------------------------------------------- /servers/resources/default/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 
16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | admin.enableServer=false 23 | -------------------------------------------------------------------------------- /kafka/admin/config_resource.py: -------------------------------------------------------------------------------- 1 | from enum import IntEnum 2 | 3 | 4 | class ConfigResourceType(IntEnum): 5 |     """An enumerated type of config resources""" 6 | 7 |     BROKER = 4 8 |     TOPIC = 2 9 | 10 | 11 | class ConfigResource(object): 12 |     """A class for specifying config resources. 13 |     Arguments: 14 |         resource_type (ConfigResourceType): the type of kafka resource 15 |         name (string): The name of the kafka resource 16 |         configs ({key: value}): A mapping of config keys to values. 17 |     """ 18 | 19 |     def __init__( 20 |         self, 21 |         resource_type, 22 |         name, 23 |         configs=None 24 |     ): 25 |         if not isinstance(resource_type, ConfigResourceType): 26 |             resource_type = ConfigResourceType[str(resource_type).upper()] # pylint: disable-msg=unsubscriptable-object 27 |         self.resource_type = resource_type 28 |         self.name = name 29 |         self.configs = configs 30 | 31 |     def __str__(self): 32 |         return "ConfigResource %s=%s" % (self.resource_type, self.name) 33 | 34 |     def __repr__(self): 35 |         return "ConfigResource(%s, %s, %s)" % (self.resource_type, self.name, self.configs) 36 | -------------------------------------------------------------------------------- /kafka/sasl/__init__.py: -------------------------------------------------------------------------------- 1 | import platform 2 | 3 | from kafka.sasl.gssapi import SaslMechanismGSSAPI 4 | from kafka.sasl.msk import SaslMechanismAwsMskIam 5 | from kafka.sasl.oauth import SaslMechanismOAuth 6 | from kafka.sasl.plain import SaslMechanismPlain 7 | from kafka.sasl.scram import SaslMechanismScram 8 | from kafka.sasl.sspi import SaslMechanismSSPI 9 | 10 | 11 | SASL_MECHANISMS = {} 12 | 13 | 14 | def register_sasl_mechanism(name, klass, overwrite=False): 15 |     if not overwrite and name in SASL_MECHANISMS: 16 |         raise ValueError('Sasl mechanism %s already defined!'
% name) 17 | SASL_MECHANISMS[name] = klass 18 | 19 | 20 | def get_sasl_mechanism(name): 21 | return SASL_MECHANISMS[name] 22 | 23 | 24 | register_sasl_mechanism('AWS_MSK_IAM', SaslMechanismAwsMskIam) 25 | if platform.system() == 'Windows': 26 | register_sasl_mechanism('GSSAPI', SaslMechanismSSPI) 27 | else: 28 | register_sasl_mechanism('GSSAPI', SaslMechanismGSSAPI) 29 | register_sasl_mechanism('OAUTHBEARER', SaslMechanismOAuth) 30 | register_sasl_mechanism('PLAIN', SaslMechanismPlain) 31 | register_sasl_mechanism('SCRAM-SHA-256', SaslMechanismScram) 32 | register_sasl_mechanism('SCRAM-SHA-512', SaslMechanismScram) 33 | -------------------------------------------------------------------------------- /test/protocol/test_api.py: -------------------------------------------------------------------------------- 1 | import struct 2 | 3 | import pytest 4 | 5 | from kafka.protocol.api import RequestHeader 6 | from kafka.protocol.fetch import FetchRequest 7 | from kafka.protocol.find_coordinator import FindCoordinatorRequest 8 | from kafka.protocol.metadata import MetadataRequest 9 | 10 | 11 | def test_encode_message_header(): 12 | expect = b''.join([ 13 | struct.pack('>h', 10), # API Key 14 | struct.pack('>h', 0), # API Version 15 | struct.pack('>i', 4), # Correlation Id 16 | struct.pack('>h', len('client3')), # Length of clientId 17 | b'client3', # ClientId 18 | ]) 19 | 20 | req = FindCoordinatorRequest[0]('foo') 21 | header = RequestHeader(req, correlation_id=4, client_id='client3') 22 | assert header.encode() == expect 23 | 24 | 25 | def test_struct_unrecognized_kwargs(): 26 | try: 27 | _mr = MetadataRequest[0](topicz='foo') 28 | assert False, 'Structs should not allow unrecognized kwargs' 29 | except ValueError: 30 | pass 31 | 32 | 33 | def test_struct_missing_kwargs(): 34 | fr = FetchRequest[0](max_wait_time=100) 35 | assert fr.min_bytes is None 36 | -------------------------------------------------------------------------------- /servers/trunk/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
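# With the ConversionPattern configured below ([%d] %p %m (%c)%n), a rendered
# broker log line looks roughly like this (illustrative sample, not captured
# output):
#   [2024-01-01 12:00:00,000] INFO starting (kafka.server.KafkaServer)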
15 | 16 | log4j.rootLogger=INFO, stdout 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.logger.kafka=DEBUG, stdout 23 | log4j.logger.org.I0Itec.zkclient.ZkClient=INFO, stdout 24 | log4j.logger.org.apache.zookeeper=INFO, stdout 25 | -------------------------------------------------------------------------------- /test/test_producer.py: -------------------------------------------------------------------------------- 1 | import gc 2 | import platform 3 | import threading 4 | 5 | import pytest 6 | 7 | from kafka import KafkaProducer 8 | from kafka.cluster import ClusterMetadata 9 | from kafka.producer.transaction_manager import TransactionManager, ProducerIdAndEpoch 10 | 11 | 12 | def test_kafka_producer_thread_close(): 13 | threads = threading.active_count() 14 | producer = KafkaProducer(api_version=(2, 1)) # set api_version explicitly to avoid auto-detection 15 | assert threading.active_count() == threads + 1 16 | producer.close() 17 | assert threading.active_count() == threads 18 | 19 | 20 | def test_idempotent_producer_reset_producer_id(): 21 | transaction_manager = TransactionManager( 22 | transactional_id=None, 23 | transaction_timeout_ms=1000, 24 | retry_backoff_ms=100, 25 | api_version=(0, 11), 26 | metadata=ClusterMetadata(), 27 | ) 28 | 29 | test_producer_id_and_epoch = ProducerIdAndEpoch(123, 456) 30 | transaction_manager.set_producer_id_and_epoch(test_producer_id_and_epoch) 31 | assert transaction_manager.producer_id_and_epoch == test_producer_id_and_epoch 32 | transaction_manager.reset_producer_id() 33 | assert transaction_manager.producer_id_and_epoch == ProducerIdAndEpoch(-1, -1) 34 | -------------------------------------------------------------------------------- /kafka/protocol/init_producer_id.py: -------------------------------------------------------------------------------- 1 | from kafka.protocol.api import Request, Response 2 | from kafka.protocol.types import Int16, Int32, Int64, Schema, String 3 | 4 | 5 | class InitProducerIdResponse_v0(Response): 6 | API_KEY = 22 7 | API_VERSION = 0 8 | SCHEMA = Schema( 9 | ('throttle_time_ms', Int32), 10 | ('error_code', Int16), 11 | ('producer_id', Int64), 12 | ('producer_epoch', Int16), 13 | ) 14 | 15 | 16 | class InitProducerIdResponse_v1(Response): 17 | API_KEY = 22 18 | API_VERSION = 1 19 | SCHEMA = InitProducerIdResponse_v0.SCHEMA 20 | 21 | 22 | class InitProducerIdRequest_v0(Request): 23 | API_KEY = 22 24 | API_VERSION = 0 25 | RESPONSE_TYPE = InitProducerIdResponse_v0 26 | SCHEMA = Schema( 27 | ('transactional_id', String('utf-8')), 28 | ('transaction_timeout_ms', Int32), 29 | ) 30 | 31 | 32 | class InitProducerIdRequest_v1(Request): 33 | API_KEY = 22 34 | API_VERSION = 1 35 | RESPONSE_TYPE = InitProducerIdResponse_v1 36 | SCHEMA = InitProducerIdRequest_v0.SCHEMA 37 | 38 | 39 | InitProducerIdRequest = [ 40 | InitProducerIdRequest_v0, InitProducerIdRequest_v1, 41 | ] 42 | InitProducerIdResponse = [ 43 | InitProducerIdResponse_v0, InitProducerIdResponse_v1, 44 | ] 45 | -------------------------------------------------------------------------------- /kafka/protocol/sasl_authenticate.py: -------------------------------------------------------------------------------- 1 | from kafka.protocol.api import Request, Response 2 | from kafka.protocol.types import Bytes, Int16, Int64, Schema, String 3 | 4 | 5 | class SaslAuthenticateResponse_v0(Response): 6 | API_KEY = 36 7 
| API_VERSION = 0 8 | SCHEMA = Schema( 9 | ('error_code', Int16), 10 | ('error_message', String('utf-8')), 11 | ('auth_bytes', Bytes)) 12 | 13 | 14 | class SaslAuthenticateResponse_v1(Response): 15 | API_KEY = 36 16 | API_VERSION = 1 17 | SCHEMA = Schema( 18 | ('error_code', Int16), 19 | ('error_message', String('utf-8')), 20 | ('auth_bytes', Bytes), 21 | ('session_lifetime_ms', Int64)) 22 | 23 | 24 | class SaslAuthenticateRequest_v0(Request): 25 | API_KEY = 36 26 | API_VERSION = 0 27 | RESPONSE_TYPE = SaslAuthenticateResponse_v0 28 | SCHEMA = Schema( 29 | ('auth_bytes', Bytes)) 30 | 31 | 32 | class SaslAuthenticateRequest_v1(Request): 33 | API_KEY = 36 34 | API_VERSION = 1 35 | RESPONSE_TYPE = SaslAuthenticateResponse_v1 36 | SCHEMA = SaslAuthenticateRequest_v0.SCHEMA 37 | 38 | 39 | SaslAuthenticateRequest = [SaslAuthenticateRequest_v0, SaslAuthenticateRequest_v1] 40 | SaslAuthenticateResponse = [SaslAuthenticateResponse_v0, SaslAuthenticateResponse_v1] 41 | -------------------------------------------------------------------------------- /kafka/metrics/quota.py: -------------------------------------------------------------------------------- 1 | class Quota(object): 2 | """An upper or lower bound for metrics""" 3 | __slots__ = ('_bound', '_upper') 4 | 5 | def __init__(self, bound, is_upper): 6 | self._bound = bound 7 | self._upper = is_upper 8 | 9 | @staticmethod 10 | def upper_bound(upper_bound): 11 | return Quota(upper_bound, True) 12 | 13 | @staticmethod 14 | def lower_bound(lower_bound): 15 | return Quota(lower_bound, False) 16 | 17 | def is_upper_bound(self): 18 | return self._upper 19 | 20 | @property 21 | def bound(self): 22 | return self._bound 23 | 24 | def is_acceptable(self, value): 25 | return ((self.is_upper_bound() and value <= self.bound) or 26 | (not self.is_upper_bound() and value >= self.bound)) 27 | 28 | def __hash__(self): 29 | prime = 31 30 | result = prime + self.bound 31 | return prime * result + self.is_upper_bound() 32 | 33 | def __eq__(self, other): 34 | if self is other: 35 | return True 36 | return (isinstance(self, type(other)) and 37 | self.bound == other.bound and 38 | self.is_upper_bound() == other.is_upper_bound()) 39 | 40 | def __ne__(self, other): 41 | return not self.__eq__(other) 42 | -------------------------------------------------------------------------------- /kafka/metrics/metric_config.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | 4 | class MetricConfig(object): 5 | """Configuration values for metrics""" 6 | __slots__ = ('quota', '_samples', 'event_window', 'time_window_ms', 'tags') 7 | 8 | def __init__(self, quota=None, samples=2, event_window=sys.maxsize, 9 | time_window_ms=30 * 1000, tags=None): 10 | """ 11 | Arguments: 12 | quota (Quota, optional): Upper or lower bound of a value. 13 | samples (int, optional): Max number of samples kept per metric. 14 | event_window (int, optional): Max number of values per sample. 15 | time_window_ms (int, optional): Max age of an individual sample. 16 | tags (dict of {str: str}, optional): Tags for each metric. 
17 | """ 18 | self.quota = quota 19 | self._samples = samples 20 | self.event_window = event_window 21 | self.time_window_ms = time_window_ms 22 | # tags should be OrderedDict (not supported in py26) 23 | self.tags = tags if tags else {} 24 | 25 | @property 26 | def samples(self): 27 | return self._samples 28 | 29 | @samples.setter 30 | def samples(self, value): 31 | if value < 1: 32 | raise ValueError('The number of samples must be at least 1.') 33 | self._samples = value 34 | -------------------------------------------------------------------------------- /kafka/protocol/__init__.py: -------------------------------------------------------------------------------- 1 | API_KEYS = { 2 | 0: 'Produce', 3 | 1: 'Fetch', 4 | 2: 'ListOffsets', 5 | 3: 'Metadata', 6 | 4: 'LeaderAndIsr', 7 | 5: 'StopReplica', 8 | 6: 'UpdateMetadata', 9 | 7: 'ControlledShutdown', 10 | 8: 'OffsetCommit', 11 | 9: 'OffsetFetch', 12 | 10: 'FindCoordinator', 13 | 11: 'JoinGroup', 14 | 12: 'Heartbeat', 15 | 13: 'LeaveGroup', 16 | 14: 'SyncGroup', 17 | 15: 'DescribeGroups', 18 | 16: 'ListGroups', 19 | 17: 'SaslHandshake', 20 | 18: 'ApiVersions', 21 | 19: 'CreateTopics', 22 | 20: 'DeleteTopics', 23 | 21: 'DeleteRecords', 24 | 22: 'InitProducerId', 25 | 23: 'OffsetForLeaderEpoch', 26 | 24: 'AddPartitionsToTxn', 27 | 25: 'AddOffsetsToTxn', 28 | 26: 'EndTxn', 29 | 27: 'WriteTxnMarkers', 30 | 28: 'TxnOffsetCommit', 31 | 29: 'DescribeAcls', 32 | 30: 'CreateAcls', 33 | 31: 'DeleteAcls', 34 | 32: 'DescribeConfigs', 35 | 33: 'AlterConfigs', 36 | 36: 'SaslAuthenticate', 37 | 37: 'CreatePartitions', 38 | 38: 'CreateDelegationToken', 39 | 39: 'RenewDelegationToken', 40 | 40: 'ExpireDelegationToken', 41 | 41: 'DescribeDelegationToken', 42 | 42: 'DeleteGroups', 43 | 45: 'AlterPartitionReassignments', 44 | 46: 'ListPartitionReassignments', 45 | 48: 'DescribeClientQuotas', 46 | } 47 | -------------------------------------------------------------------------------- /servers/0.8.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.8.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/1.0.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/1.0.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/1.0.2/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/1.1.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/1.1.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/2.0.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/2.0.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/2.1.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/2.1.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/2.2.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/2.3.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/2.4.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/2.5.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/2.6.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.10.0.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.10.0.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.10.1.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.10.2.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.10.2.2/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.11.0.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.11.0.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.11.0.2/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.11.0.3/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.8.1.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.8.2.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.8.2.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.8.2.2/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.9.0.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.9.0.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/resources/default/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /test/protocol/test_compact.py: -------------------------------------------------------------------------------- 1 | import io 2 | import struct 3 | 4 | import pytest 5 | 6 | from kafka.protocol.types import CompactString, CompactArray, CompactBytes 7 | 8 | 9 | def test_compact_data_structs(): 10 | cs = CompactString() 11 | encoded = cs.encode(None) 12 | assert encoded == struct.pack('B', 0) 13 | decoded = cs.decode(io.BytesIO(encoded)) 14 | assert decoded is None 15 | assert b'\x01' == cs.encode('') 16 | assert '' == cs.decode(io.BytesIO(b'\x01')) 17 | encoded = cs.encode("foobarbaz") 18 | assert cs.decode(io.BytesIO(encoded)) == "foobarbaz" 19 | 20 | arr = CompactArray(CompactString()) 21 | assert arr.encode(None) == b'\x00' 22 | assert arr.decode(io.BytesIO(b'\x00')) is None 23 | enc = arr.encode([]) 24 | assert enc == b'\x01' 25 | assert [] == arr.decode(io.BytesIO(enc)) 26 | encoded = arr.encode(["foo", "bar", "baz", "quux"]) 27 | assert arr.decode(io.BytesIO(encoded)) == ["foo", "bar", "baz", "quux"] 28 | 29 | enc = CompactBytes.encode(None) 30 | assert enc == b'\x00' 31 | assert CompactBytes.decode(io.BytesIO(b'\x00')) is None 32 | enc = CompactBytes.encode(b'') 33 | assert enc == b'\x01' 34 | assert CompactBytes.decode(io.BytesIO(b'\x01')) == b'' 35 | enc = CompactBytes.encode(b'foo') 36 | assert CompactBytes.decode(io.BytesIO(enc)) == b'foo' 37 | 38 | 39 | 
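40 | # A small extra sketch (illustrative only, assuming the compact framing 41 | # exercised above): compact types prefix the payload with an unsigned varint 42 | # of length + 1, which is why None encodes to b'\x00' and '' to b'\x01'. 43 | def test_compact_string_length_prefix(): 44 |     cs = CompactString() 45 |     encoded = cs.encode('hello') 46 |     assert encoded[0] == len('hello') + 1  # varint prefix stores len + 1 47 |     assert cs.decode(io.BytesIO(encoded)) == 'hello'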
-------------------------------------------------------------------------------- /kafka/sasl/plain.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from kafka.sasl.abc import SaslMechanism 4 | 5 | 6 | log = logging.getLogger(__name__) 7 | 8 | 9 | class SaslMechanismPlain(SaslMechanism): 10 | 11 | def __init__(self, **config): 12 | if config.get('security_protocol', '') == 'SASL_PLAINTEXT': 13 | log.warning('Sending username and password in the clear') 14 | assert 'sasl_plain_username' in config, 'sasl_plain_username required for PLAIN sasl' 15 | assert 'sasl_plain_password' in config, 'sasl_plain_password required for PLAIN sasl' 16 | 17 | self.username = config['sasl_plain_username'] 18 | self.password = config['sasl_plain_password'] 19 | self._is_done = False 20 | self._is_authenticated = False 21 | 22 | def auth_bytes(self): 23 | # Send PLAIN credentials per RFC-4616: authzid NUL authcid NUL passwd 24 | return bytes('\0'.join([self.username, self.username, self.password]).encode('utf-8')) 25 | 26 | def receive(self, auth_bytes): 27 | self._is_done = True 28 | self._is_authenticated = auth_bytes == b'' 29 | 30 | def is_done(self): 31 | return self._is_done 32 | 33 | def is_authenticated(self): 34 | return self._is_authenticated 35 | 36 | def auth_details(self): 37 | if not self.is_authenticated():  # call the accessor; the bare method is always truthy 38 | raise RuntimeError('Not authenticated yet!') 39 | return 'Authenticated as %s via SASL / Plain' % self.username 40 | -------------------------------------------------------------------------------- /test/test_partitioner.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from kafka.partitioner import DefaultPartitioner, murmur2 4 | 5 | 6 | def test_default_partitioner(): 7 | partitioner = DefaultPartitioner() 8 | all_partitions = available = list(range(100)) 9 | # partitioner should return the same partition for the same key 10 | p1 = partitioner(b'foo', all_partitions, available) 11 | p2 = partitioner(b'foo', all_partitions, available) 12 | assert p1 == p2 13 | assert p1 in all_partitions 14 | 15 | # when key is None, choose one of available partitions 16 | assert partitioner(None, all_partitions, [123]) == 123 17 | 18 | # with fallback to all_partitions 19 | assert partitioner(None, all_partitions, []) in all_partitions 20 | 21 | 22 | @pytest.mark.parametrize("bytes_payload,partition_number", [ 23 | (b'', 681), (b'a', 524), (b'ab', 434), (b'abc', 107), (b'123456789', 566), 24 | (b'\x00 ', 742) 25 | ]) 26 | def test_murmur2_java_compatibility(bytes_payload, partition_number): 27 | partitioner = DefaultPartitioner() 28 | all_partitions = available = list(range(1000)) 29 | # compare with output from Kafka's org.apache.kafka.clients.producer.Partitioner 30 | assert partitioner(bytes_payload, all_partitions, available) == partition_number 31 | 32 | 33 | def test_murmur2_not_ascii(): 34 | # Verify no regression of murmur2() bug encoding py2 bytes that don't ascii encode 35 | murmur2(b'\xa4') 36 | murmur2(b'\x81' * 1000) 37 | -------------------------------------------------------------------------------- /kafka/protocol/end_txn.py: -------------------------------------------------------------------------------- 1 | from kafka.protocol.api import Request, Response 2 | from kafka.protocol.types import Boolean, Int16, Int32, Int64, Schema, String 3 | 4 | 5 | class EndTxnResponse_v0(Response): 6 | API_KEY = 26 7 | API_VERSION = 0 8 | SCHEMA = Schema( 9 | ('throttle_time_ms', Int32), 10 | ('error_code', Int16),
11 | ) 12 | 13 | 14 | class EndTxnResponse_v1(Response): 15 | API_KEY = 26 16 | API_VERSION = 1 17 | SCHEMA = EndTxnResponse_v0.SCHEMA 18 | 19 | 20 | class EndTxnResponse_v2(Response): 21 | API_KEY = 26 22 | API_VERSION = 2 23 | SCHEMA = EndTxnResponse_v1.SCHEMA 24 | 25 | 26 | class EndTxnRequest_v0(Request): 27 | API_KEY = 26 28 | API_VERSION = 0 29 | RESPONSE_TYPE = EndTxnResponse_v0 30 | SCHEMA = Schema( 31 | ('transactional_id', String('utf-8')), 32 | ('producer_id', Int64), 33 | ('producer_epoch', Int16), 34 | ('committed', Boolean)) 35 | 36 | 37 | class EndTxnRequest_v1(Request): 38 | API_KEY = 26 39 | API_VERSION = 1 40 | RESPONSE_TYPE = EndTxnResponse_v1 41 | SCHEMA = EndTxnRequest_v0.SCHEMA 42 | 43 | 44 | class EndTxnRequest_v2(Request): 45 | API_KEY = 26 46 | API_VERSION = 2 47 | RESPONSE_TYPE = EndTxnResponse_v2 48 | SCHEMA = EndTxnRequest_v1.SCHEMA 49 | 50 | 51 | EndTxnRequest = [ 52 | EndTxnRequest_v0, EndTxnRequest_v1, EndTxnRequest_v2, 53 | ] 54 | EndTxnResponse = [ 55 | EndTxnResponse_v0, EndTxnResponse_v1, EndTxnResponse_v2, 56 | ] 57 | -------------------------------------------------------------------------------- /test/sasl/test_gssapi.py: -------------------------------------------------------------------------------- 1 | from unittest import mock 2 | 3 | from kafka.sasl import get_sasl_mechanism 4 | import kafka.sasl.gssapi 5 | 6 | 7 | def test_gssapi(): 8 | config = { 9 | 'sasl_kerberos_domain_name': 'foo', 10 | 'sasl_kerberos_service_name': 'bar', 11 | } 12 | client_ctx = mock.Mock() 13 | client_ctx.step.side_effect = [b'init', b'exchange', b'complete', b'xxxx'] 14 | client_ctx.complete = False 15 | def mocked_message_wrapper(msg, *args): 16 | wrapped = mock.Mock() 17 | type(wrapped).message = mock.PropertyMock(return_value=msg) 18 | return wrapped 19 | client_ctx.unwrap.side_effect = mocked_message_wrapper 20 | client_ctx.wrap.side_effect = mocked_message_wrapper 21 | kafka.sasl.gssapi.gssapi = mock.Mock() 22 | kafka.sasl.gssapi.gssapi.SecurityContext.return_value = client_ctx 23 | gssapi = get_sasl_mechanism('GSSAPI')(**config) 24 | assert isinstance(gssapi, kafka.sasl.gssapi.SaslMechanismGSSAPI) 25 | client_ctx.step.assert_called_with(None) 26 | 27 | while not gssapi.is_done(): 28 | send_token = gssapi.auth_bytes() 29 | receive_token = send_token # not realistic, but enough for testing 30 | if send_token == b'\x01ompletebar@foo': # final wrapped message 31 | receive_token = b'' # final message gets an empty response 32 | gssapi.receive(receive_token) 33 | if client_ctx.step.call_count == 3: 34 | client_ctx.complete = True 35 | 36 | assert gssapi.is_done() 37 | assert gssapi.is_authenticated() 38 | -------------------------------------------------------------------------------- /kafka/metrics/metrics_reporter.py: -------------------------------------------------------------------------------- 1 | import abc 2 | 3 | 4 | class AbstractMetricsReporter(object, metaclass=abc.ABCMeta): 5 | """ 6 | An abstract class to allow things to listen as new metrics 7 | are created so they can be reported. 
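
    Example (illustrative only -- a minimal no-op reporter sketch; see
    DictReporter in kafka/metrics/dict_reporter.py for a real implementation):

        class NoOpReporter(AbstractMetricsReporter):
            def init(self, metrics):
                pass

            def metric_change(self, metric):
                pass

            def metric_removal(self, metric):
                pass

            def configure(self, configs):
                pass

            def close(self):
                pass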
8 | """ 9 | @abc.abstractmethod 10 | def init(self, metrics): 11 | """ 12 | This is called when the reporter is first registered 13 | to initially register all existing metrics 14 | 15 | Arguments: 16 | metrics (list of KafkaMetric): All currently existing metrics 17 | """ 18 | raise NotImplementedError 19 | 20 | @abc.abstractmethod 21 | def metric_change(self, metric): 22 | """ 23 | This is called whenever a metric is updated or added 24 | 25 | Arguments: 26 | metric (KafkaMetric) 27 | """ 28 | raise NotImplementedError 29 | 30 | @abc.abstractmethod 31 | def metric_removal(self, metric): 32 | """ 33 | This is called whenever a metric is removed 34 | 35 | Arguments: 36 | metric (KafkaMetric) 37 | """ 38 | raise NotImplementedError 39 | 40 | @abc.abstractmethod 41 | def configure(self, configs): 42 | """ 43 | Configure this class with the given key-value pairs 44 | 45 | Arguments: 46 | configs (dict of {str, ?}) 47 | """ 48 | raise NotImplementedError 49 | 50 | @abc.abstractmethod 51 | def close(self): 52 | """Called when the metrics repository is closed.""" 53 | raise NotImplementedError 54 | -------------------------------------------------------------------------------- /kafka/protocol/add_offsets_to_txn.py: -------------------------------------------------------------------------------- 1 | from kafka.protocol.api import Request, Response 2 | from kafka.protocol.types import Int16, Int32, Int64, Schema, String 3 | 4 | 5 | class AddOffsetsToTxnResponse_v0(Response): 6 | API_KEY = 25 7 | API_VERSION = 0 8 | SCHEMA = Schema( 9 | ('throttle_time_ms', Int32), 10 | ('error_code', Int16), 11 | ) 12 | 13 | 14 | class AddOffsetsToTxnResponse_v1(Response): 15 | API_KEY = 25 16 | API_VERSION = 1 17 | SCHEMA = AddOffsetsToTxnResponse_v0.SCHEMA 18 | 19 | 20 | class AddOffsetsToTxnResponse_v2(Response): 21 | API_KEY = 25 22 | API_VERSION = 2 23 | SCHEMA = AddOffsetsToTxnResponse_v1.SCHEMA 24 | 25 | 26 | class AddOffsetsToTxnRequest_v0(Request): 27 | API_KEY = 25 28 | API_VERSION = 0 29 | RESPONSE_TYPE = AddOffsetsToTxnResponse_v0 30 | SCHEMA = Schema( 31 | ('transactional_id', String('utf-8')), 32 | ('producer_id', Int64), 33 | ('producer_epoch', Int16), 34 | ('group_id', String('utf-8')), 35 | ) 36 | 37 | 38 | class AddOffsetsToTxnRequest_v1(Request): 39 | API_KEY = 25 40 | API_VERSION = 1 41 | RESPONSE_TYPE = AddOffsetsToTxnResponse_v1 42 | SCHEMA = AddOffsetsToTxnRequest_v0.SCHEMA 43 | 44 | 45 | class AddOffsetsToTxnRequest_v2(Request): 46 | API_KEY = 25 47 | API_VERSION = 2 48 | RESPONSE_TYPE = AddOffsetsToTxnResponse_v2 49 | SCHEMA = AddOffsetsToTxnRequest_v1.SCHEMA 50 | 51 | 52 | AddOffsetsToTxnRequest = [ 53 | AddOffsetsToTxnRequest_v0, AddOffsetsToTxnRequest_v1, AddOffsetsToTxnRequest_v2, 54 | ] 55 | AddOffsetsToTxnResponse = [ 56 | AddOffsetsToTxnResponse_v0, AddOffsetsToTxnResponse_v1, AddOffsetsToTxnResponse_v2, 57 | ] 58 | -------------------------------------------------------------------------------- /docs/tests.rst: -------------------------------------------------------------------------------- 1 | Tests 2 | ===== 3 | 4 | .. image:: https://coveralls.io/repos/dpkp/kafka-python/badge.svg?branch=master&service=github 5 | :target: https://coveralls.io/github/dpkp/kafka-python?branch=master 6 | .. image:: https://img.shields.io/github/actions/workflow/status/dpkp/kafka-python/python-package.yml 7 | :target: https://github.com/dpkp/kafka-python/actions/workflows/python-package.yml 8 | 9 | The test suite is run via pytest. 
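
For example (standard pytest usage; any of pytest's selection flags work):

.. code:: bash

    # run the full suite
    pytest

    # or run only tests matching a keyword expression
    pytest -k murmur2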
10 | 11 | Linting is run via pylint, but is currently skipped during CI/CD due to 12 | accumulated debt. We'd like to transition to ruff! 13 | 14 | For test coverage details, see https://coveralls.io/github/dpkp/kafka-python 15 | Coverage reporting is currently disabled as we have transitioned from travis 16 | to GH Actions and have not yet re-enabled coveralls integration. 17 | 18 | The test suite includes unit tests that mock network interfaces, as well as 19 | integration tests that set up and tear down kafka broker (and zookeeper) 20 | fixtures for client / consumer / producer testing. 21 | 22 | 23 | Unit tests 24 | ------------------ 25 | 26 | To run the tests locally, install test dependencies: 27 | 28 | .. code:: bash 29 | 30 | pip install -r requirements-dev.txt 31 | 32 | Then simply run pytest (or make test) from your preferred python + virtualenv. 33 | 34 | .. code:: bash 35 | 36 | # run protocol tests only (via pytest) 37 | pytest test/test_protocol.py 38 | 39 | # Run conn tests only (via make) 40 | PYTESTS=test/test_conn.py make test 41 | 42 | 43 | Integration tests 44 | ----------------- 45 | 46 | .. code:: bash 47 | 48 | KAFKA_VERSION=4.0.0 make test 49 | 50 | 51 | Integration tests start Kafka and Zookeeper fixtures. Make will download 52 | kafka server binaries automatically if needed. 53 | -------------------------------------------------------------------------------- /kafka/coordinator/assignors/abstract.py: -------------------------------------------------------------------------------- 1 | import abc 2 | import logging 3 | 4 | log = logging.getLogger(__name__) 5 | 6 | 7 | class AbstractPartitionAssignor(object, metaclass=abc.ABCMeta): 8 | """ 9 | Abstract base class defining the partition assignor interface 10 | (name / metadata / assign / on_assignment) used during group rebalancing. 11 | """ 12 | 13 | @abc.abstractproperty 14 | def name(self): 15 | """.name should be a string identifying the assignor""" 16 | pass 17 | 18 | @abc.abstractmethod 19 | def assign(self, cluster, members): 20 | """Perform group assignment given cluster metadata and member subscriptions 21 | 22 | Arguments: 23 | cluster (ClusterMetadata): metadata for use in assignment 24 | members (dict of {member_id: Subscription}): decoded metadata 25 | for each member in the group, including group_instance_id 26 | when available. 27 | 28 | Returns: 29 | dict: {member_id: MemberAssignment} 30 | """ 31 | pass 32 | 33 | @abc.abstractmethod 34 | def metadata(self, topics): 35 | """Generate ProtocolMetadata to be submitted via JoinGroupRequest. 36 | 37 | Arguments: 38 | topics (set): a member's subscribed topics 39 | 40 | Returns: 41 | MemberMetadata struct 42 | """ 43 | pass 44 | 45 | @abc.abstractmethod 46 | def on_assignment(self, assignment): 47 | """Callback that runs on each assignment. 48 | 49 | This method can be used to update internal state, if any, of the 50 | partition assignor.
51 | 52 | Arguments: 53 | assignment (MemberAssignment): the member's assignment 54 | """ 55 | pass 56 | -------------------------------------------------------------------------------- /test/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | @pytest.fixture 5 | def metrics(): 6 | from kafka.metrics import Metrics 7 | 8 | metrics = Metrics() 9 | try: 10 | yield metrics 11 | finally: 12 | metrics.close() 13 | 14 | 15 | @pytest.fixture 16 | def conn(mocker): 17 | """Return a connection mocker fixture""" 18 | from kafka.conn import ConnectionStates 19 | from kafka.future import Future 20 | from kafka.protocol.metadata import MetadataResponse 21 | conn = mocker.patch('kafka.client_async.BrokerConnection') 22 | conn.return_value = conn 23 | conn.state = ConnectionStates.CONNECTED 24 | conn.send.return_value = Future().success( 25 | MetadataResponse[0]( 26 | [(0, 'foo', 12), (1, 'bar', 34)], # brokers 27 | [])) # topics 28 | conn.connection_delay.return_value = 0 29 | conn.blacked_out.return_value = False 30 | conn.next_ifr_request_timeout_ms.return_value = float('inf') 31 | def _set_conn_state(state): 32 | conn.state = state 33 | return state 34 | conn._set_conn_state = _set_conn_state 35 | conn.connect.side_effect = lambda: conn.state 36 | conn.connect_blocking.return_value = True 37 | conn.connecting = lambda: conn.state in (ConnectionStates.CONNECTING, 38 | ConnectionStates.HANDSHAKE) 39 | conn.connected = lambda: conn.state is ConnectionStates.CONNECTED 40 | conn.disconnected = lambda: conn.state is ConnectionStates.DISCONNECTED 41 | return conn 42 | 43 | 44 | @pytest.fixture 45 | def client(conn, mocker): 46 | from kafka import KafkaClient 47 | 48 | cli = KafkaClient(api_version=(0, 9)) 49 | mocker.patch.object(cli, '_init_connect', return_value=True) 50 | try: 51 | yield cli 52 | finally: 53 | cli._close() 54 | -------------------------------------------------------------------------------- /kafka/protocol/find_coordinator.py: -------------------------------------------------------------------------------- 1 | from kafka.protocol.api import Request, Response 2 | from kafka.protocol.types import Int8, Int16, Int32, Schema, String 3 | 4 | 5 | class FindCoordinatorResponse_v0(Response): 6 | API_KEY = 10 7 | API_VERSION = 0 8 | SCHEMA = Schema( 9 | ('error_code', Int16), 10 | ('coordinator_id', Int32), 11 | ('host', String('utf-8')), 12 | ('port', Int32) 13 | ) 14 | 15 | 16 | class FindCoordinatorResponse_v1(Response): 17 | API_KEY = 10 18 | API_VERSION = 1 19 | SCHEMA = Schema( 20 | ('throttle_time_ms', Int32), 21 | ('error_code', Int16), 22 | ('error_message', String('utf-8')), 23 | ('coordinator_id', Int32), 24 | ('host', String('utf-8')), 25 | ('port', Int32) 26 | ) 27 | 28 | 29 | class FindCoordinatorResponse_v2(Response): 30 | API_KEY = 10 31 | API_VERSION = 2 32 | SCHEMA = FindCoordinatorResponse_v1.SCHEMA 33 | 34 | 35 | class FindCoordinatorRequest_v0(Request): 36 | API_KEY = 10 37 | API_VERSION = 0 38 | RESPONSE_TYPE = FindCoordinatorResponse_v0 39 | SCHEMA = Schema( 40 | ('consumer_group', String('utf-8')) 41 | ) 42 | 43 | 44 | class FindCoordinatorRequest_v1(Request): 45 | API_KEY = 10 46 | API_VERSION = 1 47 | RESPONSE_TYPE = FindCoordinatorResponse_v1 48 | SCHEMA = Schema( 49 | ('coordinator_key', String('utf-8')), 50 | ('coordinator_type', Int8) # 0: consumer, 1: transaction 51 | ) 52 | 53 | 54 | class FindCoordinatorRequest_v2(Request): 55 | API_KEY = 10 56 | API_VERSION = 2 57 | RESPONSE_TYPE = 
FindCoordinatorResponse_v2 58 | SCHEMA = FindCoordinatorRequest_v1.SCHEMA 59 | 60 | 61 | FindCoordinatorRequest = [FindCoordinatorRequest_v0, FindCoordinatorRequest_v1, FindCoordinatorRequest_v2] 62 | FindCoordinatorResponse = [FindCoordinatorResponse_v0, FindCoordinatorResponse_v1, FindCoordinatorResponse_v2] 63 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=61.2"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "kafka-python" 7 | dynamic = ["version"] 8 | authors = [{name = "Dana Powers", email = "dana.powers@gmail.com"}] 9 | description = "Pure Python client for Apache Kafka" 10 | keywords = ["apache kafka", "kafka"] 11 | readme = "README.rst" 12 | classifiers = [ 13 | "Development Status :: 5 - Production/Stable", 14 | "Intended Audience :: Developers", 15 | "License :: OSI Approved :: Apache Software License", 16 | "Programming Language :: Python", 17 | "Programming Language :: Python :: 3", 18 | "Programming Language :: Python :: 3.6", 19 | "Programming Language :: Python :: 3.7", 20 | "Programming Language :: Python :: 3.8", 21 | "Programming Language :: Python :: 3.9", 22 | "Programming Language :: Python :: 3.10", 23 | "Programming Language :: Python :: 3.11", 24 | "Programming Language :: Python :: 3.12", 25 | "Programming Language :: Python :: 3.13", 26 | "Programming Language :: Python :: 3.14", 27 | "Programming Language :: Python :: Implementation :: CPython", 28 | "Programming Language :: Python :: Implementation :: PyPy", 29 | "Topic :: Software Development :: Libraries :: Python Modules", 30 | ] 31 | urls = {Homepage = "https://github.com/dpkp/kafka-python"} 32 | 33 | [project.optional-dependencies] 34 | crc32c = ["crc32c"] 35 | lz4 = ["lz4"] 36 | snappy = ["python-snappy"] 37 | zstd = ["zstandard"] 38 | testing = ["pytest", "pytest-mock", "pytest-timeout"] 39 | benchmarks = ["pyperf"] 40 | 41 | [tool.setuptools] 42 | include-package-data = false 43 | license-files = [] # workaround for https://github.com/pypa/setuptools/issues/4759 44 | 45 | [tool.setuptools.packages.find] 46 | exclude = ["test"] 47 | namespaces = false 48 | 49 | [tool.distutils.bdist_wheel] 50 | universal = 1 51 | 52 | [tool.setuptools.dynamic] 53 | version = {attr = "kafka.__version__"} 54 | -------------------------------------------------------------------------------- /kafka/protocol/add_partitions_to_txn.py: -------------------------------------------------------------------------------- 1 | from kafka.protocol.api import Request, Response 2 | from kafka.protocol.types import Array, Int16, Int32, Int64, Schema, String 3 | 4 | 5 | class AddPartitionsToTxnResponse_v0(Response): 6 | API_KEY = 24 7 | API_VERSION = 0 8 | SCHEMA = Schema( 9 | ('throttle_time_ms', Int32), 10 | ('results', Array( 11 | ('topic', String('utf-8')), 12 | ('partitions', Array( 13 | ('partition', Int32), 14 | ('error_code', Int16)))))) 15 | 16 | 17 | class AddPartitionsToTxnResponse_v1(Response): 18 | API_KEY = 24 19 | API_VERSION = 1 20 | SCHEMA = AddPartitionsToTxnResponse_v0.SCHEMA 21 | 22 | 23 | class AddPartitionsToTxnResponse_v2(Response): 24 | API_KEY = 24 25 | API_VERSION = 2 26 | SCHEMA = AddPartitionsToTxnResponse_v1.SCHEMA 27 | 28 | 29 | class AddPartitionsToTxnRequest_v0(Request): 30 | API_KEY = 24 31 | API_VERSION = 0 32 | RESPONSE_TYPE = AddPartitionsToTxnResponse_v0 33 | SCHEMA = Schema( 34 | 
('transactional_id', String('utf-8')), 35 | ('producer_id', Int64), 36 | ('producer_epoch', Int16), 37 | ('topics', Array( 38 | ('topic', String('utf-8')), 39 | ('partitions', Array(Int32))))) 40 | 41 | 42 | class AddPartitionsToTxnRequest_v1(Request): 43 | API_KEY = 24 44 | API_VERSION = 1 45 | RESPONSE_TYPE = AddPartitionsToTxnResponse_v1 46 | SCHEMA = AddPartitionsToTxnRequest_v0.SCHEMA 47 | 48 | 49 | class AddPartitionsToTxnRequest_v2(Request): 50 | API_KEY = 24 51 | API_VERSION = 2 52 | RESPONSE_TYPE = AddPartitionsToTxnResponse_v2 53 | SCHEMA = AddPartitionsToTxnRequest_v1.SCHEMA 54 | 55 | 56 | AddPartitionsToTxnRequest = [ 57 | AddPartitionsToTxnRequest_v0, AddPartitionsToTxnRequest_v1, AddPartitionsToTxnRequest_v2, 58 | ] 59 | AddPartitionsToTxnResponse = [ 60 | AddPartitionsToTxnResponse_v0, AddPartitionsToTxnResponse_v1, AddPartitionsToTxnResponse_v2, 61 | ] 62 | -------------------------------------------------------------------------------- /test/test_subscription_state.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from kafka import TopicPartition 4 | from kafka.consumer.subscription_state import SubscriptionState, TopicPartitionState 5 | 6 | 7 | def test_type_error(): 8 | s = SubscriptionState() 9 | with pytest.raises(TypeError): 10 | s.subscribe(topics='foo') 11 | 12 | s.subscribe(topics=['foo']) 13 | 14 | 15 | def test_change_subscription(): 16 | s = SubscriptionState() 17 | s.subscribe(topics=['foo']) 18 | assert s.subscription == set(['foo']) 19 | s.change_subscription(['bar']) 20 | assert s.subscription == set(['bar']) 21 | 22 | 23 | def test_group_subscribe(): 24 | s = SubscriptionState() 25 | s.subscribe(topics=['foo']) 26 | assert s.subscription == set(['foo']) 27 | s.group_subscribe(['bar']) 28 | assert s.subscription == set(['foo']) 29 | assert s._group_subscription == set(['foo', 'bar']) 30 | 31 | s.reset_group_subscription() 32 | assert s.subscription == set(['foo']) 33 | assert s._group_subscription == set(['foo']) 34 | 35 | 36 | def test_assign_from_subscribed(): 37 | s = SubscriptionState() 38 | s.subscribe(topics=['foo']) 39 | with pytest.raises(ValueError): 40 | s.assign_from_subscribed([TopicPartition('bar', 0)]) 41 | 42 | s.assign_from_subscribed([TopicPartition('foo', 0), TopicPartition('foo', 1)]) 43 | assert set(s.assignment.keys()) == set([TopicPartition('foo', 0), TopicPartition('foo', 1)]) 44 | assert all([isinstance(tps, TopicPartitionState) for tps in s.assignment.values()]) 45 | assert all([not tps.has_valid_position for tps in s.assignment.values()]) 46 | 47 | 48 | def test_change_subscription_after_assignment(): 49 | s = SubscriptionState() 50 | s.subscribe(topics=['foo']) 51 | s.assign_from_subscribed([TopicPartition('foo', 0), TopicPartition('foo', 1)]) 52 | # Changing subscription retains existing assignment until next rebalance 53 | s.change_subscription(['bar']) 54 | assert set(s.assignment.keys()) == set([TopicPartition('foo', 0), TopicPartition('foo', 1)]) 55 | -------------------------------------------------------------------------------- /test/test_consumer.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from kafka import KafkaConsumer, TopicPartition 4 | from kafka.errors import KafkaConfigurationError, IllegalStateError 5 | 6 | 7 | def test_session_timeout_larger_than_request_timeout_raises(): 8 | with pytest.raises(KafkaConfigurationError): 9 | KafkaConsumer(bootstrap_servers='localhost:9092', api_version=(0, 
9), group_id='foo', session_timeout_ms=50000, request_timeout_ms=40000) 10 | 11 | 12 | def test_fetch_max_wait_larger_than_request_timeout_raises(): 13 | with pytest.raises(KafkaConfigurationError): 14 | KafkaConsumer(bootstrap_servers='localhost:9092', fetch_max_wait_ms=50000, request_timeout_ms=40000) 15 | 16 | 17 | def test_request_timeout_larger_than_connections_max_idle_ms_raises(): 18 | with pytest.raises(KafkaConfigurationError): 19 | KafkaConsumer(bootstrap_servers='localhost:9092', api_version=(0, 9), request_timeout_ms=50000, connections_max_idle_ms=40000) 20 | 21 | 22 | def test_subscription_copy(): 23 | consumer = KafkaConsumer('foo', api_version=(0, 10, 0)) 24 | sub = consumer.subscription() 25 | assert sub is not consumer.subscription() 26 | assert sub == set(['foo']) 27 | sub.add('fizz') 28 | assert consumer.subscription() == set(['foo']) 29 | 30 | 31 | def test_assign(): 32 | # Consumer w/ subscription to topic 'foo' 33 | consumer = KafkaConsumer('foo', api_version=(0, 10, 0)) 34 | assert consumer.assignment() == set() 35 | # Cannot assign manually 36 | with pytest.raises(IllegalStateError): 37 | consumer.assign([TopicPartition('foo', 0)]) 38 | 39 | assert 'foo' in consumer._client._topics 40 | 41 | consumer = KafkaConsumer(api_version=(0, 10, 0)) 42 | assert consumer.assignment() == set() 43 | consumer.assign([TopicPartition('foo', 0)]) 44 | assert consumer.assignment() == set([TopicPartition('foo', 0)]) 45 | assert 'foo' in consumer._client._topics 46 | # Cannot subscribe 47 | with pytest.raises(IllegalStateError): 48 | consumer.subscribe(topics=['foo']) 49 | consumer.assign([]) 50 | assert consumer.assignment() == set() 51 | -------------------------------------------------------------------------------- /docs/install.rst: -------------------------------------------------------------------------------- 1 | Install 2 | ####### 3 | 4 | Install with your favorite package manager. 5 | 6 | Latest Release 7 | ************** 8 | Pip: 9 | 10 | .. code:: bash 11 | 12 | pip install kafka-python 13 | 14 | Releases are also listed at https://github.com/dpkp/kafka-python/releases 15 | 16 | 17 | Bleeding-Edge 18 | ************* 19 | 20 | .. code:: bash 21 | 22 | git clone https://github.com/dpkp/kafka-python 23 | pip install ./kafka-python 24 | 25 | 26 | Optional crc32c install 27 | *********************** 28 | Highly recommended if you are using Kafka 0.11+ brokers. For those brokers, 29 | `kafka-python` uses a newer message protocol version that requires calculating 30 | a `crc32c` checksum, which differs from the `zlib.crc32` hash implementation. 31 | By default `kafka-python` computes this checksum in pure python, which is quite 32 | slow. To speed it up, we optionally use the https://pypi.python.org/pypi/crc32c package if it's installed. 33 | 34 | .. code:: bash 35 | 36 | pip install 'kafka-python[crc32c]' 37 | 38 | 39 | Optional ZSTD install 40 | ********************* 41 | 42 | To enable ZSTD compression/decompression, install python-zstandard: 43 | 44 | >>> pip install 'kafka-python[zstd]' 45 | 46 | 47 | Optional LZ4 install 48 | ******************** 49 | 50 | To enable LZ4 compression/decompression, install python-lz4: 51 | 52 | >>> pip install 'kafka-python[lz4]' 53 | 54 | 55 | Optional Snappy install 56 | *********************** 57 | 58 | Install Development Libraries 59 | ============================= 60 | 61 | Download and build Snappy from https://google.github.io/snappy/ 62 | 63 | Ubuntu: 64 | 65 | .. code:: bash 66 | 67 | apt-get install libsnappy-dev 68 | 69 | OSX: 70 | 71 | ..
code:: bash 72 | 73 | brew install snappy 74 | 75 | From Source: 76 | 77 | .. code:: bash 78 | 79 | wget https://github.com/google/snappy/releases/download/1.1.3/snappy-1.1.3.tar.gz 80 | tar xzvf snappy-1.1.3.tar.gz 81 | cd snappy-1.1.3 82 | ./configure 83 | make 84 | sudo make install 85 | 86 | Install Python Module 87 | ===================== 88 | 89 | Install the `python-snappy` module 90 | 91 | .. code:: bash 92 | 93 | pip install 'kafka-python[snappy]' 94 | -------------------------------------------------------------------------------- /kafka/coordinator/assignors/sticky/sorted_set.py: -------------------------------------------------------------------------------- 1 | class SortedSet: 2 | def __init__(self, iterable=None, key=None): 3 | self._key = key if key is not None else lambda x: x 4 | self._set = set(iterable) if iterable is not None else set() 5 | 6 | self._cached_last = None 7 | self._cached_first = None 8 | 9 | def first(self): 10 | if self._cached_first is not None: 11 | return self._cached_first 12 | 13 | first = None 14 | for element in self._set: 15 | if first is None or self._key(first) > self._key(element): 16 | first = element 17 | self._cached_first = first 18 | return first 19 | 20 | def last(self): 21 | if self._cached_last is not None: 22 | return self._cached_last 23 | 24 | last = None 25 | for element in self._set: 26 | if last is None or self._key(last) < self._key(element): 27 | last = element 28 | self._cached_last = last 29 | return last 30 | 31 | def pop_last(self): 32 | value = self.last() 33 | self._set.remove(value) 34 | self._cached_last = None 35 | return value 36 | 37 | def add(self, value): 38 | if self._cached_last is not None and self._key(value) > self._key(self._cached_last): 39 | self._cached_last = value 40 | if self._cached_first is not None and self._key(value) < self._key(self._cached_first): 41 | self._cached_first = value 42 | 43 | return self._set.add(value) 44 | 45 | def remove(self, value): 46 | if self._cached_last is not None and self._cached_last == value: 47 | self._cached_last = None 48 | if self._cached_first is not None and self._cached_first == value: 49 | self._cached_first = None 50 | 51 | return self._set.remove(value) 52 | 53 | def __contains__(self, value): 54 | return value in self._set 55 | 56 | def __iter__(self): 57 | return iter(sorted(self._set, key=self._key)) 58 | 59 | def _bool(self): 60 | return len(self._set) != 0 61 | 62 | __nonzero__ = _bool 63 | __bool__ = _bool 64 | -------------------------------------------------------------------------------- /test/testutil.py: -------------------------------------------------------------------------------- 1 | import os 2 | import random 3 | import re 4 | import string 5 | import time 6 | 7 | import pytest 8 | 9 | import kafka.codec 10 | 11 | 12 | def special_to_underscore(string, _matcher=re.compile(r'[^a-zA-Z0-9_]+')): 13 | return _matcher.sub('_', string) 14 | 15 | 16 | def random_string(length): 17 | return "".join(random.choice(string.ascii_letters) for i in range(length)) 18 | 19 | 20 | def env_kafka_version(): 21 | """Return the Kafka version set in the OS environment as a tuple. 
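    Returns an empty tuple if KAFKA_VERSION is not set in the environment.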
22 | 23 | Example: '0.8.1.1' --> (0, 8, 1, 1) 24 | """ 25 | if 'KAFKA_VERSION' not in os.environ: 26 | return () 27 | return tuple(map(int, os.environ['KAFKA_VERSION'].split('.'))) 28 | 29 | 30 | def assert_message_count(messages, num_messages): 31 | """Check that we received the expected number of messages with no duplicates.""" 32 | # Make sure we got them all 33 | assert len(messages) == num_messages, 'Expected %d messages, got %d' % (num_messages, len(messages)) 34 | # Make sure there are no duplicates 35 | # Note: Currently duplicates are identified only using key/value. Other attributes like topic, partition, headers, 36 | # timestamp, etc are ignored... this could be changed if necessary, but will be more tolerant of dupes. 37 | unique_messages = {(m.key, m.value) for m in messages} 38 | assert len(unique_messages) == num_messages, 'Expected %d unique messages, got %d' % (num_messages, len(unique_messages)) 39 | 40 | 41 | def maybe_skip_unsupported_compression(compression_type): 42 | codecs = {1: 'gzip', 2: 'snappy', 3: 'lz4', 4: 'zstd'} 43 | if not compression_type: 44 | return 45 | elif compression_type in codecs: 46 | compression_type = codecs[compression_type] 47 | 48 | checker = getattr(kafka.codec, 'has_' + compression_type, None) 49 | if checker and not checker(): 50 | pytest.skip("Compression libraries not installed for %s" % (compression_type,)) 51 | 52 | 53 | class Timer(object): 54 | def __enter__(self): 55 | self.start = time.time() 56 | return self 57 | 58 | def __exit__(self, *args): 59 | self.end = time.time() 60 | self.interval = self.end - self.start 61 | -------------------------------------------------------------------------------- /kafka/protocol/struct.py: -------------------------------------------------------------------------------- 1 | import abc 2 | from io import BytesIO 3 | 4 | from kafka.protocol.abstract import AbstractType 5 | from kafka.protocol.types import Schema 6 | 7 | from kafka.util import WeakMethod 8 | 9 | 10 | class Struct(metaclass=abc.ABCMeta): 11 | 12 | @abc.abstractproperty 13 | def SCHEMA(self): 14 | """An instance of Schema() representing the structure""" 15 | pass 16 | 17 | def __init__(self, *args, **kwargs): 18 | if len(args) == len(self.SCHEMA): 19 | for i, name in enumerate(self.SCHEMA.names): 20 | setattr(self, name, args[i]) 21 | elif len(args) > 0: 22 | raise ValueError('Args must be empty or mirror schema') 23 | else: 24 | for name in self.SCHEMA.names: 25 | setattr(self, name, kwargs.pop(name, None)) 26 | if kwargs: 27 | raise ValueError('Keyword(s) not in schema %s: %s' 28 | % (list(self.SCHEMA.names), 29 | ', '.join(kwargs.keys()))) 30 | 31 | def encode(self): 32 | return self.SCHEMA.encode( 33 | [getattr(self, name) for name in self.SCHEMA.names] 34 | ) 35 | 36 | @classmethod 37 | def decode(cls, data): 38 | if isinstance(data, bytes): 39 | data = BytesIO(data) 40 | return cls(*cls.SCHEMA.decode(data)) 41 | 42 | def get_item(self, name): 43 | if name not in self.SCHEMA.names: 44 | raise KeyError("%s is not in the schema" % name) 45 | return getattr(self, name) 46 | 47 | def __repr__(self): 48 | key_vals = [] 49 | for name, field in zip(self.SCHEMA.names, self.SCHEMA.fields): 50 | key_vals.append('%s=%s' % (name, field.repr(getattr(self, name)))) 51 | return self.__class__.__name__ + '(' + ', '.join(key_vals) + ')' 52 | 53 | def __hash__(self): 54 | return hash(self.encode()) 55 | 56 | def __eq__(self, other): 57 | if self.SCHEMA != other.SCHEMA: 58 | return False 59 | for attr in self.SCHEMA.names: 60 | if getattr(self, 
attr) != getattr(other, attr): 61 | return False 62 | return True 63 | -------------------------------------------------------------------------------- /example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import threading 3 | import time 4 | 5 | from kafka import KafkaAdminClient, KafkaConsumer, KafkaProducer 6 | from kafka.admin import NewTopic 7 | 8 | 9 | class Producer(threading.Thread): 10 | def __init__(self): 11 | threading.Thread.__init__(self) 12 | self.stop_event = threading.Event() 13 | 14 | def stop(self): 15 | self.stop_event.set() 16 | 17 | def run(self): 18 | producer = KafkaProducer(bootstrap_servers='localhost:9092') 19 | 20 | while not self.stop_event.is_set(): 21 | producer.send('my-topic', b"test") 22 | producer.send('my-topic', b"\xc2Hola, mundo!") 23 | time.sleep(1) 24 | 25 | producer.close() 26 | 27 | 28 | class Consumer(threading.Thread): 29 | def __init__(self): 30 | threading.Thread.__init__(self) 31 | self.stop_event = threading.Event() 32 | 33 | def stop(self): 34 | self.stop_event.set() 35 | 36 | def run(self): 37 | consumer = KafkaConsumer(bootstrap_servers='localhost:9092', 38 | auto_offset_reset='earliest', 39 | consumer_timeout_ms=1000) 40 | consumer.subscribe(['my-topic']) 41 | 42 | while not self.stop_event.is_set(): 43 | for message in consumer: 44 | print(message) 45 | if self.stop_event.is_set(): 46 | break 47 | 48 | consumer.close() 49 | 50 | 51 | def main(): 52 | # Create 'my-topic' Kafka topic 53 | try: 54 | admin = KafkaAdminClient(bootstrap_servers='localhost:9092') 55 | 56 | topic = NewTopic(name='my-topic', 57 | num_partitions=1, 58 | replication_factor=1) 59 | admin.create_topics([topic]) 60 | except Exception: 61 | pass 62 | 63 | tasks = [ 64 | Producer(), 65 | Consumer() 66 | ] 67 | 68 | # Start threads of a publisher/producer and a subscriber/consumer to 'my-topic' Kafka topic 69 | for t in tasks: 70 | t.start() 71 | 72 | time.sleep(10) 73 | 74 | # Stop threads 75 | for task in tasks: 76 | task.stop() 77 | 78 | for task in tasks: 79 | task.join() 80 | 81 | 82 | if __name__ == "__main__": 83 | main() 84 | -------------------------------------------------------------------------------- /kafka/benchmarks/record_batch_compose.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import hashlib 3 | import itertools 4 | import os 5 | import random 6 | 7 | import pyperf 8 | 9 | from kafka.record.memory_records import MemoryRecordsBuilder 10 | 11 | 12 | DEFAULT_BATCH_SIZE = 1600 * 1024 13 | KEY_SIZE = 6 14 | VALUE_SIZE = 60 15 | TIMESTAMP_RANGE = [1505824130000, 1505824140000] 16 | 17 | # With values above v1 record is 100 bytes, so 10 000 bytes for 100 messages 18 | MESSAGES_PER_BATCH = 100 19 | 20 | 21 | def random_bytes(length): 22 | buffer = bytearray(length) 23 | for i in range(length): 24 | buffer[i] = random.randint(0, 255) 25 | return bytes(buffer) 26 | 27 | 28 | def prepare(): 29 | return iter(itertools.cycle([ 30 | (random_bytes(KEY_SIZE), 31 | random_bytes(VALUE_SIZE), 32 | random.randint(*TIMESTAMP_RANGE) 33 | ) 34 | for _ in range(int(MESSAGES_PER_BATCH * 1.94)) 35 | ])) 36 | 37 | 38 | def finalize(results): 39 | # Just some strange code to make sure PyPy does execute the main code 40 | # properly, without optimizing it away 41 | hash_val = hashlib.md5() 42 | for buf in results: 43 | hash_val.update(buf) 44 | print(hash_val, file=open(os.devnull, "w")) 45 | 46 | 47 | def func(loops, magic): 48 | # Jit can 
optimize out the whole function if the result is the same each 49 | # time, so we need some randomized input data. 50 | precomputed_samples = prepare() 51 | results = [] 52 | 53 | # Main benchmark code. 54 | t0 = pyperf.perf_counter() 55 | for _ in range(loops): 56 | batch = MemoryRecordsBuilder( 57 | magic, batch_size=DEFAULT_BATCH_SIZE, compression_type=0) 58 | for _ in range(MESSAGES_PER_BATCH): 59 | key, value, timestamp = next(precomputed_samples) 60 | size = batch.append( 61 | timestamp=timestamp, key=key, value=value) 62 | assert size 63 | batch.close() 64 | results.append(batch.buffer()) 65 | 66 | res = pyperf.perf_counter() - t0 67 | 68 | finalize(results) 69 | 70 | return res 71 | 72 | 73 | if __name__ == '__main__': 74 | runner = pyperf.Runner() 75 | runner.bench_time_func('batch_append_v0', func, 0) 76 | runner.bench_time_func('batch_append_v1', func, 1) 77 | runner.bench_time_func('batch_append_v2', func, 2) 78 | -------------------------------------------------------------------------------- /kafka/benchmarks/record_batch_read.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import hashlib 3 | import itertools 4 | import os 5 | import random 6 | 7 | import pyperf 8 | 9 | from kafka.record.memory_records import MemoryRecords, MemoryRecordsBuilder 10 | 11 | 12 | DEFAULT_BATCH_SIZE = 1600 * 1024 13 | KEY_SIZE = 6 14 | VALUE_SIZE = 60 15 | TIMESTAMP_RANGE = [1505824130000, 1505824140000] 16 | 17 | BATCH_SAMPLES = 5 18 | MESSAGES_PER_BATCH = 100 19 | 20 | 21 | def random_bytes(length): 22 | buffer = bytearray(length) 23 | for i in range(length): 24 | buffer[i] = random.randint(0, 255) 25 | return bytes(buffer) 26 | 27 | 28 | def prepare(magic): 29 | samples = [] 30 | for _ in range(BATCH_SAMPLES): 31 | batch = MemoryRecordsBuilder( 32 | magic, batch_size=DEFAULT_BATCH_SIZE, compression_type=0) 33 | for _ in range(MESSAGES_PER_BATCH): 34 | size = batch.append( 35 | random.randint(*TIMESTAMP_RANGE), 36 | random_bytes(KEY_SIZE), 37 | random_bytes(VALUE_SIZE), 38 | headers=[]) 39 | assert size 40 | batch.close() 41 | samples.append(bytes(batch.buffer())) 42 | 43 | return iter(itertools.cycle(samples)) 44 | 45 | 46 | def finalize(results): 47 | # Just some strange code to make sure PyPy does execute the code above 48 | # properly 49 | hash_val = hashlib.md5() 50 | for buf in results: 51 | hash_val.update(buf) 52 | print(hash_val, file=open(os.devnull, "w")) 53 | 54 | 55 | def func(loops, magic): 56 | # Jit can optimize out the whole function if the result is the same each 57 | # time, so we need some randomized input data. 58 | precomputed_samples = prepare(magic) 59 | results = [] 60 | 61 | # Main benchmark code.
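    # Note: the same pre-built batch is decoded on every iteration, so only
    # the read path (CRC validation + record iteration) is timed below.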
62 | batch_data = next(precomputed_samples) 63 | t0 = pyperf.perf_counter() 64 | for _ in range(loops): 65 | records = MemoryRecords(batch_data) 66 | while records.has_next(): 67 | batch = records.next_batch() 68 | batch.validate_crc() 69 | for record in batch: 70 | results.append(record.value) 71 | 72 | res = pyperf.perf_counter() - t0 73 | finalize(results) 74 | 75 | return res 76 | 77 | 78 | if __name__ == '__main__': 79 | runner = pyperf.Runner() 80 | runner.bench_time_func('batch_read_v0', func, 0) 81 | runner.bench_time_func('batch_read_v1', func, 1) 82 | runner.bench_time_func('batch_read_v2', func, 2) 83 | -------------------------------------------------------------------------------- /kafka/protocol/txn_offset_commit.py: -------------------------------------------------------------------------------- 1 | from kafka.protocol.api import Request, Response 2 | from kafka.protocol.types import Array, Int16, Int32, Int64, Schema, String 3 | 4 | 5 | class TxnOffsetCommitResponse_v0(Response): 6 | API_KEY = 28 7 | API_VERSION = 0 8 | SCHEMA = Schema( 9 | ('throttle_time_ms', Int32), 10 | ('topics', Array( 11 | ('topic', String('utf-8')), 12 | ('partitions', Array( 13 | ('partition', Int32), 14 | ('error_code', Int16)))))) 15 | 16 | 17 | class TxnOffsetCommitResponse_v1(Response): 18 | API_KEY = 28 19 | API_VERSION = 1 20 | SCHEMA = TxnOffsetCommitResponse_v0.SCHEMA 21 | 22 | 23 | class TxnOffsetCommitResponse_v2(Response): 24 | API_KEY = 28 25 | API_VERSION = 2 26 | SCHEMA = TxnOffsetCommitResponse_v1.SCHEMA 27 | 28 | 29 | class TxnOffsetCommitRequest_v0(Request): 30 | API_KEY = 28 31 | API_VERSION = 0 32 | RESPONSE_TYPE = TxnOffsetCommitResponse_v0 33 | SCHEMA = Schema( 34 | ('transactional_id', String('utf-8')), 35 | ('group_id', String('utf-8')), 36 | ('producer_id', Int64), 37 | ('producer_epoch', Int16), 38 | ('topics', Array( 39 | ('topic', String('utf-8')), 40 | ('partitions', Array( 41 | ('partition', Int32), 42 | ('offset', Int64), 43 | ('metadata', String('utf-8'))))))) 44 | 45 | 46 | class TxnOffsetCommitRequest_v1(Request): 47 | API_KEY = 28 48 | API_VERSION = 1 49 | RESPONSE_TYPE = TxnOffsetCommitResponse_v1 50 | SCHEMA = TxnOffsetCommitRequest_v0.SCHEMA 51 | 52 | 53 | class TxnOffsetCommitRequest_v2(Request): 54 | API_KEY = 28 55 | API_VERSION = 2 56 | RESPONSE_TYPE = TxnOffsetCommitResponse_v2 57 | SCHEMA = Schema( 58 | ('transactional_id', String('utf-8')), 59 | ('group_id', String('utf-8')), 60 | ('producer_id', Int64), 61 | ('producer_epoch', Int16), 62 | ('topics', Array( 63 | ('topic', String('utf-8')), 64 | ('partitions', Array( 65 | ('partition', Int32), 66 | ('offset', Int64), 67 | ('leader_epoch', Int32), 68 | ('metadata', String('utf-8'))))))) 69 | 70 | 71 | TxnOffsetCommitRequest = [ 72 | TxnOffsetCommitRequest_v0, TxnOffsetCommitRequest_v1, TxnOffsetCommitRequest_v2, 73 | ] 74 | TxnOffsetCommitResponse = [ 75 | TxnOffsetCommitResponse_v0, TxnOffsetCommitResponse_v1, TxnOffsetCommitResponse_v2, 76 | ] 77 | -------------------------------------------------------------------------------- /servers/0.8.0/resources/kafka.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 
4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | ############################# Server Basics ############################# 17 | 18 | broker.id={broker_id} 19 | 20 | ############################# Socket Server Settings ############################# 21 | 22 | port={port} 23 | host.name={host} 24 | 25 | num.network.threads=2 26 | num.io.threads=2 27 | 28 | socket.send.buffer.bytes=1048576 29 | socket.receive.buffer.bytes=1048576 30 | socket.request.max.bytes=104857600 31 | 32 | ############################# Log Basics ############################# 33 | 34 | log.dirs={tmp_dir}/data 35 | num.partitions={partitions} 36 | default.replication.factor={replicas} 37 | 38 | ## Short Replica Lag -- Drops failed brokers out of ISR 39 | replica.lag.time.max.ms=1000 40 | replica.socket.timeout.ms=1000 41 | 42 | ############################# Log Flush Policy ############################# 43 | 44 | log.flush.interval.messages=10000 45 | log.flush.interval.ms=1000 46 | 47 | ############################# Log Retention Policy ############################# 48 | 49 | log.retention.hours=168 50 | log.segment.bytes=536870912 51 | log.cleanup.interval.mins=1 52 | 53 | ############################# Zookeeper ############################# 54 | 55 | zookeeper.connect={zk_host}:{zk_port}/{zk_chroot} 56 | 57 | # Timeout in ms for connecting to zookeeper 58 | zookeeper.connection.timeout.ms=1000000 59 | # We want to expire kafka broker sessions quickly when brokers die b/c we restart them quickly 60 | zookeeper.session.timeout.ms=500 61 | 62 | kafka.metrics.polling.interval.secs=5 63 | kafka.metrics.reporters=kafka.metrics.KafkaCSVMetricsReporter 64 | kafka.csv.metrics.dir={tmp_dir} 65 | kafka.csv.metrics.reporter.enabled=false 66 | 67 | log.cleanup.policy=delete 68 | -------------------------------------------------------------------------------- /servers/0.8.1/resources/kafka.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
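# NOTE: values in {braces} (e.g. {broker_id}, {port}) are template
# placeholders filled in when the test fixtures render this file for an
# integration-test broker.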
15 | 16 | ############################# Server Basics ############################# 17 | 18 | broker.id={broker_id} 19 | 20 | ############################# Socket Server Settings ############################# 21 | 22 | port={port} 23 | host.name={host} 24 | 25 | num.network.threads=2 26 | num.io.threads=2 27 | 28 | socket.send.buffer.bytes=1048576 29 | socket.receive.buffer.bytes=1048576 30 | socket.request.max.bytes=104857600 31 | 32 | ############################# Log Basics ############################# 33 | 34 | log.dirs={tmp_dir}/data 35 | num.partitions={partitions} 36 | default.replication.factor={replicas} 37 | 38 | ## Short Replica Lag -- Drops failed brokers out of ISR 39 | replica.lag.time.max.ms=1000 40 | replica.socket.timeout.ms=1000 41 | 42 | ############################# Log Flush Policy ############################# 43 | 44 | log.flush.interval.messages=10000 45 | log.flush.interval.ms=1000 46 | 47 | ############################# Log Retention Policy ############################# 48 | 49 | log.retention.hours=168 50 | log.segment.bytes=536870912 51 | log.retention.check.interval.ms=60000 52 | log.cleanup.interval.mins=1 53 | log.cleaner.enable=false 54 | 55 | ############################# Zookeeper ############################# 56 | 57 | # Zookeeper connection string (see zookeeper docs for details). 58 | # This is a comma separated host:port pairs, each corresponding to a zk 59 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". 60 | # You can also append an optional chroot string to the urls to specify the 61 | # root directory for all kafka znodes. 62 | zookeeper.connect={zk_host}:{zk_port}/{zk_chroot} 63 | 64 | # Timeout in ms for connecting to zookeeper 65 | zookeeper.connection.timeout.ms=1000000 66 | # We want to expire kafka broker sessions quickly when brokers die b/c we restart them quickly 67 | zookeeper.session.timeout.ms=500 68 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # For most projects, this workflow file will not need changing; you simply need 3 | # to commit it to your repository. 4 | # 5 | # You may wish to alter this file to override the set of languages analyzed, 6 | # or to provide custom queries or build logic. 7 | # 8 | # ******** NOTE ******** 9 | # We have attempted to detect the languages in your repository. Please check 10 | # the `language` matrix defined below to confirm you have the correct set of 11 | # supported CodeQL languages. 12 | # 13 | name: CodeQL 14 | on: 15 | push: 16 | branches: [master] 17 | pull_request: 18 | # The branches below must be a subset of the branches above 19 | branches: [master] 20 | schedule: 21 | - cron: 19 10 * * 6 22 | jobs: 23 | analyze: 24 | name: Analyze 25 | runs-on: ubuntu-latest 26 | permissions: 27 | actions: read 28 | contents: read 29 | security-events: write 30 | strategy: 31 | fail-fast: false 32 | matrix: 33 | language: [python] 34 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] 35 | # Learn more: 36 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed 37 | steps: 38 | - name: Checkout repository 39 | uses: actions/checkout@v6 40 | 41 | # Initializes the CodeQL tools for scanning. 
42 | - name: Initialize CodeQL 43 | uses: github/codeql-action/init@v4 44 | with: 45 | languages: ${{ matrix.language }} 46 | # If you wish to specify custom queries, you can do so here or in a config file. 47 | # By default, queries listed here will override any specified in a config file. 48 | # Prefix the list here with "+" to use these queries and those in the config file. 49 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 50 | 51 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 52 | # If this step fails, then you should remove it and run the build manually (see below) 53 | - name: Autobuild 54 | uses: github/codeql-action/autobuild@v4 55 | 56 | # ℹ️ Command-line programs to run using the OS shell. 57 | # 📚 https://git.io/JvXDl 58 | 59 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 60 | # and modify them (or add more) to build your code if your project 61 | # uses a compiled language 62 | 63 | #- run: | 64 | # make bootstrap 65 | # make release 66 | - name: Perform CodeQL Analysis 67 | uses: github/codeql-action/analyze@v4 68 | -------------------------------------------------------------------------------- /AUTHORS.md: -------------------------------------------------------------------------------- 1 | # Current Maintainer 2 | * Dana Powers, [@dpkp](https://github.com/dpkp) 3 | 4 | # Original Author and First Commit 5 | * David Arthur, [@mumrah](https://github.com/mumrah) 6 | 7 | # Contributors - 2015 (alpha by username) 8 | * Alex Couture-Beil, [@alexcb](https://github.com/alexcb) 9 | * Ali-Akber Saifee, [@alisaifee](https://github.com/alisaifee) 10 | * Christophe-Marie Duquesne, [@chmduquesne](https://github.com/chmduquesne) 11 | * Thomas Dimson, [@cosbynator](https://github.com/cosbynator) 12 | * Kasper Jacobsen, [@Dinoshauer](https://github.com/Dinoshauer) 13 | * Ross Duggan, [@duggan](https://github.com/duggan) 14 | * Enrico Canzonieri, [@ecanzonieri](https://github.com/ecanzonieri) 15 | * haosdent, [@haosdent](https://github.com/haosdent) 16 | * Arturo Filastò, [@hellais](https://github.com/hellais) 17 | * Job Evers‐Meltzer, [@jobevers](https://github.com/jobevers) 18 | * Martin Olveyra, [@kalessin](https://github.com/kalessin) 19 | * Kubilay Kocak, [@koobs](https://github.com/koobs) 20 | * Matthew L Daniel 21 | * Eric Hewitt, [@meandthewallaby](https://github.com/meandthewallaby) 22 | * Oliver Jowett [@mutability](https://github.com/mutability) 23 | * Shaolei Zhou, [@reAsOn2010](https://github.com/reAsOn2010) 24 | * Oskari Saarenmaa, [@saaros](https://github.com/saaros) 25 | * John Anderson, [@sontek](https://github.com/sontek) 26 | * Eduard Iskandarov, [@toidi](https://github.com/toidi) 27 | * Todd Palino, [@toddpalino](https://github.com/toddpalino) 28 | * trbs, [@trbs](https://github.com/trbs) 29 | * Viktor Shlapakov, [@vshlapakov](https://github.com/vshlapakov) 30 | * Will Daly, [@wedaly](https://github.com/wedaly) 31 | * Warren Kiser, [@wkiser](https://github.com/wkiser) 32 | * William Ting, [@wting](https://github.com/wting) 33 | * Zack Dever, [@zackdever](https://github.com/zackdever) 34 | 35 | # More Contributors 36 | * Bruno Renié, [@brutasse](https://github.com/brutasse) 37 | * Thomas Dimson, [@cosbynator](https://github.com/cosbynator) 38 | * Jesse Myers, [@jessemyers](https://github.com/jessemyers) 39 | * Mahendra M, [@mahendra](https://github.com/mahendra) 40 | * Miguel Eduardo Gil Biraud, [@mgilbir](https://github.com/mgilbir) 41 | * Marc Labbé, 
[@mrtheb](https://github.com/mrtheb) 42 | * Patrick Lucas, [@patricklucas](https://github.com/patricklucas) 43 | * Omar Ghishan, [@rdiomar](https://github.com/rdiomar) - RIP, Omar. 2014 44 | * Ivan Pouzyrevsky, [@sandello](https://github.com/sandello) 45 | * Lou Marvin Caraig, [@se7entyse7en](https://github.com/se7entyse7en) 46 | * waliaashish85, [@waliaashish85](https://github.com/waliaashish85) 47 | * Mark Roberts, [@wizzat](https://github.com/wizzat) 48 | * Christophe Lecointe, [@christophelec](https://github.com/christophelec) 49 | * Mohamed Helmi Hichri, [@hellich](https://github.com/hellich) 50 | 51 | Thanks to all who have contributed! 52 | -------------------------------------------------------------------------------- /kafka/cli/producer/__init__.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import logging 3 | import sys 4 | 5 | from kafka import KafkaProducer 6 | 7 | 8 | def main_parser(): 9 | parser = argparse.ArgumentParser( 10 | prog='python -m kafka.producer', 11 | description='Kafka console producer', 12 | ) 13 | parser.add_argument( 14 | '-b', '--bootstrap-servers', type=str, action='append', required=True, 15 | help='host:port for cluster bootstrap servers') 16 | parser.add_argument( 17 | '-t', '--topic', type=str, required=True, 18 | help='publish to topic') 19 | parser.add_argument( 20 | '-c', '--extra-config', type=str, action='append', 21 | help='additional configuration properties for kafka producer') 22 | parser.add_argument( 23 | '-l', '--log-level', type=str, 24 | help='logging level, passed to logging.basicConfig') 25 | parser.add_argument( 26 | '--encoding', type=str, default='utf-8', 27 | help='byte encoding for produced messages') 28 | return parser 29 | 30 | 31 | _LOGGING_LEVELS = {'NOTSET': 0, 'DEBUG': 10, 'INFO': 20, 'WARNING': 30, 'ERROR': 40, 'CRITICAL': 50} 32 | 33 | 34 | def build_kwargs(props): 35 | kwargs = {} 36 | for prop in props or []: 37 | k, v = prop.split('=', 1)  # split on the first '=' only, so values may contain '=' 38 | try: 39 | v = int(v) 40 | except ValueError: 41 | pass 42 | if v == 'None': 43 | v = None 44 | elif v == 'False': 45 | v = False 46 | elif v == 'True': 47 | v = True 48 | kwargs[k] = v 49 | return kwargs 50 | 51 | 52 | def run_cli(args=None): 53 | parser = main_parser() 54 | config = parser.parse_args(args) 55 | if config.log_level: 56 | logging.basicConfig(level=_LOGGING_LEVELS[config.log_level.upper()]) 57 | logger = logging.getLogger(__name__) 58 | 59 | kwargs = build_kwargs(config.extra_config) 60 | producer = KafkaProducer(bootstrap_servers=config.bootstrap_servers, **kwargs) 61 | 62 | def log_result(res_or_err): 63 | if isinstance(res_or_err, Exception): 64 | logger.error("Error producing message", exc_info=res_or_err) 65 | else: 66 | logger.info("Message produced: %s", res_or_err) 67 | 68 | try: 69 | while True: 70 | try: 71 | value = input() 72 | except EOFError: 73 | value = sys.stdin.read().rstrip('\n') 74 | if not value: 75 | return 0 76 | producer.send(config.topic, value=value.encode(config.encoding)).add_both(log_result) 77 | except KeyboardInterrupt: 78 | logger.info('Bye!') 79 | return 0 80 | except Exception: 81 | logger.exception('Error!') 82 | return 1 83 | finally: 84 | producer.close() 85 | -------------------------------------------------------------------------------- /kafka/metrics/dict_reporter.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import threading 3 | 4 | from kafka.metrics.metrics_reporter import AbstractMetricsReporter 5 | 6 |
logger = logging.getLogger(__name__) 7 | 8 | 9 | class DictReporter(AbstractMetricsReporter): 10 | """A basic dictionary based metrics reporter. 11 | 12 | Store all metrics in a two level dictionary of category > name > metric. 13 | """ 14 | def __init__(self, prefix=''): 15 | self._lock = threading.Lock() 16 | self._prefix = prefix if prefix else '' # never allow None 17 | self._store = {} 18 | 19 | def snapshot(self): 20 | """ 21 | Return a nested dictionary snapshot of all metrics and their 22 | values at this time. Example: 23 | { 24 | 'category': { 25 | 'metric1_name': 42.0, 26 | 'metric2_name': 'foo' 27 | } 28 | } 29 | """ 30 | return dict((category, dict((name, metric.value()) 31 | for name, metric in list(metrics.items()))) 32 | for category, metrics in 33 | list(self._store.items())) 34 | 35 | def init(self, metrics): 36 | for metric in metrics: 37 | self.metric_change(metric) 38 | 39 | def metric_change(self, metric): 40 | with self._lock: 41 | category = self.get_category(metric) 42 | if category not in self._store: 43 | self._store[category] = {} 44 | self._store[category][metric.metric_name.name] = metric 45 | 46 | def metric_removal(self, metric): 47 | with self._lock: 48 | category = self.get_category(metric) 49 | metrics = self._store.get(category, {}) 50 | removed = metrics.pop(metric.metric_name.name, None) 51 | if not metrics: 52 | self._store.pop(category, None) 53 | return removed 54 | 55 | def get_category(self, metric): 56 | """ 57 | Return a string category for the metric. 58 | 59 | The category is made up of this reporter's prefix and the 60 | metric's group and tags. 61 | 62 | Examples: 63 | prefix = 'foo', group = 'bar', tags = {'a': 1, 'b': 2} 64 | returns: 'foo.bar.a=1,b=2' 65 | 66 | prefix = 'foo', group = 'bar', tags = None 67 | returns: 'foo.bar' 68 | 69 | prefix = None, group = 'bar', tags = None 70 | returns: 'bar' 71 | """ 72 | tags = ','.join('%s=%s' % (k, v) for k, v in 73 | sorted(metric.metric_name.tags.items())) 74 | return '.'.join(x for x in 75 | [self._prefix, metric.metric_name.group, tags] if x) 76 | 77 | def configure(self, configs): 78 | pass 79 | 80 | def close(self): 81 | pass 82 | -------------------------------------------------------------------------------- /.github/workflows/python-package.yml: -------------------------------------------------------------------------------- 1 | # Derived from https://github.com/actions/starter-workflows/blob/main/ci/python-package.yml 2 | # 3 | name: Python Package 4 | 5 | on: 6 | push: 7 | branches: ["master"] 8 | pull_request: 9 | branches: ["master"] 10 | 11 | env: 12 | FORCE_COLOR: "1" # Make tools pretty. 
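  # The PIP_* settings below suppress pip's version-check and
  # python-version warnings to keep CI logs quieter.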
13 | PIP_DISABLE_PIP_VERSION_CHECK: "1" 14 | PIP_NO_PYTHON_VERSION_WARNING: "1" 15 | 16 | jobs: 17 | build: 18 | 19 | runs-on: ubuntu-latest 20 | name: "Test: python ${{ matrix.python }} / kafka ${{ matrix.kafka }}" 21 | continue-on-error: ${{ matrix.experimental || false }} 22 | strategy: 23 | fail-fast: false 24 | matrix: 25 | kafka: 26 | - "0.8.2.2" 27 | - "0.9.0.1" 28 | - "0.10.2.2" 29 | - "0.11.0.3" 30 | - "1.1.1" 31 | - "2.4.0" 32 | - "2.8.2" 33 | - "3.0.2" 34 | - "3.5.2" 35 | - "3.9.0" 36 | - "4.0.0" 37 | python: 38 | - "3.14" 39 | include: 40 | #- python: "pypy3.9" 41 | # kafka: "2.6.0" 42 | # experimental: true 43 | - python: "3.8" 44 | kafka: "4.0.0" 45 | - python: "3.9" 46 | kafka: "4.0.0" 47 | - python: "3.10" 48 | kafka: "4.0.0" 49 | - python: "3.11" 50 | kafka: "4.0.0" 51 | - python: "3.12" 52 | kafka: "4.0.0" 53 | - python: "3.13" 54 | kafka: "4.0.0" 55 | 56 | steps: 57 | - uses: actions/checkout@v6 58 | - name: Set up Python ${{ matrix.python }} 59 | uses: actions/setup-python@v6 60 | with: 61 | python-version: ${{ matrix.python }} 62 | cache: pip 63 | cache-dependency-path: | 64 | requirements-dev.txt 65 | - name: Install dependencies 66 | run: | 67 | sudo apt install -y libsnappy-dev libzstd-dev 68 | python -m pip install --upgrade pip 69 | pip install -r requirements-dev.txt 70 | - name: Pylint 71 | run: pylint --recursive=y --errors-only kafka test 72 | - name: Setup java 73 | uses: actions/setup-java@v5 74 | with: 75 | distribution: temurin 76 | java-version: 23 77 | - name: Restore cached kafka releases 78 | id: cache-servers-dist-restore 79 | uses: actions/cache/restore@v5 80 | with: 81 | path: servers/dist 82 | key: servers-dist-${{ matrix.kafka }} 83 | - name: Install Kafka release 84 | run: make servers/${{ matrix.kafka }}/kafka-bin 85 | - name: Update kafka release cache 86 | id: cache-servers-dist-save 87 | uses: actions/cache/save@v5 88 | with: 89 | path: servers/dist 90 | key: ${{ steps.cache-servers-dist-restore.outputs.cache-primary-key }} 91 | - name: Pytest 92 | run: make test 93 | env: 94 | KAFKA_VERSION: ${{ matrix.kafka }} 95 | -------------------------------------------------------------------------------- /kafka/partitioner/default.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | 4 | class DefaultPartitioner(object): 5 | """Default partitioner. 6 | 7 | Hashes key to partition using murmur2 hashing (from java client) 8 | If key is None, selects partition randomly from available, 9 | or from all partitions if none are currently available 10 | """ 11 | @classmethod 12 | def __call__(cls, key, all_partitions, available): 13 | """ 14 | Get the partition corresponding to key 15 | :param key: partitioning key 16 | :param all_partitions: list of all partitions sorted by partition ID 17 | :param available: list of available partitions in no particular order 18 | :return: one of the values from all_partitions or available 19 | """ 20 | if key is None: 21 | if available: 22 | return random.choice(available) 23 | return random.choice(all_partitions) 24 | 25 | idx = murmur2(key) 26 | idx &= 0x7fffffff 27 | idx %= len(all_partitions) 28 | return all_partitions[idx] 29 | 30 | 31 | # https://github.com/apache/kafka/blob/0.8.2/clients/src/main/java/org/apache/kafka/common/utils/Utils.java#L244 32 | def murmur2(data): 33 | """Pure-python Murmur2 implementation. 
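
    The result is a 32-bit value; callers typically mask with 0x7fffffff to
    get a non-negative index before taking a modulus (see DefaultPartitioner
    above).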
34 | 35 | Based on java client, see org.apache.kafka.common.utils.Utils.murmur2 36 | 37 | Args: 38 | data (bytes): opaque bytes 39 | 40 | Returns: MurmurHash2 of data 41 | """ 42 | length = len(data) 43 | seed = 0x9747b28c 44 | # 'm' and 'r' are mixing constants generated offline. 45 | # They're not really 'magic', they just happen to work well. 46 | m = 0x5bd1e995 47 | r = 24 48 | 49 | # Initialize the hash to a random value 50 | h = seed ^ length 51 | length4 = length // 4 52 | 53 | for i in range(length4): 54 | i4 = i * 4 55 | k = ((data[i4 + 0] & 0xff) + 56 | ((data[i4 + 1] & 0xff) << 8) + 57 | ((data[i4 + 2] & 0xff) << 16) + 58 | ((data[i4 + 3] & 0xff) << 24)) 59 | k &= 0xffffffff 60 | k *= m 61 | k &= 0xffffffff 62 | k ^= (k % 0x100000000) >> r # k ^= k >>> r 63 | k &= 0xffffffff 64 | k *= m 65 | k &= 0xffffffff 66 | 67 | h *= m 68 | h &= 0xffffffff 69 | h ^= k 70 | h &= 0xffffffff 71 | 72 | # Handle the last few bytes of the input array 73 | extra_bytes = length % 4 74 | if extra_bytes >= 3: 75 | h ^= (data[(length & ~3) + 2] & 0xff) << 16 76 | h &= 0xffffffff 77 | if extra_bytes >= 2: 78 | h ^= (data[(length & ~3) + 1] & 0xff) << 8 79 | h &= 0xffffffff 80 | if extra_bytes >= 1: 81 | h ^= (data[length & ~3] & 0xff) 82 | h &= 0xffffffff 83 | h *= m 84 | h &= 0xffffffff 85 | 86 | h ^= (h % 0x100000000) >> 13 # h >>> 13; 87 | h &= 0xffffffff 88 | h *= m 89 | h &= 0xffffffff 90 | h ^= (h % 0x100000000) >> 15 # h >>> 15; 91 | h &= 0xffffffff 92 | 93 | return h 94 | --------------------------------------------------------------------------------